# Project Quex (http://quex.sourceforge.net); License: MIT;
# (C) 2005-2020 Frank-Rene Schaefer;
#_______________________________________________________________________________
import os
import sys
sys.path.insert(0, os.environ["QUEX_PATH"])
from quex.DEFINITIONS import QUEX_PATH
import quex.engine.misc.error as error
from quex.engine.misc.interval_handling import Interval, NumberSet
from quex.constants import INTEGER_MAX
import re
import fnmatch
from copy import deepcopy, copy
unicode_db_directory = QUEX_PATH + "/quex/engine/codec_db/unicode/database"
comment_deleter_re = re.compile(r"#[^\n]*")
def open_data_base_file(Filename):
try:
fh = open(unicode_db_directory + "/" + Filename, "r")
except:
error.log("Fatal---Unicode Database File '%s' not found!\n" % Filename + \
"QUEX_PATH='%s'\n" % QUEX_PATH + \
"Unicode Database Directory: '%s'" % unicode_db_directory)
return fh
def parse_table(Filename, IntervalColumnList=[], NumberColumnList=[], NumberListColumnList=[], CommentF=False):
"""Columns in IntervalColumnList --> converted to Interval() objects
NumberColumnList --> converted to integers (hex numbers)
NumberListColumnList --> converted to integer list (hex numbers)
"""
fh = open_data_base_file(Filename)
record_set = []
for line in fh.readlines():
comment_idx = line.find("#")
comment = None
if comment_idx != -1:
comment = line[comment_idx+1:]
line = line[:comment_idx]
if line == "" or line.isspace():
continue
# append content to record set
cells = [x.strip() for x in line.split(";")]
for i in IntervalColumnList:
fields = cells[i].split("..") # range: value0..value1
assert len(fields) in [1, 2]
if len(fields) == 2:
begin = int("0x" + fields[0], 16)
end = int("0x" + fields[1], 16) + 1
else:
begin = int("0x" + fields[0], 16)
end = int("0x" + fields[0], 16) + 1
cells[i] = Interval(begin, end)
for i in NumberColumnList:
cells[i] = int("0x" + cells[i], 16)
for i in NumberListColumnList:
nl = []
for n in cells[i].split():
nl.append(int("0x" + n, 16))
cells[i] = nl
# Sometimes, the comment is useful
if CommentF:
cells.append(comment)
record_set.append(cells)
# There is no need to decouple here, since the record_set is created
# each time that the function is called.
return record_set
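# Illustrative sketch (not executed), assuming a hypothetical database line
# "0600..06FF; Arabic # comment":
#
#   records = parse_table("Blocks.txt", IntervalColumnList=[0])
#   # => one record would look like [Interval(0x600, 0x700), "Arabic"]
#
# i.e. interval columns become Interval objects (end is exclusive, hence the +1
# above) while the remaining cells stay as stripped strings.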
def convert_table_to_associative_map(table, ValueColumnIdx, ValueType, KeyColumnIdx):
"""Produces a dictionary that maps from 'keys' to NumberSets. The
number sets represent the code points for which the key (property)
is valid.
ValueColumnIdx: Column that contains the character code interval or
string to which one wishes to map.
KeyColumnIdx: Column that contains the 'key' to be used for the map.
RETURNS: Dictionary that represents the associative map.
"""
db = {}
if ValueType == "NumberSet":
for record in table:
key = record[KeyColumnIdx].strip()
key = key.replace(" ", "_")
value = record[ValueColumnIdx]
if type(value) == int: value = Interval(value)
# 'quick_append()' triggered assert!
db.setdefault(key, NumberSet()).add_interval(value)
elif ValueType == "number" or ValueType == "string":
for record in table:
key = record[KeyColumnIdx].strip()
key = key.replace(" ", "_")
value = record[ValueColumnIdx]
db[key] = value
else:
raise BaseException("ValueType = '%s' unknown.\n" % ValueType)
# if the content was a number set, it might be simplified, try it.
if ValueType == "NumberSet":
for key, number_set in list(db.items()):
number_set.clean()
return db
def load_db(DB_Filename, ValueType, ValueColumnIdx, KeyColumnIdx):
"""Loads a database contained in file 'DB_Filename'. Creates a python dictionary
that maps from a string (contained in column KeyColumnIdx) to a number set
or a single number (contained in column ValueColumnIdx).
NOTE: The 'key' may be, for example, the property value. The
'value' is the number set it points to. This may be
confusing.
"""
if ValueType == "NumberSet":
table = parse_table(DB_Filename, IntervalColumnList=[ValueColumnIdx])
elif ValueType == "number":
table = parse_table(DB_Filename, NumberColumnList=[ValueColumnIdx])
db = convert_table_to_associative_map(table, ValueColumnIdx, ValueType, KeyColumnIdx)
return db
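# Illustrative sketch (not executed), assuming "Blocks.txt" maps code point
# ranges (column 0) to block names (column 1):
#
#   block_db = load_db("Blocks.txt", "NumberSet", ValueColumnIdx=0, KeyColumnIdx=1)
#   # block_db["Basic_Latin"] would then be a NumberSet covering U+0000..U+007F.
#
# Note that keys containing spaces are normalized to underscores by
# convert_table_to_associative_map().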
class PropertyInfo:
def __init__(self, Name, Alias, Type, RelatedPropertyInfoDB):
"""Alias = short form of Name or Value.
"""
self.name = Name
self.alias = Alias
self.type = Type
self.alias_to_name_map = {} # map value alias to value
# NOTE: Not all values may have aliases!
self.code_point_db = None # map value (not alias) to number set or number
self.related_property_info_db = RelatedPropertyInfoDB
# Some values may be based on combinations of values. For those, the
# following maps are required.
self.alias_to_alias_combination_db = {}
self.name_to_alias_map = {}
def __repr__(self):
assert self.type in ["Binary", "Catalog", "Enumerated", "String", "Miscellaneous", "Numeric"], \
"self.type = " + repr(self.type)
txt = "NAME = '%s'\n" % self.name
txt += "ALIAS = '%s'\n" % self.alias
txt += "TYPE = '%s'\n" % self.type
if self.type == "Binary":
txt += "VALUE_ALIASES = (Binary has no values)\n"
else:
txt += "VALUE_ALIASES = {\n "
txt += self.get_value_list_help(INTEGER_MAX).replace(", ", ",\n ")
txt += "\n}\n"
return txt
def get_character_set(self, Value=None):
"""Returns the character set that corresponds to 'Property==Value'.
'Value' can be a property value or a property value alias.
For binary properties 'Value' must be None.
"""
assert self.type != "Binary" or Value is None
def get_value_combination(CmbAlias):
result = []
for alias in self.alias_to_alias_combination_db[CmbAlias]:
name = self.alias_to_name_map.get(alias)
if name is None:
return "Unicode database error: no name related to alias '%s'" % alias
result.append(name)
return result
if self.type != "Binary" and Value is None:
return "Property '%s' requires a value setting.\n" % self.name + \
"Possible Values: " + \
self.get_value_list_help()
if self.code_point_db is None:
self.init_code_point_db()
if self.type == "Binary":
# Decouple, since we refer to an internal database
return deepcopy(self.code_point_db)
adapted_value = Value.replace(" ", "_")
if adapted_value in self.code_point_db:
# 'value' is present as name in the code point database
value = adapted_value
elif adapted_value in self.alias_to_name_map:
# 'value' is present as an alias in the code point database
value = self.alias_to_name_map[adapted_value]
elif adapted_value in self.alias_to_alias_combination_db:
# 'value' is present as a combination of aliases
value = get_value_combination(adapted_value)
elif adapted_value in self.name_to_alias_map:
# The value was a combination of values
value = get_value_combination(self.name_to_alias_map[adapted_value])
else:
# -- WILDCARD MATCH: Results in a list of property values
character_set = self.__wildcard_value_match(adapted_value)
if character_set is None:
return "Property '%s' cannot have a value or value alias '%s'.\n" % (self.name, Value) + \
"Possible Values: " + \
self.get_value_list_help()
# No need to decouple, since character_set is not a reference to the
# internal database (for safety, do it anyway)
return deepcopy(character_set)
if type(value) == list:
result = NumberSet()
for element in value:
if element == "Unassigned": continue
entry = self.code_point_db.get(element)
if entry is None:
return "%s/%s is not supported by Unicode database." % (self.name, repr(element))
result.unite_with(entry)
else:
result = self.code_point_db.get(value)
if result is None:
return "%s/%s is not supported by Unicode database." % (self.name, repr(value))
# Reference to internal database --> decouple with 'deepcopy'
return deepcopy(result)
def init_code_point_db(self):
if self.alias in ["na", "na1", "nv", "gc", "bc", "isc"]:
# Name
# Unicode 1 Name
# Numeric Value
# General Category
# Bidi Class
# ISO Comment
self.related_property_info_db.load_UnicodeData()
return
if self.type == "Catalog":
if self.alias == "blk":
self.code_point_db = load_db("Blocks.txt", "NumberSet", 0, 1)
elif self.alias == "age":
self.code_point_db = load_db("DerivedAge.txt", "NumberSet", 0, 1)
elif self.alias == "sc":
self.code_point_db = load_db("Scripts.txt", "NumberSet", 0, 1)
else:
return
elif self.type == "Binary":
if self.alias in ["AHex", "Bidi_C", "Dash", "Dep", "Dia",
"Ext", "Hex", "Hyphen", "IDSB", "IDST", "Ideo", "Join_C",
"LOE", "NChar", "OAlpha", "ODI", "OGr_Ext", "OIDC", "OIDS",
"OLower", "OMath", "OUpper", "Pat_Syn", "Pat_WS", "QMark",
"Radical", "SD", "STerm", "Term", "UIdeo", "VS", "WSpace"]:
filename = "PropList.txt"
elif self.alias == "Bidi_M":
filename = "extracted/DerivedBinaryProperties.txt"
elif self.alias in ["Alpha", "DI", "Gr_Base", "Gr_Ext",
"Gr_Link", "IDC", "IDS", "Math", "Lower", "Upper", "XIDC", "XIDS" ]:
filename = "DerivedCoreProperties.txt"
elif self.alias == "Comp_Ex":
filename = "DerivedNormalizationProps.txt"
elif self.alias == "CE":
self.related_property_info_db.load_Composition_Exclusion()
return
else:
return
self.related_property_info_db.load_binary_properties(filename)
elif self.type == "Enumerated":
try:
filename = {
"Numeric_Type": "extracted/DerivedNumericType.txt",
"Joining_Type": "extracted/DerivedJoiningType.txt",
"Joining_Group": "extracted/DerivedJoiningGroup.txt",
"Word_Break": "auxiliary/WordBreakProperty.txt",
"Sentence_Break": "auxiliary/SentenceBreakProperty.txt",
"Grapheme_Cluster_Break": "auxiliary/GraphemeBreakProperty.txt",
"Hangul_Syllable_Type": "HangulSyllableType.txt",
"Line_Break": "extracted/DerivedLineBreak.txt",
"Decomposition_Type": "extracted/DerivedDecompositionType.txt",
"East_Asian_Width": "extracted/DerivedEastAsianWidth.txt",
"Canonical_Combining_Class": "extracted/DerivedCombiningClass.txt",
}[self.name]
except KeyError:
print("warning: no database file for property `%s'." % self.name)
return
self.code_point_db = load_db(filename, "NumberSet", 0, 1)
elif self.type == "Miscellaneous":
pass # see first check
def get_value_list_help(self, MaxN=20, OpeningBracket="", ClosingBracket=""):
if self.code_point_db is None:
self.init_code_point_db()
the_list = sorted(self.code_point_db.keys())
n = min(len(the_list), MaxN)
selection = the_list[:n]
selection.sort()
txt = ""
alias_name_pair_list = list(self.alias_to_name_map.items())
for element in selection:
if element == "": continue
txt += OpeningBracket + element
for alias, name in alias_name_pair_list:
if element == name:
txt += "(%s)" % alias
break
txt += ClosingBracket + ", "
if n != len(the_list): txt += "... (%i more)" % (len(the_list) - n)
else: txt = txt[:-2] + "."
return txt
def get_wildcard_value_matches(self, WildCardValue):
"""Does not consider value aliases!"""
value_candidates = list(self.code_point_db.keys())
match_value_list = fnmatch.filter(value_candidates, WildCardValue)
match_value_list.sort()
# No need to decouple, match_value_list is generated new for each call.
return match_value_list
def __wildcard_value_match(self, WildCardValue):
result = NumberSet()
value_list = self.get_wildcard_value_matches(WildCardValue)
if len(value_list) == 0:
return None
for value in value_list:
result.unite_with(NumberSet(self.code_point_db[value]))
# No decoupling needed, since the result is computed fresh for each call
return result
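# Illustrative sketch (not executed), assuming the Scripts database has been
# loaded: wildcard matching relies on fnmatch, so a hypothetical query such as
#
#   ucs_property_db["sc"].get_wildcard_value_matches("Ara*")
#
# would return every Script value starting with "Ara" (e.g. "Arabic"), and
# __wildcard_value_match() unites the code point sets of all matches.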
class PropertyInfoDB:
def __init__(self):
self.property_name_to_alias_map = {} # map: property name to property alias
self.db = {} # map: property alias to property information
self.__code_point_to_name_db = {}
def __getitem__(self, PropertyName):
if self.db == {}: self.init_db()
if PropertyName in list(self.db.keys()):
return self.db[PropertyName]
elif PropertyName in list(self.property_name_to_alias_map.keys()):
return self.db[self.property_name_to_alias_map[PropertyName]]
else:
return None # "<unknown property or alias '%s'>" % PropertyName
def get_property_value_matches(self, PropertyName, Value):
assert Value is not None
if self.db == {}: self.init_db()
property = self[PropertyName]
if not isinstance(property, PropertyInfo):
# '__getitem__' returned 'None' => unknown property name or alias
txt = "Unknown Unicode property or alias '%s'.\n" % PropertyName
txt += "Properties: " + self.get_property_names()
return txt
if property.type == "Binary":
if Value is not None:
return "Binary property '%s' cannot have a value.\n" % PropertyName + \
"Received '%s = %s'." % (PropertyName, Value)
return property.get_wildcard_value_matches(Value)
def get_character_set(self, PropertyName, Value=None, Fh=-1):
"""Returns the character set that corresponds to 'Property==Value'.
'Property' can be a property name or a property alias.
'Value' can be a property value or a property value alias.
For binary properties 'Value' must be None.
RETURNS: NumberSet in case of success.
str in case an error occurred. String describes the problem.
"""
if self.db == {}: self.init_db()
error.verify_word_in_list(PropertyName, self.get_property_name_list(),
"Unknown Unicode property '%s'" % PropertyName, Fh, ExitF=True)
property = self[PropertyName]
if property.type == "Binary":
if Value is not None:
return "Binary property '%s' cannot have a value.\n" % PropertyName + \
"Received '%s = %s'." % (PropertyName, Value)
elif Value is None:
return "Non-Binary property '%s' must have a value.\n" % PropertyName + \
"Expected something like '%s = Value'.\n" % PropertyName + \
"Possible Values: " + \
property.get_value_list_help()
return property.get_character_set(Value)
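# Illustrative sketch (not executed), mirroring the commented examples at the
# bottom of this file:
#
#   greek_set = ucs_property_db.get_character_set("Script", "Greek")
#   white_space = ucs_property_db.get_character_set("White_Space")
#
# Both calls return a NumberSet on success; on failure a string describing the
# problem is returned, so callers should check the result type.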
def init_db(self):
self.__parse_property_name_alias_and_type()
self.__parse_property_value_and_value_aliases()
def __parse_property_name_alias_and_type(self):
fh = open_data_base_file("PropertyAliases.txt")
# -- skip anything until the first line that contains '======'
line = fh.readline()
while line != "":
if line.find("# ==================") != -1: break
line = fh.readline()
property_type = "none"
for line in fh.readlines():
line = line.strip()
if line.startswith("#") and line.find("Properties") != -1:
property_type = line.split()[1]
continue
line = comment_deleter_re.sub("", line)
if line.isspace() or line == "": continue
# append content to record set
fields = [x.strip() for x in line.split(";")]
property_alias = fields[0]
property_name = fields[1]
self.db[property_alias] = PropertyInfo(property_name, property_alias, property_type, self)
self.property_name_to_alias_map[property_name] = property_alias
def __parse_property_value_and_value_aliases(self):
"""NOTE: Function __parse_property_name_alias_and_type() **must be called**
before this function.
"""
assert self.db != {}
table = parse_table("PropertyValueAliases.txt", CommentF=True)
for row in table:
property_alias = row[0].strip()
property_value_alias = row[1].strip()
property_value = row[2].replace(" ", "_").strip()
# if property db has been parsed before, this shall not fail
property_info = self.db[property_alias]
# The 'General Category' property is different, in the sense that
# important information may be stored in comments.
if property_alias == "gc" and row[-1] is not None:
combination = [x.strip() for x in row[-1].split("|")]
property_info.alias_to_alias_combination_db[property_value_alias] = combination
property_info.name_to_alias_map[property_value] = property_value_alias
else:
property_info.alias_to_name_map[property_value_alias] = property_value
def load_binary_properties(self, DB_Filename):
# property descriptions working with 'property names'
db = load_db(DB_Filename, "NumberSet", 0, 1)
for key, number_set in list(db.items()):
if key in self.property_name_to_alias_map:
property_name_alias = self.property_name_to_alias_map[key]
else:
property_name_alias = key
property = self.db[property_name_alias]
if property.type != "Binary": continue
property.code_point_db = number_set
def load_Composition_Exclusion(self):
# Column 0 contains what is interesting ...
table = parse_table("CompositionExclusions.txt", NumberColumnList=[0])
number_set = NumberSet()
for row in table:
begin = row[0]
# '.quick_append()' triggered assert!
number_set.add_interval(Interval(begin, begin + 1))
number_set.clean()
self.db["CE"].code_point_db = number_set
def load_UnicodeData(self):
fh = open_data_base_file("UnicodeData.txt")
# some rows contain aliases, so they need to get converted into values
property_general_category = self.db["gc"]
property_bidi_class = self.db["bc"]
def convert(Property, ValueAlias):
"""Convert specified ValueAlias to Value of the given property."""
if ValueAlias in Property.alias_to_name_map:
return Property.alias_to_name_map[ValueAlias]
return ValueAlias
names_db = {}
general_category_db = {}
bidi_class_db = {}
numeric_value_db = {}
names_uc1_db = {}
iso_comment_db = {}
for line in fh.readlines():
if line.find("#") != -1: line = line[:line.find("#")]
if line == "" or line.isspace(): continue
x = line.split(";")
code_point = int("0x" + x[0].strip(), 16) # CodePointIdx = 0
name = x[1].strip().replace(" ", "_") # NameIdx = 1
general_category = x[2].strip().replace(" ", "_") # GeneralCategoryIdx = 2
general_category = convert(property_general_category, general_category)
bidi_class = x[4].strip().replace(" ", "_") # BidiClassIdx = 4
bidi_class = convert(property_bidi_class, bidi_class)
numeric_value = x[6].strip() # NumericValueIdx = 6
uc1_name = x[10].strip().replace(" ", "_") # NameUC1Idx = 10
iso_comment = x[11].strip().replace(" ", "_") # ISO_CommentIdx = 11
names_db[name] = code_point
general_category_db.setdefault(general_category, NumberSet()).quick_append_value(code_point)
bidi_class_db.setdefault (bidi_class, NumberSet()).quick_append_value(code_point)
numeric_value_db.setdefault (numeric_value, NumberSet()).quick_append_value(code_point)
names_uc1_db[uc1_name] = code_point
iso_comment_db[iso_comment] = str(code_point)
self.db["na"].code_point_db = names_db # Name
self.db["gc"].code_point_db = general_category_db # General Category
self.db["bc"].code_point_db = bidi_class_db # BidiClass
self.db["nv"].code_point_db = numeric_value_db # Numeric Value
self.db["na1"].code_point_db = names_uc1_db # Name Unicode 1
self.db["isc"].code_point_db = iso_comment_db # ISO_Comment
def map_code_point_to_character_name(self, CodePoint):
if self.db["na"].code_point_db is None:
self.load_UnicodeData()
if len(self.__code_point_to_name_db) == 0:
for key, value in list(self.db["na"].code_point_db.items()):
self.__code_point_to_name_db[value] = key
for key, value in list(self.db["na1"].code_point_db.items()):
if value in self.__code_point_to_name_db: continue
self.__code_point_to_name_db[value] = key
return self.__code_point_to_name_db.get(CodePoint, "UCS 0x%06X" % CodePoint)
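# Illustrative sketch (not executed): the method loads UnicodeData.txt on
# demand, so a hypothetical call such as
#
#   ucs_property_db.map_code_point_to_character_name(0x41)
#
# would return "LATIN_CAPITAL_LETTER_A" (names are stored with spaces replaced
# by underscores); unknown code points fall back to the "UCS 0x%06X" format.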
def get_property_descriptions(self):
item_list = list(self.db.items())
L = max([len(property.name) for property in list(self.db.values())])
La = max([len(property.alias) for property in list(self.db.values())])
Lt = max([len(property.type) for property in list(self.db.values())])
txt = "# Abbreviation, Name, Type\n"
item_list.sort(key=lambda x: x[0])
for key, property in item_list:
txt += "%s, %s%s, %s%s" % \
(property.alias, " " * (La - len(property.alias)),
property.name, " " * (L - len(property.name)),
property.type)
property.init_code_point_db()
if property.code_point_db is None:
txt += ", " + " " * (Lt - len(property.type)) + "<unsupported>"
txt += "\n"
return txt
def get_property_names(self, BinaryOnlyF=False):
if self.db == {}:
self.init_db()
alias_list = sorted(self.db.keys())
txt = []
for alias in alias_list:
if BinaryOnlyF and self.db[alias].type != "Binary": continue
txt.append("%s (%s)" % (self.db[alias].name, alias))
txt.append(", ")
if txt: txt = txt[:-1] # Remove trailing ", "
return "".join(txt)
def get_property_name_list(self, BinaryOnlyF=False):
if self.db == {}: self.init_db()
alias_list = list(self.db.keys())
alias_list.sort(key=lambda x: self.db[x].name)
result = copy(alias_list)
result.extend(
self.db[alias].name
for alias in alias_list
if self.db[alias].type != "Binary" or not BinaryOnlyF
)
return sorted(result)
def get_documentation(self):
binary_property_list = []
non_binary_property_list = []
for property in list(self.db.values()):
if property.type == "Binary": binary_property_list.append(property)
else: non_binary_property_list.append(property)
def list_to_string(the_list):
the_list.sort(key=lambda x: x.name)
txt = ""
for property in the_list:
txt += property.name + "(%s), " % property.alias
return txt
txt = "Binary Properties::\n\n"
txt += " " + list_to_string(binary_property_list)
txt += "\n\n"
txt += "Non-Binary Properties::\n\n"
txt += " " + list_to_string(non_binary_property_list)
txt += "\n\n"
txt += "--------------------------------------------------------------\n"
txt += "\n\n"
txt += "Property settings:\n"
txt += "\n\n"
for property in non_binary_property_list:
if property.type == "Binary": continue
txt += "%s::\n\n" % property.name
property.init_code_point_db()
if property.code_point_db is None:
txt += " (not supported)\n"
elif property.name in ["Name", "Unicode_1_Name"]:
txt += " (see Unicode Standard Literature)\n"
else:
value_txt = property.get_value_list_help(270, OpeningBracket="$$", ClosingBracket="$$")
txt += " " + value_txt + "\n"
txt += "\n"
return txt
ucs_property_db = PropertyInfoDB()
if __name__ == "__main__":
ucs_property_db.init_db()
################################################################################
# NOTE: Do not delete this. It is used to generate documentation automatically.
################################################################################
print(ucs_property_db.get_documentation())
# print ucs_property_db.db["bc"].get_value_list_help()
# print ucs_property_db.get_character_set("Block", "Arabic")
# print ucs_property_db.get_character_set("Age", "5.0")
# print ucs_property_db.get_character_set("Script", "Greek")
# print "%X" % names_db["LATIN SMALL LETTER CLOSED REVERSED EPSILON"]
# print ucs_property_db.get_character_set("White_Space")
#print ucs_property_db.get_property_descriptions()
|
import pytest
import numpy as np
from pytest import raises
from copy import deepcopy
from thyme.parsers.cp2k import parse_md, pack_folder_trj
from thyme.trajectory import Trajectory
from pathlib import Path
class TestParseMD:
def test_direct(self):
folder = "tests/example_files/cp2k_md"
trj = parse_md(folder, f"{folder}/CP2K.inp", "CO2H2")
# check whether all frames are read
assert len(trj) == 3
# check whether it parsed the fixed atom and nan their entries correctly
assert (trj.force[:, 0, :] != trj.force[:, 0, :]).all()
assert (trj.force[:, 1:, :] == trj.force[:, 1:, :]).all()
def test_foldersearch(self):
folder = "tests/example_files/cp2k_md"
trj = pack_folder_trj(folder=folder, data_filter=None)
assert len(trj) == 3
class TestASEShellOut:
def test_direct(self):
pass
|
import re
import json
import six
from itertools import cycle
from scrapy.utils.misc import load_object
from scrapy_rotated_proxy.extensions import default_settings
from scrapy_rotated_proxy import util
import logging
if six.PY2:
from urlparse import urlunparse
from urllib2 import _parse_proxy
else:
from urllib.parse import urlunparse
from urllib.request import _parse_proxy
logger = logging.getLogger(__name__)
class FileProxyStorage():
def __init__(self, settings, auth_encoding='latin-1'):
self.file_path = None
if settings.get('PROXY_FILE_PATH') or getattr(default_settings, 'PROXY_FILE_PATH', None):
self.file_path = settings.get('PROXY_FILE_PATH',
getattr(default_settings, 'PROXY_FILE_PATH', None))
else:
self.settings = settings
self.auth_encoding = auth_encoding
def open_spider(self, spider):
logger.info('{storage} opened'.format(storage=self.__class__.__name__))
def close_spider(self, spider):
logger.info('{storage} closed'.format(storage=self.__class__.__name__))
def _get_proxy(self, url, orig_type=''):
proxy_type, user, password, hostport = _parse_proxy(url)
proxy_url = urlunparse(
(proxy_type or orig_type, hostport, '', '', '', ''))
creds = util._basic_auth_header(user, password,
self.auth_encoding) if user else None
return creds, proxy_url
def proxies(self):
pattern = re.compile(r'(?P<scheme>[A-Z]+)_PROXIES')
def _filter(tuple_):
m = pattern.match(tuple_[0])
if m:
scheme = m.group('scheme').lower()
return scheme, {self._get_proxy(item, scheme) for item in
tuple_[1]}
if self.file_path:
self.settings = json.load(open(self.file_path))
proxies = []
for item in self.settings.items():
pair = _filter(item)
if pair:
proxies.append(pair)
return dict(proxies)
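# Illustrative sketch (not executed), assuming settings of the form
#
#   HTTP_PROXIES = ['http://user:pass@host1:8080', 'http://host2:8080']
#   HTTPS_PROXIES = ['https://host3:8443']
#
# proxies() would then return something like
#   {'http': {(creds_or_None, 'http://host1:8080'), ...}, 'https': {...}}
# i.e. each <SCHEME>_PROXIES setting becomes a set of (credentials, proxy_url)
# pairs keyed by its lower-cased scheme.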
|
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 8 10:46:25 2019
@author: Lombardi
"""
'''
APRIL
'''
from core import User, np
User_list = []
#User classes definition
LI = User("low income",1)
User_list.append(LI)
#Appliances definition
#Low Income
LI_indoor_bulb = LI.Appliance(LI,3,7,2,120,0.2,10)
LI_indoor_bulb.windows([1082,1440],[0,30],0.35)
LI_outdoor_bulb = LI.Appliance(LI,1,13,2,600,0.2,10)
LI_outdoor_bulb.windows([0,330],[1082,1440],0.35)
LI_TV = LI.Appliance(LI,1,60,3,90,0.1,5)
LI_TV.windows([750,840],[1082,1440],0.35,[0,30])
LI_DVD = LI.Appliance(LI,1,8,3,30,0.1,5)
LI_DVD.windows([750,840],[1082,1440],0.35,[0,30])
LI_Antenna = LI.Appliance(LI,1,8,3,60,0.1,5)
LI_Antenna.windows([750,840],[1082,1440],0.35,[0,30])
LI_Phone_charger = LI.Appliance(LI,3,2,1,300,0.2,5)
LI_Phone_charger.windows([1080,1440],[0,0],0.35)
|
from datetime import date
from django.contrib.contenttypes.models import ContentType
from django.test import Client, TestCase
from django.urls import reverse
from rest_framework import status
from dcim.forms import SiteCSVForm
from dcim.models import Site
from extras.choices import *
from extras.models import CustomField, CustomFieldValue, CustomFieldChoice
from utilities.testing import APITestCase, create_test_user
from virtualization.models import VirtualMachine
class CustomFieldTest(TestCase):
def setUp(self):
Site.objects.bulk_create([
Site(name='Site A', slug='site-a'),
Site(name='Site B', slug='site-b'),
Site(name='Site C', slug='site-c'),
])
def test_simple_fields(self):
DATA = (
{'field_type': CustomFieldTypeChoices.TYPE_TEXT, 'field_value': 'Foobar!', 'empty_value': ''},
{'field_type': CustomFieldTypeChoices.TYPE_INTEGER, 'field_value': 0, 'empty_value': None},
{'field_type': CustomFieldTypeChoices.TYPE_INTEGER, 'field_value': 42, 'empty_value': None},
{'field_type': CustomFieldTypeChoices.TYPE_BOOLEAN, 'field_value': True, 'empty_value': None},
{'field_type': CustomFieldTypeChoices.TYPE_BOOLEAN, 'field_value': False, 'empty_value': None},
{'field_type': CustomFieldTypeChoices.TYPE_DATE, 'field_value': date(2016, 6, 23), 'empty_value': None},
{'field_type': CustomFieldTypeChoices.TYPE_URL, 'field_value': 'http://example.com/', 'empty_value': ''},
)
obj_type = ContentType.objects.get_for_model(Site)
for data in DATA:
# Create a custom field
cf = CustomField(type=data['field_type'], name='my_field', required=False)
cf.save()
cf.obj_type.set([obj_type])
cf.save()
# Assign a value to the first Site
site = Site.objects.first()
cfv = CustomFieldValue(field=cf, obj_type=obj_type, obj_id=site.id)
cfv.value = data['field_value']
cfv.save()
# Retrieve the stored value
cfv = CustomFieldValue.objects.filter(obj_type=obj_type, obj_id=site.pk).first()
self.assertEqual(cfv.value, data['field_value'])
# Delete the stored value
cfv.value = data['empty_value']
cfv.save()
self.assertEqual(CustomFieldValue.objects.filter(obj_type=obj_type, obj_id=site.pk).count(), 0)
# Delete the custom field
cf.delete()
def test_select_field(self):
obj_type = ContentType.objects.get_for_model(Site)
# Create a custom field
cf = CustomField(type=CustomFieldTypeChoices.TYPE_SELECT, name='my_field', required=False)
cf.save()
cf.obj_type.set([obj_type])
cf.save()
# Create some choices for the field
CustomFieldChoice.objects.bulk_create([
CustomFieldChoice(field=cf, value='Option A'),
CustomFieldChoice(field=cf, value='Option B'),
CustomFieldChoice(field=cf, value='Option C'),
])
# Assign a value to the first Site
site = Site.objects.first()
cfv = CustomFieldValue(field=cf, obj_type=obj_type, obj_id=site.id)
cfv.value = cf.choices.first()
cfv.save()
# Retrieve the stored value
cfv = CustomFieldValue.objects.filter(obj_type=obj_type, obj_id=site.pk).first()
self.assertEqual(str(cfv.value), 'Option A')
# Delete the stored value
cfv.value = None
cfv.save()
self.assertEqual(CustomFieldValue.objects.filter(obj_type=obj_type, obj_id=site.pk).count(), 0)
# Delete the custom field
cf.delete()
class CustomFieldAPITest(APITestCase):
@classmethod
def setUpTestData(cls):
content_type = ContentType.objects.get_for_model(Site)
# Text custom field
cls.cf_text = CustomField(type=CustomFieldTypeChoices.TYPE_TEXT, name='text_field', default='foo')
cls.cf_text.save()
cls.cf_text.obj_type.set([content_type])
# Integer custom field
cls.cf_integer = CustomField(type=CustomFieldTypeChoices.TYPE_INTEGER, name='number_field', default=123)
cls.cf_integer.save()
cls.cf_integer.obj_type.set([content_type])
# Boolean custom field
cls.cf_boolean = CustomField(type=CustomFieldTypeChoices.TYPE_BOOLEAN, name='boolean_field', default=False)
cls.cf_boolean.save()
cls.cf_boolean.obj_type.set([content_type])
# Date custom field
cls.cf_date = CustomField(type=CustomFieldTypeChoices.TYPE_DATE, name='date_field', default='2020-01-01')
cls.cf_date.save()
cls.cf_date.obj_type.set([content_type])
# URL custom field
cls.cf_url = CustomField(type=CustomFieldTypeChoices.TYPE_URL, name='url_field', default='http://example.com/1')
cls.cf_url.save()
cls.cf_url.obj_type.set([content_type])
# Select custom field
cls.cf_select = CustomField(type=CustomFieldTypeChoices.TYPE_SELECT, name='choice_field')
cls.cf_select.save()
cls.cf_select.obj_type.set([content_type])
cls.cf_select_choice1 = CustomFieldChoice(field=cls.cf_select, value='Foo')
cls.cf_select_choice1.save()
cls.cf_select_choice2 = CustomFieldChoice(field=cls.cf_select, value='Bar')
cls.cf_select_choice2.save()
cls.cf_select_choice3 = CustomFieldChoice(field=cls.cf_select, value='Baz')
cls.cf_select_choice3.save()
cls.cf_select.default = cls.cf_select_choice1.value
cls.cf_select.save()
# Create some sites
cls.sites = (
Site(name='Site 1', slug='site-1'),
Site(name='Site 2', slug='site-2'),
)
Site.objects.bulk_create(cls.sites)
# Assign custom field values for site 2
site2_cfvs = {
cls.cf_text: 'bar',
cls.cf_integer: 456,
cls.cf_boolean: True,
cls.cf_date: '2020-01-02',
cls.cf_url: 'http://example.com/2',
cls.cf_select: cls.cf_select_choice2.pk,
}
for field, value in site2_cfvs.items():
cfv = CustomFieldValue(field=field, obj=cls.sites[1])
cfv.value = value
cfv.save()
def test_get_single_object_without_custom_field_values(self):
"""
Validate that custom fields are present on an object even if it has no values defined.
"""
url = reverse('dcim-api:site-detail', kwargs={'pk': self.sites[0].pk})
response = self.client.get(url, **self.header)
self.assertEqual(response.data['name'], self.sites[0].name)
self.assertEqual(response.data['custom_fields'], {
'text_field': None,
'number_field': None,
'boolean_field': None,
'date_field': None,
'url_field': None,
'choice_field': None,
})
def test_get_single_object_with_custom_field_values(self):
"""
Validate that custom fields are present and correctly set for an object with values defined.
"""
site2_cfvs = {
cfv.field.name: cfv.value for cfv in self.sites[1].custom_field_values.all()
}
url = reverse('dcim-api:site-detail', kwargs={'pk': self.sites[1].pk})
response = self.client.get(url, **self.header)
self.assertEqual(response.data['name'], self.sites[1].name)
self.assertEqual(response.data['custom_fields']['text_field'], site2_cfvs['text_field'])
self.assertEqual(response.data['custom_fields']['number_field'], site2_cfvs['number_field'])
self.assertEqual(response.data['custom_fields']['boolean_field'], site2_cfvs['boolean_field'])
self.assertEqual(response.data['custom_fields']['date_field'], site2_cfvs['date_field'])
self.assertEqual(response.data['custom_fields']['url_field'], site2_cfvs['url_field'])
self.assertEqual(response.data['custom_fields']['choice_field']['label'], self.cf_select_choice2.value)
def test_create_single_object_with_defaults(self):
"""
Create a new site with no specified custom field values and check that it received the default values.
"""
data = {
'name': 'Site 3',
'slug': 'site-3',
}
url = reverse('dcim-api:site-list')
response = self.client.post(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
# Validate response data
response_cf = response.data['custom_fields']
self.assertEqual(response_cf['text_field'], self.cf_text.default)
self.assertEqual(response_cf['number_field'], self.cf_integer.default)
self.assertEqual(response_cf['boolean_field'], self.cf_boolean.default)
self.assertEqual(response_cf['date_field'], self.cf_date.default)
self.assertEqual(response_cf['url_field'], self.cf_url.default)
self.assertEqual(response_cf['choice_field'], self.cf_select_choice1.pk)
# Validate database data
site = Site.objects.get(pk=response.data['id'])
cfvs = {
cfv.field.name: cfv.value for cfv in site.custom_field_values.all()
}
self.assertEqual(cfvs['text_field'], self.cf_text.default)
self.assertEqual(cfvs['number_field'], self.cf_integer.default)
self.assertEqual(cfvs['boolean_field'], self.cf_boolean.default)
self.assertEqual(str(cfvs['date_field']), self.cf_date.default)
self.assertEqual(cfvs['url_field'], self.cf_url.default)
self.assertEqual(cfvs['choice_field'].pk, self.cf_select_choice1.pk)
def test_create_single_object_with_values(self):
"""
Create a single new site with a value for each type of custom field.
"""
data = {
'name': 'Site 3',
'slug': 'site-3',
'custom_fields': {
'text_field': 'bar',
'number_field': 456,
'boolean_field': True,
'date_field': '2020-01-02',
'url_field': 'http://example.com/2',
'choice_field': self.cf_select_choice2.pk,
},
}
url = reverse('dcim-api:site-list')
response = self.client.post(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
# Validate response data
response_cf = response.data['custom_fields']
data_cf = data['custom_fields']
self.assertEqual(response_cf['text_field'], data_cf['text_field'])
self.assertEqual(response_cf['number_field'], data_cf['number_field'])
self.assertEqual(response_cf['boolean_field'], data_cf['boolean_field'])
self.assertEqual(response_cf['date_field'], data_cf['date_field'])
self.assertEqual(response_cf['url_field'], data_cf['url_field'])
self.assertEqual(response_cf['choice_field'], data_cf['choice_field'])
# Validate database data
site = Site.objects.get(pk=response.data['id'])
cfvs = {
cfv.field.name: cfv.value for cfv in site.custom_field_values.all()
}
self.assertEqual(cfvs['text_field'], data_cf['text_field'])
self.assertEqual(cfvs['number_field'], data_cf['number_field'])
self.assertEqual(cfvs['boolean_field'], data_cf['boolean_field'])
self.assertEqual(str(cfvs['date_field']), data_cf['date_field'])
self.assertEqual(cfvs['url_field'], data_cf['url_field'])
self.assertEqual(cfvs['choice_field'].pk, data_cf['choice_field'])
def test_create_multiple_objects_with_defaults(self):
"""
Create three new sites with no specified custom field values and check that each received
the default custom field values.
"""
data = (
{
'name': 'Site 3',
'slug': 'site-3',
},
{
'name': 'Site 4',
'slug': 'site-4',
},
{
'name': 'Site 5',
'slug': 'site-5',
},
)
url = reverse('dcim-api:site-list')
response = self.client.post(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
self.assertEqual(len(response.data), len(data))
for i, obj in enumerate(data):
# Validate response data
response_cf = response.data[i]['custom_fields']
self.assertEqual(response_cf['text_field'], self.cf_text.default)
self.assertEqual(response_cf['number_field'], self.cf_integer.default)
self.assertEqual(response_cf['boolean_field'], self.cf_boolean.default)
self.assertEqual(response_cf['date_field'], self.cf_date.default)
self.assertEqual(response_cf['url_field'], self.cf_url.default)
self.assertEqual(response_cf['choice_field'], self.cf_select_choice1.pk)
# Validate database data
site = Site.objects.get(pk=response.data[i]['id'])
cfvs = {
cfv.field.name: cfv.value for cfv in site.custom_field_values.all()
}
self.assertEqual(cfvs['text_field'], self.cf_text.default)
self.assertEqual(cfvs['number_field'], self.cf_integer.default)
self.assertEqual(cfvs['boolean_field'], self.cf_boolean.default)
self.assertEqual(str(cfvs['date_field']), self.cf_date.default)
self.assertEqual(cfvs['url_field'], self.cf_url.default)
self.assertEqual(cfvs['choice_field'].pk, self.cf_select_choice1.pk)
def test_create_multiple_objects_with_values(self):
"""
Create three new sites, each with custom fields defined.
"""
custom_field_data = {
'text_field': 'bar',
'number_field': 456,
'boolean_field': True,
'date_field': '2020-01-02',
'url_field': 'http://example.com/2',
'choice_field': self.cf_select_choice2.pk,
}
data = (
{
'name': 'Site 3',
'slug': 'site-3',
'custom_fields': custom_field_data,
},
{
'name': 'Site 4',
'slug': 'site-4',
'custom_fields': custom_field_data,
},
{
'name': 'Site 5',
'slug': 'site-5',
'custom_fields': custom_field_data,
},
)
url = reverse('dcim-api:site-list')
response = self.client.post(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
self.assertEqual(len(response.data), len(data))
for i, obj in enumerate(data):
# Validate response data
response_cf = response.data[i]['custom_fields']
self.assertEqual(response_cf['text_field'], custom_field_data['text_field'])
self.assertEqual(response_cf['number_field'], custom_field_data['number_field'])
self.assertEqual(response_cf['boolean_field'], custom_field_data['boolean_field'])
self.assertEqual(response_cf['date_field'], custom_field_data['date_field'])
self.assertEqual(response_cf['url_field'], custom_field_data['url_field'])
self.assertEqual(response_cf['choice_field'], custom_field_data['choice_field'])
# Validate database data
site = Site.objects.get(pk=response.data[i]['id'])
cfvs = {
cfv.field.name: cfv.value for cfv in site.custom_field_values.all()
}
self.assertEqual(cfvs['text_field'], custom_field_data['text_field'])
self.assertEqual(cfvs['number_field'], custom_field_data['number_field'])
self.assertEqual(cfvs['boolean_field'], custom_field_data['boolean_field'])
self.assertEqual(str(cfvs['date_field']), custom_field_data['date_field'])
self.assertEqual(cfvs['url_field'], custom_field_data['url_field'])
self.assertEqual(cfvs['choice_field'].pk, custom_field_data['choice_field'])
def test_update_single_object_with_values(self):
"""
Update an object with existing custom field values. Ensure that only the updated custom field values are
modified.
"""
site2_original_cfvs = {
cfv.field.name: cfv.value for cfv in self.sites[1].custom_field_values.all()
}
data = {
'custom_fields': {
'text_field': 'ABCD',
'number_field': 1234,
},
}
url = reverse('dcim-api:site-detail', kwargs={'pk': self.sites[1].pk})
response = self.client.patch(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_200_OK)
# Validate response data
response_cf = response.data['custom_fields']
data_cf = data['custom_fields']
self.assertEqual(response_cf['text_field'], data_cf['text_field'])
self.assertEqual(response_cf['number_field'], data_cf['number_field'])
# TODO: Non-updated fields are missing from the response data
# self.assertEqual(response_cf['boolean_field'], site2_original_cfvs['boolean_field'])
# self.assertEqual(response_cf['date_field'], site2_original_cfvs['date_field'])
# self.assertEqual(response_cf['url_field'], site2_original_cfvs['url_field'])
# self.assertEqual(response_cf['choice_field']['label'], site2_original_cfvs['choice_field'].value)
# Validate database data
site2_updated_cfvs = {
cfv.field.name: cfv.value for cfv in self.sites[1].custom_field_values.all()
}
self.assertEqual(site2_updated_cfvs['text_field'], data_cf['text_field'])
self.assertEqual(site2_updated_cfvs['number_field'], data_cf['number_field'])
self.assertEqual(site2_updated_cfvs['boolean_field'], site2_original_cfvs['boolean_field'])
self.assertEqual(site2_updated_cfvs['date_field'], site2_original_cfvs['date_field'])
self.assertEqual(site2_updated_cfvs['url_field'], site2_original_cfvs['url_field'])
self.assertEqual(site2_updated_cfvs['choice_field'], site2_original_cfvs['choice_field'])
class CustomFieldChoiceAPITest(APITestCase):
def setUp(self):
super().setUp()
vm_content_type = ContentType.objects.get_for_model(VirtualMachine)
self.cf_1 = CustomField.objects.create(name="cf_1", type=CustomFieldTypeChoices.TYPE_SELECT)
self.cf_2 = CustomField.objects.create(name="cf_2", type=CustomFieldTypeChoices.TYPE_SELECT)
self.cf_choice_1 = CustomFieldChoice.objects.create(field=self.cf_1, value="cf_field_1", weight=100)
self.cf_choice_2 = CustomFieldChoice.objects.create(field=self.cf_1, value="cf_field_2", weight=50)
self.cf_choice_3 = CustomFieldChoice.objects.create(field=self.cf_2, value="cf_field_3", weight=10)
def test_list_cfc(self):
url = reverse('extras-api:custom-field-choice-list')
response = self.client.get(url, **self.header)
self.assertEqual(len(response.data), 2)
self.assertEqual(len(response.data[self.cf_1.name]), 2)
self.assertEqual(len(response.data[self.cf_2.name]), 1)
self.assertTrue(self.cf_choice_1.value in response.data[self.cf_1.name])
self.assertTrue(self.cf_choice_2.value in response.data[self.cf_1.name])
self.assertTrue(self.cf_choice_3.value in response.data[self.cf_2.name])
self.assertEqual(self.cf_choice_1.pk, response.data[self.cf_1.name][self.cf_choice_1.value])
self.assertEqual(self.cf_choice_2.pk, response.data[self.cf_1.name][self.cf_choice_2.value])
self.assertEqual(self.cf_choice_3.pk, response.data[self.cf_2.name][self.cf_choice_3.value])
class CustomFieldImportTest(TestCase):
def setUp(self):
user = create_test_user(
permissions=[
'dcim.view_site',
'dcim.add_site',
]
)
self.client = Client()
self.client.force_login(user)
@classmethod
def setUpTestData(cls):
custom_fields = (
CustomField(name='text', type=CustomFieldTypeChoices.TYPE_TEXT),
CustomField(name='integer', type=CustomFieldTypeChoices.TYPE_INTEGER),
CustomField(name='boolean', type=CustomFieldTypeChoices.TYPE_BOOLEAN),
CustomField(name='date', type=CustomFieldTypeChoices.TYPE_DATE),
CustomField(name='url', type=CustomFieldTypeChoices.TYPE_URL),
CustomField(name='select', type=CustomFieldTypeChoices.TYPE_SELECT),
)
for cf in custom_fields:
cf.save()
cf.obj_type.set([ContentType.objects.get_for_model(Site)])
CustomFieldChoice.objects.bulk_create((
CustomFieldChoice(field=custom_fields[5], value='Choice A'),
CustomFieldChoice(field=custom_fields[5], value='Choice B'),
CustomFieldChoice(field=custom_fields[5], value='Choice C'),
))
def test_import(self):
"""
Import a Site in CSV format, including a value for each CustomField.
"""
data = (
('name', 'slug', 'cf_text', 'cf_integer', 'cf_boolean', 'cf_date', 'cf_url', 'cf_select'),
('Site 1', 'site-1', 'ABC', '123', 'True', '2020-01-01', 'http://example.com/1', 'Choice A'),
('Site 2', 'site-2', 'DEF', '456', 'False', '2020-01-02', 'http://example.com/2', 'Choice B'),
('Site 3', 'site-3', '', '', '', '', '', ''),
)
csv_data = '\n'.join(','.join(row) for row in data)
response = self.client.post(reverse('dcim:site_import'), {'csv': csv_data})
self.assertEqual(response.status_code, 200)
# Validate data for site 1
custom_field_values = {
cf.name: value for cf, value in Site.objects.get(name='Site 1').get_custom_fields().items()
}
self.assertEqual(len(custom_field_values), 6)
self.assertEqual(custom_field_values['text'], 'ABC')
self.assertEqual(custom_field_values['integer'], 123)
self.assertEqual(custom_field_values['boolean'], True)
self.assertEqual(custom_field_values['date'], date(2020, 1, 1))
self.assertEqual(custom_field_values['url'], 'http://example.com/1')
self.assertEqual(custom_field_values['select'].value, 'Choice A')
# Validate data for site 2
custom_field_values = {
cf.name: value for cf, value in Site.objects.get(name='Site 2').get_custom_fields().items()
}
self.assertEqual(len(custom_field_values), 6)
self.assertEqual(custom_field_values['text'], 'DEF')
self.assertEqual(custom_field_values['integer'], 456)
self.assertEqual(custom_field_values['boolean'], False)
self.assertEqual(custom_field_values['date'], date(2020, 1, 2))
self.assertEqual(custom_field_values['url'], 'http://example.com/2')
self.assertEqual(custom_field_values['select'].value, 'Choice B')
# No CustomFieldValues should be created for site 3
obj_type = ContentType.objects.get_for_model(Site)
site3 = Site.objects.get(name='Site 3')
self.assertFalse(CustomFieldValue.objects.filter(obj_type=obj_type, obj_id=site3.pk).exists())
self.assertEqual(CustomFieldValue.objects.count(), 12) # Sanity check
def test_import_missing_required(self):
"""
Attempt to import an object missing a required custom field.
"""
# Set one of our CustomFields to required
CustomField.objects.filter(name='text').update(required=True)
form_data = {
'name': 'Site 1',
'slug': 'site-1',
}
form = SiteCSVForm(data=form_data)
self.assertFalse(form.is_valid())
self.assertIn('cf_text', form.errors)
def test_import_invalid_choice(self):
"""
Attempt to import an object with an invalid choice selection.
"""
form_data = {
'name': 'Site 1',
'slug': 'site-1',
'cf_select': 'Choice X'
}
form = SiteCSVForm(data=form_data)
self.assertFalse(form.is_valid())
self.assertIn('cf_select', form.errors)
|
from django.views.generic import ListView, DetailView
from eventex.core.models import Speaker, Talk
home = ListView.as_view(template_name='index.html', model=Speaker)
speaker_detail = DetailView.as_view(model=Speaker)
talk_list = ListView.as_view(model=Talk)
|
import utils.init_multiprocessing # BEFORE numpy
import numpy as np
import os
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401  # registers the '3d' projection on older matplotlib
from twocell.twocell_simulate import twocell_sim_fast, twocell_sim_as_onelargemodel
from multicell.multicell_spatialcell import SpatialCell
from singlecell.singlecell_constants import MEMS_UNFOLD, BETA
from utils.file_io import RUNS_FOLDER
from singlecell.singlecell_simsetup import singlecell_simsetup
def twocell_ensemble_stats(simsetup, steps, beta, gamma, ens=10, monolothic_flag=False):
# TODO issue: the monolothic sim (big Jij) has different statistics than the cell-by-cell sim
# TODO more differences than one would expect -- can only see for nonzero gamma
# TODO CHECK: behaviour of each approach for one low temp traj at high gamma
# TODO also note that any asymmetry (on the x=y reflection line) in the mA vs mB scatterplot is unexpected
overlap_data = np.zeros((ens, 2 * simsetup['P']))
assert simsetup['P'] == 1
XI_scaled = simsetup['XI'] / simsetup['N']
def random_twocell_lattice():
cell_a_init = np.array([2*int(np.random.rand() < .5) - 1 for _ in range(simsetup['N'])]).T
cell_b_init = np.array([2*int(np.random.rand() < .5) - 1 for _ in range(simsetup['N'])]).T
#cell_a_init = np.ones(20) #np.array([-1, -1, 1, -1,-1,-1])
#cell_b_init = np.ones(20) #np.array([1, 1, 1, 1, 1, 1])
lattice = [[SpatialCell(cell_a_init, 'Cell A', [0, 0], simsetup),
SpatialCell(cell_b_init, 'Cell B', [0, 1], simsetup)]]
return lattice
for traj in range(ens):
if traj % 100 == 0:
print("Running traj", traj, "...")
lattice = random_twocell_lattice()
# TODO replace with twocell_sim_as_onelargemodel (i.e. one big ising model)
if monolothic_flag:
lattice = twocell_sim_as_onelargemodel(
lattice, simsetup, steps, beta=beta, gamma=gamma, async_flag=False)
else:
lattice = twocell_sim_fast(
lattice, simsetup, steps, beta=beta, gamma=gamma, async_flag=False)
cell_A_endstate = lattice[0][0].get_state_array()[:, -1]
cell_B_endstate = lattice[0][1].get_state_array()[:, -1]
cell_A_overlaps = np.dot(XI_scaled.T, cell_A_endstate)
cell_B_overlaps = np.dot(XI_scaled.T, cell_B_endstate)
overlap_data[traj, 0:simsetup['P']] = cell_A_overlaps
overlap_data[traj, simsetup['P']:] = cell_B_overlaps
if simsetup['P'] == 1:
plt.figure()
plt.scatter(overlap_data[:,0], overlap_data[:,1], alpha=0.2)
fname = "overlaps_ens%d_beta%.2f_gamma%.2f_mono%d.png" % (ens, beta, gamma, monolothic_flag)
plt.title(fname)
plt.xlabel(r"$m_A$")
plt.ylabel(r"$m_B$")
plt.xlim(-1.05, 1.05)
plt.ylim(-1.05, 1.05)
plt.savefig(RUNS_FOLDER + os.sep + "twocell_analysis" + os.sep + fname)
plt.close()
print(fname)
"""
import seaborn as sns; sns.set()
import pandas as pd
df_overlap_data = pd.DataFrame({r"$m_A$":overlap_data[:,0], r"$m_B$":overlap_data[:,1]})
cmap = sns.cubehelix_palette(dark=.3, light=.8, as_cmap=True)
#ax = sns.scatterplot(x=r"$m_A$", y=r"$m_B$", palette=cmap,
# sizes=(20, 200), hue_norm=(0, 7), legend="full", data=df_overlap_data)
ax = sns.kdeplot(overlap_data[:,0], overlap_data[:,1], shade=True, palette=cmap)
plt.show()
"""
else:
# TODO do some dim reduction
raise NotImplementedError("overlap plotting for P > 1 requires dimensionality reduction")
return overlap_data
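# Note on the overlap computation above: with XI_scaled = XI / N, the stored
# quantities are the usual pattern overlaps m_mu = (1/N) * sum_i XI[i, mu] * s_i
# for each cell's end state, so the m_A vs m_B scatter axes both lie in [-1, 1].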
def twocell_coarse_hamiltonian(simsetup, gamma, ens=10000, steps=10):
def random_twocell_lattice():
cell_a_init = np.array([2*int(np.random.rand() < .5) - 1 for _ in range(simsetup['N'])]).T
cell_b_init = np.array([2*int(np.random.rand() < .5) - 1 for _ in range(simsetup['N'])]).T
lattice = [[SpatialCell(cell_a_init, 'Cell A', [0, 0], simsetup),
SpatialCell(cell_b_init, 'Cell B', [0, 1], simsetup)]]
return lattice
energy_data = np.zeros((ens, 3))
assert simsetup['P'] == 1
XI_scaled = simsetup['XI'] / simsetup['N']
W_matrix = simsetup['FIELD_SEND']
W_matrix_sym = 0.5 * (W_matrix + W_matrix.T)
W_dot_one_scaled = np.dot(W_matrix, np.ones(simsetup['N'])) * gamma / 2
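# The coarse Hamiltonian evaluated per sample (in the commented block and in the
# annealing loop below) has the form
#   H(s_A, s_B) = -1/2 s_A.J.s_A - 1/2 s_B.J.s_B
#                 - (gamma/2) (W.1).(s_A + s_B)
#                 - 1/2 s_A.W.s_B - 1/2 s_B.W.s_A
# with W the FIELD_SEND matrix and gamma the cell-cell coupling strength.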
"""
for elem in xrange(ens):
lattice = random_twocell_lattice()
cell_a = lattice[0][0].get_current_state()
cell_b = lattice[0][1].get_current_state()
energy_A = -0.5 * np.dot(cell_a, np.dot(simsetup['J'], cell_a))
energy_B = -0.5 * np.dot(cell_b, np.dot(simsetup['J'], cell_b))
energy_coupling = - np.dot(W_dot_one_scaled, cell_a + cell_b) - 0.5 * np.dot( cell_a, np.dot(W_matrix, cell_b) ) - 0.5 * np.dot( cell_b, np.dot(W_matrix, cell_a) )
energy_data[elem, 0] = np.dot(XI_scaled.T, cell_a)
energy_data[elem, 1] = np.dot(XI_scaled.T, cell_b)
energy_data[elem, 2] = energy_A + energy_B + energy_coupling
"""
lattice = random_twocell_lattice()
def beta_anneal(elem):
assert ens > 1000
timer = elem % 100
beta_low = 0.01
beta_mid = 1.5
beta_high = 20.0
# within each 100-step block: low up to step 33, mid up to step 90, then high
if timer <= 33:
beta_step = beta_low
elif 33 < timer <= 90:
beta_step = beta_mid
else:
beta_step = beta_high
return beta_step
for elem in range(ens):
# anneal to reach the corners
beta_schedule = beta_anneal(elem)
lattice = twocell_sim_as_onelargemodel(
lattice, simsetup, steps, beta=beta_schedule, gamma=0.0, async_flag=False)
cell_a = lattice[0][0].get_current_state()
cell_b = lattice[0][1].get_current_state()
print(elem, beta_schedule, np.dot(cell_a, np.ones(20)) / 20.0, np.dot(cell_b, np.ones(20)) / 20.0)
energy_A = -0.5 * np.dot(cell_a, np.dot(simsetup['J'], cell_a))
energy_B = -0.5 * np.dot(cell_b, np.dot(simsetup['J'], cell_b))
energy_coupling = - np.dot(W_dot_one_scaled, cell_a + cell_b) - 0.5 * np.dot( cell_a, np.dot(W_matrix, cell_b) ) - 0.5 * np.dot( cell_b, np.dot(W_matrix, cell_a) )
energy_data[elem, 0] = np.dot(XI_scaled.T, cell_a)
energy_data[elem, 1] = np.dot(XI_scaled.T, cell_b)
energy_data[elem, 2] = energy_A + energy_B + energy_coupling
# plot alt
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(energy_data[:, 0], energy_data[:, 1], energy_data[:, 2], c=energy_data[:, 2], marker='o')
plt.xlim(-1.05, 1.05)
plt.ylim(-1.05, 1.05)
plt.show()
plt.close()
# plotting
import seaborn as sns
sns.set()
import pandas as pd
emax = np.max(energy_data[:, 2])
emin = np.min(energy_data[:, 2])
df_overlap_data = pd.DataFrame({r"$m_A$": energy_data[:, 0],
r"$m_B$": energy_data[:, 1],
r'$H(s)$': energy_data[:, 2],
'normed_energy': (emax - emin)/(energy_data[:, 2] - emin)})
plt.figure()
cmap = sns.cubehelix_palette(dark=.3, light=.8, as_cmap=True, reverse=True)
ax = sns.scatterplot(x=r"$m_A$", y=r"$m_B$", palette=cmap, legend='brief',
hue=r'$H(s)$', data=df_overlap_data)
fname = "energyrough_ens%d_gamma%.2f.pdf" % (ens, gamma)
plt.xlim(-1.05, 1.05)
plt.ylim(-1.05, 1.05)
plt.savefig(RUNS_FOLDER + os.sep + "twocell_analysis" + os.sep + fname)
plt.close()
return
if __name__ == '__main__':
random_mem = False
random_W = False
simsetup = singlecell_simsetup(unfolding=True, random_mem=random_mem, random_W=random_W, npzpath=MEMS_UNFOLD,
curated=True)
print('note: N =', simsetup['N'])
ensemble = 2500
steps = 10
beta_low = 1.5 # 2.0
beta_mid = 10.0
beta_high = 100.0
gamma = 20.0
twocell_coarse_hamiltonian(simsetup, gamma, ens=5000)
twocell_ensemble_stats(simsetup, steps, beta_low, gamma, ens=ensemble, monolothic_flag=False)
twocell_ensemble_stats(simsetup, steps, beta_low, gamma, ens=ensemble, monolothic_flag=True)
twocell_ensemble_stats(simsetup, steps, beta_mid, gamma, ens=ensemble, monolothic_flag=False)
twocell_ensemble_stats(simsetup, steps, beta_mid, gamma, ens=ensemble, monolothic_flag=True)
twocell_ensemble_stats(simsetup, steps, beta_high, gamma, ens=ensemble, monolothic_flag=False)
twocell_ensemble_stats(simsetup, steps, beta_high, gamma, ens=ensemble, monolothic_flag=True)
#for gamma in [0.0, 0.1, 0.5, 1.0, 2.0, 3.0, 5.0, 10000.0]:
# twocell_ensemble_stats(simsetup, steps, beta, gamma, ens=ensemble, monolothic_flag=False)
# twocell_ensemble_stats(simsetup, steps, beta, gamma, ens=ensemble, monolothic_flag=True)
|
from functools import lru_cache
from typing import Optional, List
from fastapi.middleware.cors import CORSMiddleware
from fastapi import Depends, FastAPI, WebSocket, WebSocketDisconnect
from fastapi.staticfiles import StaticFiles
from jose import JWTError, jwt
from redminelib import Redmine
from apscheduler.schedulers.background import BackgroundScheduler
import mysql.connector
from fastapi_cache import caches, close_caches
from fastapi_cache.backends.redis import CACHE_KEY, RedisCacheBackend
from app import config
import logging
import random
@lru_cache()
def get_settings():
return config.Settings()
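# Hedged sketch (an assumption, not part of this app): config.Settings is
# expected to expose the attributes read below, e.g. a pydantic BaseSettings
# along the lines of
#
#   class Settings(BaseSettings):
#       redmine_url: str
#       redmine_api_token: str
#       redis_host: str
#       redis_port: int
#       redis_password: str
#       root_path: str = ""
#       # ... plus the redmine_db_* and portal_db_* connection fields used below
#
# lru_cache() makes get_settings() return the same Settings instance on every
# call, so the configuration is only read once.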
redmine = Redmine(get_settings().redmine_url, key=get_settings().redmine_api_token)
redis_host = get_settings().redis_host
redis_port = get_settings().redis_port
redis_password = get_settings().redis_password
app = FastAPI(title="Redmine API Grabber", root_path=get_settings().root_path)
mysql.connector.connect(
host=get_settings().redmine_db_host,
user=get_settings().redmine_db_user,
password=get_settings().redmine_db_password,
database=get_settings().redmine_db_name,
pool_name="redmine",
pool_size=10,
)
mysql.connector.connect(
host=get_settings().portal_db_host,
user=get_settings().portal_db_user,
password=get_settings().portal_db_password,
database=get_settings().portal_db_name,
pool_name="portal",
pool_size=10,
)
def sql_connection(pool_name):
"""Get a connection and a cursor from the pool"""
db = mysql.connector.connect(pool_name=pool_name)
return db
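# Illustrative sketch (not executed): the pooled connections opened above can
# be reused by pool name, e.g.
#
#   db = sql_connection("redmine")
#   cursor = db.cursor(dictionary=True)
#   cursor.execute("SELECT 1")
#   db.close()  # returns the connection to the pool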
def redis_cache():
return caches.get(CACHE_KEY)
Schedule = None
from .routers import admin, issues, projects, users, portal
from .services import scheduler
from app.dependencies import get_token_header
from app.controller import notificationController
class ConnectionManager:
def __init__(self):
self.active_connections: List[WebSocket] = []
async def connect(self, websocket: WebSocket):
await websocket.accept()
self.active_connections.append(websocket)
def disconnect(self, websocket: WebSocket):
self.active_connections.remove(websocket)
async def send_personal_message(self, message: str, websocket: WebSocket):
await websocket.send_text(message)
async def send_json(self, message: dict, websocket: WebSocket):
await websocket.send_json(message)
async def broadcast(self, message: dict):
for connection in self.active_connections:
await connection.send_json(message)
@app.on_event("startup")
async def load_schedule_or_create_blank():
"""
Instantiate the Schedule Object as a Global Param and also load existing Schedules from SQLite
This allows for persistent schedules across server restarts.
"""
global Schedule
try:
Schedule = BackgroundScheduler()
Schedule.add_job(scheduler.insert_issues_statuses, "cron", hour="*/1", id="issues")
Schedule.add_job(notificationController.get_birthday, "cron", hour="6", minute="0", second='1', id="birthday")
Schedule.add_job(notificationController.absen_masuk_notification, "cron", hour="9", minute="0", second='0', id="absen_masuk")
Schedule.add_job(notificationController.absen_keluar_notification, "cron", hour="17", minute="0", second='0', id="absen_keluar")
Schedule.add_job(notificationController.new_and_over_due_issues, "cron", hour="10", minute="0", second='0', id="issuesnotif")
Schedule.start()
print("Created Schedule Object")
except Exception as exc:
print("Unable to Create Schedule Object:", exc)
@app.on_event("shutdown")
async def pickle_schedule():
"""
An Attempt at Shutting down the schedule to avoid orphan jobs
"""
global Schedule
Schedule.shutdown()
print("Disabled Schedule")
@app.on_event('startup')
async def on_startup() -> None:
rc = RedisCacheBackend(f'redis://:{redis_password}@{redis_host}:{redis_port}')
caches.set(CACHE_KEY, rc)
@app.on_event('shutdown')
async def on_shutdown() -> None:
await close_caches()
manager = ConnectionManager()
@app.websocket("/ws")
async def websocket_endpoint(websocket: WebSocket):
print("Accepting client connection...")
await manager.connect(websocket)
try:
while True:
data = await websocket.receive_text()
response = {"value": "Testing Data WS " + str(random.uniform(0, 1))}
await manager.broadcast(response)
except WebSocketDisconnect:
manager.disconnect(websocket)
        await manager.broadcast({"value": "Client left server"})
print("Bye..")
origins = [
"http://localhost",
"http://localhost:8080",
"http://localhost:5000",
"http://highlight.kirei.co.id",
"https://highlight.kirei.co.id",
]
# get static files
app.mount('/static', StaticFiles(directory="static"), name="static")
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
app.include_router(
admin.router,
prefix="/admin",
tags=["admin"],
dependencies=[Depends(get_token_header)],
)
app.include_router(
issues.router,
prefix="/issues",
tags=["issues"],
dependencies=[Depends(get_token_header)],
)
app.include_router(
projects.router,
prefix="/projects",
tags=["projects"],
dependencies=[Depends(get_token_header)],
)
app.include_router(
users.router,
prefix="/users",
tags=["users"],
dependencies=[Depends(get_token_header)],
)
app.include_router(
portal.router,
prefix="/portal",
tags=["Portal"],
dependencies=[Depends(get_token_header)],
)
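# A hedged sketch (the module path is an assumption) of how this application
# might be served locally for development:
#
#   uvicorn app.main:app --host 0.0.0.0 --port 8000 --reload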
|
from os import listdir
from os.path import isfile, join
import unittest
import pprint
from parse_apache_configs import parse_config
from pyparsing import *
class TestParsing(unittest.TestCase):
def test_line_by_line(self):
"""
This method tests the parsing of each line of the
apache config file. It will check to see if each
line can be successfully parsed using the current
pyparsing expressions.
"""
test_files = [ f for f in listdir("./test_conf_files") if isfile(join("./test_conf_files",f)) ]
for file_name in test_files:
file_name = "./test_conf_files/" + file_name
with open(file_name, "r") as apache_config:
for line in apache_config:
parsed_line = parse_config.LINE.parseString(line)
# We don't test blank lines
if len(parsed_line) == 0:
continue
tokenized_line = ungroup(parse_config.LINE).parseString(line)
                    # Test to see that we got back a ParseResults object
self.assertTrue(issubclass(type(tokenized_line),
ParseResults))
                    # These tests check to see if the ParseResults expressions match
                    # ``line`` according to how it's written to its corresponding object
                    # in parse_config.ParseApacheConfig.parse_config(). This
# ensures no characters are left out, and that the parsing
# expressions are correctly implemented.
if self._is_directive(tokenized_line):
directive_string_before = line.lstrip()
directive_string_after = tokenized_line[0] + " " + tokenized_line[1] + "\n"
# This ignores any spaces between the directive name and arguments
# TODO: We need to keep this as close to the original as possible.
self.assertIn(tokenized_line[0], line)
self.assertIn(tokenized_line[1], line)
elif self._is_open_tag(tokenized_line):
open_tag_before = line.lstrip()
open_tag_after = "".join(tokenized_line)
self.assertEqual(open_tag_before, open_tag_after)
elif self._is_close_tag(tokenized_line):
close_tag_before = line.lstrip()
close_tag_after = "</" + tokenized_line[1] + ">" + "\n"
self.assertEqual(close_tag_before, close_tag_after)
def _is_close_tag(self, tokenized_line):
"""
Test to see if tokenized_line is a close_tag
"""
if tokenized_line[0] == '</':
return True
else:
return False
def _is_open_tag(self, tokenized_line):
"""
        Returns true if tokenized_line is an Apache start tag.
"""
if tokenized_line[0] == '<':
return True
else:
return False
def _is_directive(self, tokenized_line):
"""
        Return true if tokenized_line is an Apache directive.
"""
string_line = " ".join(tokenized_line)
try:
parse_config.ANY_DIRECTIVE.parseString(string_line)
except ParseException:
return False
return True
# if tokenized_line[0] != '<' and tokenized_line[0] !='</' and tokenized_line[0] != '#':
# return True
# else:
# return False
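# Hedged convenience entry point (not part of the original module) so the suite
# can also be run directly with `python <this_file>.py` in addition to a test runner:
if __name__ == "__main__":
    unittest.main()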
|
"""
Construct the other ROC plot.
This code is licensed under the Apache License, Version 2.0. You may
obtain a copy of this license in the LICENSE file in the root
directory of this source tree or at
http://www.apache.org/licenses/LICENSE-2.0.
Any modifications or derivative works of this code must retain this
copyright notice, and modified files need to carry a notice
indicating that they have been altered from the originals.
If you use this code, please cite our paper:
@article{Kerr2020,
author = {Catherine Kerr and Terri Hoare and Paula Carroll and Jakub Marecek},
title = {Integer-Programming Ensemble of Temporal-Relations Classifiers},
journal = {Data Mining and Knowledge Discovery},
volume = {to appear},
year = {2020},
url = {http://arxiv.org/abs/1412.1866},
archivePrefix = {arXiv},
eprint = {1412.1866},
}
"""
import traceback
import numpy as np
import pickle
import math
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.font_manager as fm
def get_fscore(pr):
(p, r) = pr
if p+r == 0:
return 0
return 2.0*p*r/(p+r)
corr = {
"1": "C1",
"2": "C2",
"3": "C3",
"4": "C4",
"5": "N1",
"6": "U1",
"7": "U2",
"8": "U3",
"9": "U4",
"A": "U5",
"B": "N2"
}
def mapnames(s):
return "-".join(map(lambda x: corr[x], s.split("-")))
inFile = "ROC2.pkl"
outFile = 'newsfeeds.pdf'
#inFile = "ROC3.pkl"
#outFile = 'clinical.pdf'
annotations = False
with open(inFile, 'rb') as resultsPickle: results = pickle.load(resultsPickle)
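# Assumption: 'results' maps an ensemble identifier to a (precision, recall)
# pair, as suggested by get_fscore() and the plotting code below.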
pairs = set(results.values())
print "Distinct precision-recall pairs:", pairs
print "Non-dominated precision-recall pairs:"
a = np.asarray(list(pairs))
which = np.ones(len(pairs), dtype = bool)
for i in range(len(pairs)):
c = a[i,:]
    if np.all(np.any(a<=c, axis=1)): print(c)
else: which[i] = 0
tolerance = 0.1
close = np.ones(len(pairs), dtype = bool)
print "Precision-recall pairs within", tolerance, "tolerance:"
for i in range(len(pairs)):
c = a[i,:]
    if np.all(np.any(a<=c+tolerance, axis=1)): print(c)
#else: close[i] = 0
plt.style.use('grayscale')
#plt.gca().invert_xaxis()
#plt.gca().invert_yaxis()
scatter, ax = plt.subplots()
prop = fm.FontProperties(fname='./Calibri.ttf')
plt.grid(True, alpha=0.2)
ax.tick_params(axis='both', which='major', labelsize=10)
ax.tick_params(axis='both', which='minor', labelsize=10)
#ax.set_xlim(ax.get_xlim()[::-1])
#ax.set_ylim(ax.get_ylim()[::-1])
#scatter.suptitle('ROC Curve', fontproperties=prop, size=10)
# Convert to the false positive rate
#a[0,:] = 1 - a[0,:]
ax.set_xlabel('Precision', fontproperties=prop, size=10)
ax.set_ylabel('Recall', fontproperties=prop, size=10) # relative to plt.rcParams['font.size']
ax.plot(a[which,0], a[which,1], "o")
labels = ['F1 = %.2f' % get_fscore(results[s]) for s in results.keys()]
#labels = ['%s (F1 = %.2f)' % (mapnames(s[0]), get_fscore(results[s])) for s in results.keys()]
placedY = []
if annotations:
for cnt, (label, x, y) in enumerate(zip(labels, a[:, 0], a[:, 1])):
if not which[cnt]: continue
tooClose = False
for placed in placedY:
            print(y, placedY)
if math.fabs(placed - y) < 0.015: tooClose = True
if tooClose: continue
offsetSign = 1
if x < 0.22: continue
if x > 0.28: offsetSign = -1
placedY.append(y)
plt.annotate(
label, annotation_clip = True,
xy=(x, y), xytext=(offsetSign * 100, 0),
textcoords='offset points', ha='right', va='bottom',
# bbox=dict(boxstyle='round,pad=0.5', fc='yellow', alpha=0.5),
arrowprops=dict(alpha = 0.2, arrowstyle = '->', connectionstyle='arc3,rad=0'))
pp = PdfPages(outFile)
plt.savefig(pp, format='pdf')
ax.plot(a[close,0], a[close,1], "o", alpha=0.2)
plt.savefig(pp, format='pdf')
pp.close() |
"""
Transports provided by aiida_unicore.
Register transports via the "aiida.transports" entry point in setup.json.
"""
from aiida.transports import Transport
class UnicoreTransport(Transport):
"""
AiiDA transport plugin for unicore
"""
|
import torch.nn as nn
import torch.nn.functional as F
import torch
from mmseg.core import add_prefix
from mmseg.ops import resize
from mmcv.runner import load_checkpoint
from ..builder import DISTILLER,build_distill_loss
from mmseg.models import build_segmentor
from mmseg.models.segmentors.base import BaseSegmentor
@DISTILLER.register_module()
class SegmentationDistiller(BaseSegmentor):
"""Base distiller for segmentors.
It typically consists of teacher_model and student_model.
"""
def __init__(self,
teacher_cfg,
student_cfg,
distill_cfg=None,
teacher_pretrained=None,):
super(SegmentationDistiller, self).__init__()
self.teacher = build_segmentor(teacher_cfg.model,
train_cfg=teacher_cfg.get('train_cfg'),
test_cfg=teacher_cfg.get('test_cfg'))
self.init_weights_teacher(teacher_pretrained)
self.teacher.eval()
self.student= build_segmentor(student_cfg.model,
train_cfg=student_cfg.get('train_cfg'),
test_cfg=student_cfg.get('test_cfg'))
self.distill_losses = nn.ModuleDict()
self.distill_cfg = distill_cfg
student_modules = dict(self.student.named_modules())
teacher_modules = dict(self.teacher.named_modules())
        def register_hooks(student_module, teacher_module):
def hook_teacher_forward(module, input, output):
self.register_buffer(teacher_module,output)
def hook_student_forward(module, input, output):
self.register_buffer( student_module,output )
return hook_teacher_forward,hook_student_forward
for item_loc in distill_cfg:
student_module = 'student_' + item_loc.student_module.replace('.','_')
teacher_module = 'teacher_' + item_loc.teacher_module.replace('.','_')
self.register_buffer(student_module,None)
self.register_buffer(teacher_module,None)
            hook_teacher_forward, hook_student_forward = register_hooks(student_module, teacher_module)
teacher_modules[item_loc.teacher_module].register_forward_hook(hook_teacher_forward)
student_modules[item_loc.student_module].register_forward_hook(hook_student_forward)
for item_loss in item_loc.methods:
loss_name = item_loss.name
self.distill_losses[loss_name] = build_distill_loss(item_loss)
def base_parameters(self):
return nn.ModuleList([self.student,self.distill_losses])
def discriminator_parameters(self):
return self.discriminator
def init_weights_teacher(self, path=None):
"""Load the pretrained model in teacher detector.
Args:
pretrained (str, optional): Path to pre-trained weights.
Defaults to None.
"""
checkpoint = load_checkpoint(self.teacher, path, map_location='cpu')
def forward_train(self, img, img_metas, gt_semantic_seg):
"""Forward function for training.
Args:
img (Tensor): Input images.
img_metas (list[dict]): List of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmseg/datasets/pipelines/formatting.py:Collect`.
gt_semantic_seg (Tensor): Semantic segmentation masks
used if the architecture supports semantic segmentation task.
Returns:
dict[str, Tensor]: a dictionary of loss components
"""
with torch.no_grad():
self.teacher.eval()
teacher_loss = self.teacher.forward_train(img, img_metas, gt_semantic_seg)
student_loss = self.student.forward_train(img, img_metas, gt_semantic_seg)
buffer_dict = dict(self.named_buffers())
for item_loc in self.distill_cfg:
student_module = 'student_' + item_loc.student_module.replace('.','_')
teacher_module = 'teacher_' + item_loc.teacher_module.replace('.','_')
student_feat = buffer_dict[student_module]
teacher_feat = buffer_dict[teacher_module]
for item_loss in item_loc.methods:
loss_name = item_loss.name
student_loss[ loss_name] = self.distill_losses[loss_name](student_feat,teacher_feat)
return student_loss
# TODO refactor
def slide_inference(self, img, img_meta, rescale):
"""Inference by sliding-window with overlap."""
h_stride, w_stride = self.test_cfg.stride
h_crop, w_crop = self.test_cfg.crop_size
batch_size, _, h_img, w_img = img.size()
num_classes = self.student.num_classes
h_grids = max(h_img - h_crop + h_stride - 1, 0) // h_stride + 1
w_grids = max(w_img - w_crop + w_stride - 1, 0) // w_stride + 1
preds = img.new_zeros((batch_size, num_classes, h_img, w_img))
count_mat = img.new_zeros((batch_size, 1, h_img, w_img))
for h_idx in range(h_grids):
for w_idx in range(w_grids):
y1 = h_idx * h_stride
x1 = w_idx * w_stride
y2 = min(y1 + h_crop, h_img)
x2 = min(x1 + w_crop, w_img)
y1 = max(y2 - h_crop, 0)
x1 = max(x2 - w_crop, 0)
crop_img = img[:, :, y1:y2, x1:x2]
pad_img = crop_img.new_zeros(
(crop_img.size(0), crop_img.size(1), h_crop, w_crop))
pad_img[:, :, :y2 - y1, :x2 - x1] = crop_img
pad_seg_logit = self.student.encode_decode(pad_img, img_meta)
preds[:, :, y1:y2,
x1:x2] += pad_seg_logit[:, :, :y2 - y1, :x2 - x1]
count_mat[:, :, y1:y2, x1:x2] += 1
assert (count_mat == 0).sum() == 0
preds = preds / count_mat
if rescale:
preds = resize(
preds,
size=img_meta[0]['ori_shape'][:2],
mode='bilinear',
align_corners=self.student.align_corners,
warning=False)
return preds
def whole_inference(self, img, img_meta, rescale):
"""Inference with full image."""
seg_logit = self.student.encode_decode(img, img_meta)
if rescale:
seg_logit = resize(
seg_logit,
size=img_meta[0]['ori_shape'][:2],
mode='bilinear',
align_corners=self.student.align_corners,
warning=False)
return seg_logit
def inference(self, img, img_meta, rescale):
"""Inference with slide/whole style.
Args:
img (Tensor): The input image of shape (N, 3, H, W).
img_meta (dict): Image info dict where each dict has: 'img_shape',
'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmseg/datasets/pipelines/formatting.py:Collect`.
rescale (bool): Whether rescale back to original shape.
Returns:
Tensor: The output segmentation map.
"""
assert self.student.test_cfg.mode in ['slide', 'whole']
ori_shape = img_meta[0]['ori_shape']
assert all(_['ori_shape'] == ori_shape for _ in img_meta)
if self.student.test_cfg.mode == 'slide':
seg_logit = self.slide_inference(img, img_meta, rescale)
else:
seg_logit = self.whole_inference(img, img_meta, rescale)
output = F.softmax(seg_logit, dim=1)
flip = img_meta[0]['flip']
flip_direction = img_meta[0]['flip_direction']
if flip:
assert flip_direction in ['horizontal', 'vertical']
if flip_direction == 'horizontal':
output = output.flip(dims=(3, ))
elif flip_direction == 'vertical':
output = output.flip(dims=(2, ))
return output
def simple_test(self, img, img_meta, rescale=True):
"""Simple test with single image."""
seg_logit = self.inference(img, img_meta, rescale)
seg_pred = seg_logit.argmax(dim=1)
seg_pred = seg_pred.cpu().numpy()
# unravel batch dim
seg_pred = list(seg_pred)
return seg_pred
def aug_test(self, imgs, img_metas, rescale=True):
"""Test with augmentations.
Only rescale=True is supported.
"""
# aug_test rescale all imgs back to ori_shape for now
assert rescale
# to save memory, we get augmented seg logit inplace
seg_logit = self.inference(imgs[0], img_metas[0], rescale)
for i in range(1, len(imgs)):
cur_seg_logit = self.inference(imgs[i], img_metas[i], rescale)
seg_logit += cur_seg_logit
seg_logit /= len(imgs)
seg_pred = seg_logit.argmax(dim=1)
seg_pred = seg_pred.cpu().numpy()
# unravel batch dim
seg_pred = list(seg_pred)
return seg_pred
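# A minimal sketch (assumption) of the distill_cfg structure this distiller
# iterates over; the module paths and loss type/name below are illustrative only:
#
# distill_cfg = [
#     dict(student_module='decode_head.conv_seg',
#          teacher_module='decode_head.conv_seg',
#          methods=[dict(type='SomeDistillLoss', name='loss_distill_logits')]),
# ]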
|
from ..lib import PASS, MISSING
from ..error import Invalid
from .core import Validator, ValidatorBase, messages
from copy import copy
import logging, sys
_python3 = sys.version_info[0]>=3
log = logging.getLogger(__name__)
@messages\
( fail='This field must be left out'
)
class Missing( Validator ):
def setParameters(self, default=PASS ):
self.default = default
def on_value( self, context, value ):
raise Invalid( value, self )
def on_missing( self, context ):
if self.default is PASS:
return MISSING
return copy(self.default)
@messages\
( fail='This field must be blank'
)
class Blank( Validator ):
def setParameters( self, default = PASS ):
self.default = default
self.check_container = \
isinstance( default, dict )\
or isinstance( default, list )\
or isinstance( default, tuple )
def on_value( self, context, value ):
if self.check_container \
and not isinstance( value, str)\
and ( isinstance( value, dict ) or isinstance( value, list ) or isinstance( value, tuple) ):
n = MISSING
if len(value) > 0:
if isinstance( value, dict):
for (key, val) in value.items():
if val not in [ MISSING, None, '']:
n = value
break
elif isinstance( value, list) or isinstance( value, tuple ):
for val in value:
if val not in [ MISSING, None, '']:
n = value
break
if n is MISSING:
return copy(self.default)
raise Invalid( value, self )
def on_blank( self, context, value ):
if self.default is PASS:
return value
return copy(self.default)
@messages\
( fail='This field must be empty (missing or blank)'
)
class Empty( Blank, Missing ):
pass
@messages\
( fail='Value must match %(criterion)s'
)
class Match( Validator ):
RAW = 'Match_RAW'
REGEX = 'Match_REGEX'
VALIDATOR = 'Match_VALIDATOR'
def setParameters(self, criterion, ignoreCase=False):
if not isinstance( criterion, ValidatorBase ):
if hasattr(getattr( criterion, 'match', None ),'__call__'):
self.type = Match.REGEX
else:
self.type = Match.RAW
else:
self.type = Match.VALIDATOR
self.ignoreCase = ignoreCase
self.criterion = criterion
def appendSubValidators( self, subValidators ):
if self.type == Match.VALIDATOR:
self.criterion.appendSubValidators( subValidators )
subValidators.append( self.criterion )
def on_value(self, context, value ):
if self.type is Match.REGEX:
if _python3 and isinstance( value, bytes):
value = value.decode('utf8')
if not self.criterion.match(value):
raise Invalid( value, self, matchType=self.type, criterion=self.criterion.pattern)
return value
elif self.type is Match.VALIDATOR:
try:
compare = self.criterion.validate( context, value )
except Invalid as e:
raise Invalid( value, self, matchType=self.type, criterion=e )
else:
compare = self.criterion
val = value
if self.ignoreCase:
compare = str(compare).lower()
val = str(value).lower()
if val != compare:
raise Invalid( value, self, matchType=self.type, criterion=compare )
return value
def on_missing(self, context ):
if self.type is Match.VALIDATOR:
return self.on_value( context, MISSING )
return Validator.on_missing( self, context )
def on_blank(self, context, value ):
if self.type is Match.VALIDATOR:
return self.on_value( context, value )
return Validator.on_blank( self, context, value )
@messages\
( type="Can not get len from values of type %(value.type)s"
, min="Value must have at least %(min)i elements/characters (has %(len)s)"
, max="Value cannot have more than least %(max)i elements/characters (has %(len)s)"
)
class Len( Validator ):
def setParameters(self, min=1, max=None, returnLen=False):
self.min = min
self.max = max
self.returnLen = returnLen
def on_value(self, context, value):
try:
result = len(value)
except Exception:
raise Invalid( value, self, 'type' )
if result<self.min:
raise Invalid( value, self, 'min', min=self.min, max=self.max, len=result)
if self.max is not None and result > self.max:
raise Invalid( value, self, 'max', min=self.min, max=self.max, len=result)
if self.returnLen:
return result
else:
return value
@messages\
( fail="Value must be one of %(criteria)s"
)
class In( Validator ):
def setParameters( self, criteria ):
self.criteria = criteria
def on_value(self, context, value):
        if value not in self.criteria:
raise Invalid( value, self, criteria=self.criteria )
return value
@messages\
( fail="Value must lower or equal to %(max)s"
)
class Max( Validator ):
def setParameters( self, max ):
self.max = max
def on_value(self, context, value):
if value > self.max:
raise Invalid( value, self, max=self.max )
return value
@messages\
( fail="Value must greater or equal to %(min)s"
)
class Min( Validator ):
def setParameters( self, min ):
self.min = min
def on_value(self, context, value):
if value < self.min:
raise Invalid( value, self, min=self.min )
return value
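# A hedged usage sketch (assumption: the base Validator wires constructor
# arguments through setParameters, as the rest of this library suggests):
#
#   Len(min=2, max=5)          # rejects values whose len() is outside [2, 5]
#   In(['a', 'b', 'c'])        # rejects values not contained in the criteria
#   Min(0); Max(100)           # simple bound checks using the messages above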
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-08-01 02:35
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('bpay', '0001_initial'),
]
operations = [
migrations.AlterModelTable(
name='bpaycollection',
table='payments_bpaycollection_v',
),
]
|
from django.db import models
class Activity(models.Model):
title = models.CharField(max_length=150)
grade_CHOICES = ((1, 1), (2, 2), (3, 3), (4, 4), (5, 5),(6,6),(7,7),(8,8),(9,9),(10,10),(11,11),(12,12))
    grade = models.IntegerField(default=1, choices=grade_CHOICES)
link = models.URLField(max_length=200, null=True, default=None, blank=True)
# Create your models here. |
class Solution:
def isPalindrome(self, x: int) -> bool:
L = 2**31
MAX = L-1
MIN = -L
        if not MIN < x < MAX:
            return False
        if x < 0:
            return False
x = str(x)
rev_x = x[::-1]
if int(rev_x) > L:
return False
if x == rev_x :
return True
else:
return False
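    # Illustrative examples: Solution().isPalindrome(121) -> True,
    # Solution().isPalindrome(-121) -> False, Solution().isPalindrome(10) -> False.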
## logic ##
    ''' Comparing ints is faster.
    Use the built-in str() and int() functions:
    reverse the string, convert it back to int,
    and check whether the number and the reversed number are the same.
''' |
import asyncio
from pyjamas_core.util import Input, Output, Property
from pyjamas_core.supermodel import Supermodel
class Model(Supermodel):
"""
schedules the func gates of the agent
sets the number of elapsed rounds as output
"""
def __init__(self, uuid, name: str):
super(Model, self).__init__(uuid,name)
self.elapsed = 0
self.outputs['num_elapsed'] = Output('Number Elapsed', unit='int')
self.properties["number_of_exec"] = Property('Number of Executions', default=-1, data_type=float, unit='int')
self.properties["peri_interval"] = Property('Peri Interval', default=0, data_type=float, unit='num')
self.properties["prep_lead"] = Property('Prep lead', default=0, data_type=float, unit='num')
self.properties["post_delay"] = Property('Post delay', default=0, data_type=float, unit='num')
def close_gates(self):
if self.get_property('prep_lead') > 0:
self.log_debug("closing prep gate")
self.agent.prep_gate.clear()
if self.get_property('peri_interval') > 0:
self.log_debug("closing peri gate")
self.agent.peri_gate.clear()
if self.get_property('post_delay') > 0:
self.log_debug("closing post gate")
self.agent.post_gate.clear()
async def loop(self):
while (self.get_property('number_of_exec') > self.elapsed or self.get_property('number_of_exec') < 0) and self.alive:
# TODO: change wait time to be relative to start time to remove cumulative error
if not self.agent.prep_gate.is_set():
self.log_debug("opening prep gate")
self.agent.prep_gate.set()
await asyncio.sleep(self.get_property('prep_lead'))
if not self.agent.peri_gate.is_set():
self.log_debug("opening peri gate")
self.agent.peri_gate.set()
await asyncio.sleep(self.get_property('post_delay'))
if not self.agent.post_gate.is_set():
self.log_debug("opening post gate")
self.agent.post_gate.set()
sleeptime = self.get_property('peri_interval') - self.get_property('prep_lead') - self.get_property('post_delay')
await asyncio.sleep(sleeptime)
self.log_debug("leaving scheduler loop")
async def func_birth(self):
if self.get_property('number_of_exec') == 0:
self.agent.end_all_model_loops()
return
self.close_gates()
asyncio.ensure_future(self.loop())
async def func_prep(self):
self.set_output("num_elapsed", self.elapsed)
async def func_post(self, peri_to_post=None):
self.elapsed = self.elapsed + 1
async def func_in_sync(self):
self.close_gates()
if self.get_property('number_of_exec') <= self.elapsed and self.get_property('number_of_exec') >= 0:
self.agent.end_all_model_loops() |
import logging
from atlassian import Bamboo
"""
This example shows how to clean up incomplete or Unknown Bamboo build results.
"""
logging.basicConfig(level=logging.ERROR)
REMOVE = True
STATUS_CLEANED_RESULTS = ["Incomplete", "Unknown"]
EXCLUDED_PROJECTS = ["EXCLUDE_PROJECT"]
BAMBOO_LOGIN = "admin"
BAMBOO_PASS = "password"
BAMBOO_URL = "https://bamboo.example.com"
def get_all_projects():
return [x["key"] for x in bamboo.projects(max_results=1000)]
def get_plans_from_project(proj):
return [x["key"] for x in bamboo.project_plans(proj)]
def get_branches_from_plan(plan_key):
return [x["id"] for x in bamboo.search_branches(plan_key, max_results=1000, start=0)]
def get_results_from_branch(plan_key):
return [x for x in bamboo.results(plan_key, expand="results.result", max_results=100, include_all_states=True)]
def remove_build_result(build_key, status):
result = bamboo.build_result(build_key=build_key)
if result.get("buildState") == status:
print("Removing build result - {}".format(build_key))
if REMOVE:
bamboo.delete_build_result(build_key=build_key)
def project_review(plans):
for plan in plans:
print("Inspecting {} plan".format(plan))
branches = get_branches_from_plan(plan)
for branch in branches:
build_results = get_results_from_branch(branch)
for build in build_results:
build_key = build.get("buildResultKey") or None
print("Inspecting build - {}".format(build_key))
if build_key:
for status in STATUS_CLEANED_RESULTS:
remove_build_result(build_key=build_key, status=status)
if __name__ == "__main__":
bamboo = Bamboo(url=BAMBOO_URL, username=BAMBOO_LOGIN, password=BAMBOO_PASS, timeout=180)
projects = get_all_projects()
for project in projects:
if project in EXCLUDED_PROJECTS:
continue
print("Inspecting project - {}".format(project))
results = []
all_plans_of_project = get_plans_from_project(project)
project_review(plans=all_plans_of_project)
|
from timeit import default_timer as timer
import fire
import h5py
import numpy as np
def startTimer():
print("Timer is starting")
start = timer()
hf = h5py.File('time.h5', 'w')
hf.create_dataset('timing', data=start)
hf.close()
def endTimer():
print("Timer ends")
end = timer()
hf = h5py.File('time.h5', 'r')
start = hf.get('timing')
start = np.array(start)
hf.close()
    diff = (end - start) // 60  # elapsed time in whole minutes
    print("Run time (minutes):")
    print(diff)
if __name__ == "__main__":
fire.Fire()
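# Hedged usage sketch (the script name is an assumption); fire exposes the two
# functions above as subcommands:
#   python timer_script.py startTimer   # records the start time in time.h5
#   python timer_script.py endTimer     # prints the elapsed time in minutes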
|
from minder_utils.models.feature_extractors import SimCLR, Partial_Order, AutoEncoder
from minder_utils.dataloader import process_data
from minder_utils.evaluate.evaluate_models import evaluate_features
from minder_utils.dataloader import Dataloader
from minder_utils.util.initial import first_run
import pandas as pd
import numpy as np
import os
os.chdir('..')
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
# If you haven't processed the data, please uncomment the line below
# first_run()
# if you have processed the data, then just pass None to the dataloader
labelled_data = Dataloader(None).labelled_data
unlabelled_data = Dataloader(None).unlabelled_data['activity']
unlabelled, X, y, p_ids = process_data(labelled_data, unlabelled_data, num_days_extended=0)
# Train feature extractors and save the features
# Note you only need to train them once, then the model will be saved automatically.
# If you want to retrain the model, set retrain as True in config_feature_extractor.yaml
# SimCLR().fit(unlabelled).transform(X)
AutoEncoder().fit(unlabelled).transform(X)
# All the features have been saved and it's ready to test
df = evaluate_features(X, y, p_ids, num_runs=10, valid_only=True)
df['extractor'] = 'None'
results = [df]
for feature in ['simclr', 'autoencoder']:
feat = np.load('./data/extracted_features/{}.npy'.format(feature))
df = evaluate_features(feat, y, p_ids, num_runs=10, valid_only=True)
df['extractor'] = feature
results.append(df)
print(pd.concat(results))
|
# -*- coding: utf-8 -*-
from enum import Enum
class ChatMode(Enum):
MODE2017 = 'mode_2017'
MODE2018 = 'mode_2018'
MODE2021 = 'mode_2021'
|
import os
import sys
import warnings
from inspect import getmembers, isfunction
from ase.io import read
unsupported_template = '\nProperty "%s" not available. Please verify which features ' \
'in Sapphire are supported first by calling\n' \
'\nfrom Utilities.Supported import Supported\n' \
'print(Supported().Full(), Supported().Homo(), Supported().Hetero())\n'
none_template = '\nSystem property "%s" is invalid. Typically, this is because the ' \
                'required information has not been provided by the user or is given incorrectly.\n' \
'Reverting to System default "%s".\n'
class _Clean_System(object):
def __init__(self, System={}):
self.System = System
self.file = 'Sapphire_Info.txt'
self.Default = {
'base_dir': '',
'movie_file_name': 'movie.xyz',
'energy_file_name': None,
'extend_xyz': None,
'Homo': None,
'Hetero': None,
'Start': 0, 'End': None, 'Step': 1, 'Skip': 50,
'UniformPDF': False, 'Band': 0.05
}
self.Keys = list(self.Default.keys())
self.FunkList = [o for o in getmembers(_Clean_System) if isfunction(o[1])]
self.Funks = [x[0] for x in self.FunkList if not x[0].startswith('_')]
for x in self.Funks:
getattr(self, x)()
def Abase_dir(self):
def _no_base():
self.System['base_dir'] = ''
with open(self.System['base_dir']+self.file, 'a') as warn:
warn.write(none_template % (self.System['base_dir'], self.Default['base_dir']))
try:
self.System['base_dir']
if type(self.System['base_dir']) is not str:
                _no_base()
else:
if self.System['base_dir'] == '':
pass
else:
if not os.path.isdir(self.System['base_dir']):
_no_base()
except KeyError:
_no_base()
with open(self.System['base_dir']+self.file, "a") as f:
f.write("\nInitialising...\n")
def Bmovie_file_name(self):
def _exit():
try:
if not os.path.isfile(self.System['base_dir']+self.System['movie_file_name']):
with open(self.System['base_dir']+self.file, 'a') as warn:
warn.write("\nNo trajectory file can be found at the specified location.\n"
"Please check your local directories and re-write your input file.\n"
"Sapphire will now terminate.\n")
raise SystemExit("No trajectory found at '%s'.\n" % (
self.System['base_dir']+self.System['movie_file_name']))
_exit()
except Exception as e:
            sys.exit('\nCannot find this file.\nExiting now due to error raised as:\n%s' % e)
try:
_exit()
if type(self.System['movie_file_name']) is not str:
self.System['movie_file_name'] = self.Default['movie_file_name']
_exit()
warnings.warn(none_template % ('movie_file_name', self.Default['movie_file_name']))
                with open(self.System['base_dir']+self.file, 'a') as warn:
warn.write(none_template % ('movie_file_name', self.Default['movie_file_name']))
_exit()
else:
if not os.path.isfile(self.System['base_dir']+self.System['movie_file_name']):
self.System['movie_file_name'] = self.Default['movie_file_name']
warnings.warn(none_template % ('movie_file_name', self.Default['movie_file_name']))
with open(self.System['base_dir']+self.file, 'a') as warn:
warn.write(none_template % ('movie_file_name', self.Default['movie_file_name']))
_exit()
except Exception as e:
self.System['movie_file_name'] = self.Default['movie_file_name']
warnings.warn(none_template % ('movie_file_name', self.Default['movie_file_name']))
with open(self.System['base_dir']+self.file, 'a') as warn:
warn.write(
none_template % (
self.System['movie_file_name'], self.Default['movie_file_name']
)
)
_exit()
with open(self.System['base_dir']+self.file, "a") as f:
f.write('\nReading from the %s file.\n' % (self.System['base_dir']+self.System['movie_file_name']))
"""
def Cenergy_file_name(self):
"""""""
Please note that this command has since been removed due to being obsolete.
"""""""
def _no_file():
self.System['energy_file_name'] = self.Default['energy_file_name']
with open(self.System['base_dir']+self.file, 'a') as warn:
warn.write("\nNo energy file can be found at the specified location.\n'%s'\n"
"Please check your local directories and re-write your input file if you want energetic analysis.\n"
% (self.System['base_dir']))
try:
if type(self.System['energy_file_name']) is not str:
self.System['energy_file_name'] = self.Default['energy_file_name']
warnings.warn(none_template % ('energy_file_name', self.Default['energy_file_name']))
with open(self.System['base_dir']+self.file, 'a') as warn:
warn.write(none_template % ('energy_file_name', self.Default['energy_file_name']))
_no_file()
else:
if not os.path.isfile(self.System['base_dir']+self.System['energy_file_name']):
_no_file()
except Exception as e:
_no_file()
"""
def Dextend_xyz(self):
try:
if type(self.System['extend_xyz']) is not list:
self.System['extend_xyz'] = self.Default['extend_xyz']
warnings.warn(none_template % ('extend_xyz', self.Default['extend_xyz']))
with open(self.System['base_dir']+self.file, 'a') as warn:
warn.write(none_template % ('extend_xyz', self.Default['extend_xyz']))
else:
with open(self.System['base_dir']+self.file, 'a') as warn:
warn.write("Will attempt to write the following quantities into an extended xyz file:\n")
for x in self.System['extend_xyz']:
warn.write("%s\n" % x)
except KeyError:
self.System['extend_xyz'] = self.Default['extend_xyz']
warnings.warn(none_template % ('extend_xyz', self.Default['extend_xyz']))
with open(self.System['base_dir']+self.file, 'a') as warn:
warn.write(none_template % ('extend_xyz', self.Default['extend_xyz']))
def _no_homo(self):
with open(self.System['base_dir']+self.file, 'a') as warn:
warn.write("\nNo specie-specific properties for homo species will be calculated in this run.\n")
self.System['Homo'] = self.Default['Homo']
def EHomo(self):
with open(self.System['base_dir']+self.file, "a") as f:
f.write("\nChecking user input for calculating homo properties in this run.\n")
try:
self.System['Homo']
if self.System['Homo'] is None:
self._no_homo()
elif type(self.System['Homo']) is list:
Temp = read(
self.System['base_dir']+self.System['movie_file_name'],
index=0).get_chemical_symbols()
used = set()
Species = [x for x in Temp
if x not in used and (used.add(x) or True)]
Temp = []
for x in self.System['Homo']:
if x not in Species:
with open(self.System['base_dir']+self.file, "a") as f:
f.write("\nChemical specie %s not present in the trajectory."
"Consequently, this shall be discarded from Homo.\n" % x)
else:
Temp.append(x)
with open(self.System['base_dir']+self.file, "a") as f:
f.write("\nSpecies being considered are:\n"+'\t'.join(str(x) for x in Temp))
self.System['Homo'] = Temp
except Exception as e:
self._no_homo()
def _no_hetero(self):
with open(self.System['base_dir']+self.file, 'a') as warn:
warn.write("\nNo specie-specific properties for homo species will be calculated in this run.\n")
self.System['Hetero'] = self.Default['Hetero']
def GHetero(self):
with open(self.System['base_dir']+self.file, "a") as f:
f.write("\nChecking user input for calculating homo properties in this run.\n")
try:
self.System['Hetero']
if self.System['Hetero'] is None:
self._no_hetero()
except KeyError:
self._no_hetero()
def IStart(self):
try:
self.System['Start']
if type(self.System['Start']) is not int or self.System['Start'] < 0:
self.System['Start'] = 0
warnings.warn(none_template % ('Start', self.Default['Start']))
with open(self.System['base_dir']+self.file, 'a') as warn:
warn.write(none_template % ('Start', self.Default['Start']))
else:
with open(self.System['base_dir']+self.file, 'a') as file:
file.write("\nInitial frame has been set to %s.\n" % self.System['Start'])
except KeyError:
self.System['Start'] = 0
warnings.warn(none_template % ('Start', self.Default['Start']))
with open(self.System['base_dir']+self.file, 'a') as warn:
warn.write(none_template % ('Start', self.Default['Start']))
def JEnd(self):
try:
if not type(self.System['End']) is int or self.System['End'] < self.System['Start']:
Temp = read(self.System['base_dir']+self.System['movie_file_name'], index=':')
self.Default['End'] = len(Temp)
self.System['End'] = len(Temp)
del(Temp)
warnings.warn(none_template % ('End', self.Default['End']))
with open(self.System['base_dir']+self.file, 'a') as warn:
warn.write(none_template % (self.System['End'], self.Default['End']))
elif self.System['End'] < self.System['Start']:
Temp = read(self.System['base_dir']+self.System['movie_file_name'], index=':')
self.Default['End'] = len(Temp)
self.System['End'] = len(Temp)
del(Temp)
warnings.warn(none_template % ('End', self.Default['End']))
with open(self.System['base_dir']+self.file, 'a') as warn:
warn.write(none_template % ('End', self.Default['End']))
else:
with open(self.System['base_dir']+self.file, 'a') as file:
file.write("\nFinal frame has been set to %s.\n" % self.System['End'])
except KeyError:
Temp = read(self.System['base_dir']+self.System['movie_file_name'], index=':')
self.Default['End'] = len(Temp)
self.System['End'] = len(Temp)
del(Temp)
warnings.warn(none_template % ('End', self.Default['End']))
with open(self.System['base_dir']+self.file, 'a') as warn:
warn.write(none_template % ('End', self.Default['End']))
def KStep(self):
try:
if not type(self.System['Step']) is int or self.System['Step'] < 1:
self.System['Step'] = self.Default['Step']
warnings.warn(none_template % ('Step', self.Default['Step']))
with open(self.System['base_dir']+self.file, 'a') as warn:
warn.write(none_template % ('Step', self.Default['Step']))
except KeyError:
self.System['Step'] = self.Default['Step']
warnings.warn(none_template % ('Step', self.Default['Step']))
with open(self.System['base_dir']+self.file, 'a') as warn:
warn.write(none_template % ('Step', self.Default['Step']))
def LSkip(self):
try:
if not type(self.System['Skip']) is int or self.System['Skip'] < 1:
                self.Default['Skip'] = int((self.System['End'] - self.System['Start']) / 25.0)
if self.Default['Skip'] < 1:
self.Default['Skip'] = 1
warnings.warn(none_template % ('Skip', self.Default['Skip']))
with open(self.System['base_dir']+self.file, 'a') as warn:
warn.write(none_template % ('Skip', self.Default['Skip']))
self.System['Skip'] = self.Default['Skip']
except KeyError:
            self.Default['Skip'] = int((self.System['End'] - self.System['Start']) / 25.0)
if self.Default['Skip'] < 1:
self.Default['Skip'] = 1
            warnings.warn(none_template % ('Skip', self.Default['Skip']))
            with open(self.System['base_dir']+self.file, 'a') as warn:
                warn.write(none_template % ('Skip', self.Default['Skip']))
self.System['Skip'] = self.Default['Skip']
def MUniformPDF(self):
try:
if type(self.System['UniformPDF']) is not bool:
warnings.warn(none_template % ('UniformPDF', self.Default['UniformPDF']))
with open(self.System['base_dir']+self.file, 'a') as warn:
warn.write(none_template % ('UniformPDF', self.Default['UniformPDF']))
self.System['UniformPDF'] = self.Default['UniformPDF']
except KeyError:
warnings.warn(none_template % ('UniformPDF', self.Default['UniformPDF']))
with open(self.System['base_dir']+self.file, 'a') as warn:
warn.write(none_template % ('UniformPDF', self.Default['UniformPDF']))
self.System['UniformPDF'] = self.Default['UniformPDF']
def NBand(self):
try:
if type(self.System['Band']) is not float:
self.Default['Band'] = self.Default['Band']
warnings.warn(none_template % ('Band', self.Default['Band']))
with open(self.System['base_dir']+self.file, 'a') as warn:
warn.write(none_template % ('Band', self.Default['Band']))
self.System['Band'] = self.Default['Band']
except KeyError:
warnings.warn(none_template % ('Band', self.Default['Band']))
with open(self.System['base_dir']+self.file, 'a') as warn:
warn.write(none_template % ('Band', self.Default['Band']))
self.System['Band'] = self.Default['Band']
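# A minimal usage sketch (assumption: a 'movie.xyz' trajectory exists under
# base_dir); unknown or malformed keys are replaced by defaults and every
# decision is logged to Sapphire_Info.txt:
#
#   cleaned = _Clean_System({'base_dir': '', 'movie_file_name': 'movie.xyz'})
#   print(cleaned.System)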
|
from django.urls import path, include
from mighty.applications.tenant import views
app_name = 'tenant'
api_urlpatterns = [
path('tenant/', include([
path('', views.TenantList.as_view(), name="api-tenant-list"),
path('<uuid:uid>/', views.TenantDetail.as_view(), name="api-tenant-detail"),
path('<uuid:uid>/current/', views.CurrentTenant.as_view(), name="api-tenant-current"),
#path('invitation/', include([
# path('', views.InvitationList.as_view(), name="api-invitation-exist"),
# path('<uuid:uid>/', views.InvitationDetail.as_view(), name="api-tenant-invitation"),
# path('<uuid:uid>/<str:action>/', views.InvitationDetail.as_view(), name="api-tenant-invitation-action"),
#])),
path('role/', include([
path('', views.RoleList.as_view(), name="api-role-list"),
path('<uuid:uid>/', views.RoleDetail.as_view(), name="api-role-detail"),
path('exist/', views.RoleCheckData.as_view(), name="api-role-exist"),
])),
])),
] |
import os
import argparse
import numpy as np
try:
from sklearn.cluster import KMeans
except:
raise ImportError('''
Error! Can not import sklearn.
Please install the package sklearn via the following command:
pip install sklearn
''')
try:
import pyoctree
except:
raise ImportError('''\n
Error! Can not import pyoctree.
Please build the octree with python enabled via the following commands:
cd octree/build
cmake .. -DUSE_PYTHON=ON && cmake --build . --config Release
export PYTHONPATH=`pwd`/python:$PYTHONPATH
''')
parser = argparse.ArgumentParser()
parser.add_argument('--run', type=str, required=True,
help='The command to run.')
parser.add_argument('--converter', type=str, required=False,
default='util/convert_tfrecords.py',
help='The path of the convert_tfrecords')
parser.add_argument('--scanner', type=str, required=False,
help='The path of the virtual_scanner')
parser.add_argument('--simplify_points', type=str, required=False,
default='simplify_points',
help='The path of the simplify_points')
abs_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
root_folder = os.path.join(abs_path, 'script/dataset/midnet_data')
args = parser.parse_args()
converter = os.path.join(abs_path, args.converter)
virtual_scanner = args.scanner
simplify = args.simplify_points
def download_data():
# download via wget
if not os.path.exists(root_folder):
os.makedirs(root_folder)
url = 'https://www.dropbox.com/s/lynuimh1bbtnkty/midnet_data.zip?dl=0'
cmd = 'wget %s -O %s.zip' % (url, root_folder)
print(cmd)
os.system(cmd)
# unzip
cmd = 'unzip %s.zip -d %s/..' % (root_folder, root_folder)
print(cmd)
os.system(cmd)
def shapenet_unzip():
shapenet = os.path.join(root_folder, 'ShapeNetCore.v1.zip')
cmd = 'unzip %s -d %s' % (shapenet, root_folder)
print(cmd)
os.system(cmd)
shapenet_folder = os.path.join(root_folder, 'ShapeNetCore.v1')
filenames = os.listdir(shapenet_folder)
for filename in filenames:
abs_name = os.path.join(shapenet_folder, filename)
if not filename.endswith('.zip'):
os.remove(abs_name)
else:
cmd = 'unzip %s -d %s' % (abs_name, shapenet_folder)
print(cmd)
os.system(cmd)
def shapenet_move_objs():
shapenet_folder = os.path.join(root_folder, 'ShapeNetCore.v1')
mesh_folder = os.path.join(root_folder, 'mesh')
folders = os.listdir(shapenet_folder)
for folder in folders:
src_folder = os.path.join(shapenet_folder, folder)
des_folder = os.path.join(mesh_folder, folder)
if not os.path.isdir(src_folder):
continue
if not os.path.exists(des_folder):
os.makedirs(des_folder)
filenames = os.listdir(src_folder)
for filename in filenames:
src_filename = os.path.join(src_folder, filename, 'model.obj')
des_filename = os.path.join(des_folder, filename + '.obj')
if not os.path.exists(src_filename):
print('Warning: not exist - ', src_filename)
continue
os.rename(src_filename, des_filename)
def shapenet_convert_mesh_to_points():
mesh_folder = os.path.join(root_folder, 'mesh')
# Delete the following 3 files since the virtualscanner can not deal with them
filelist = ['03624134/67ada28ebc79cc75a056f196c127ed77.obj',
'04074963/b65b590a565fa2547e1c85c5c15da7fb.obj',
'04090263/4a32519f44dc84aabafe26e2eb69ebf4.obj']
for filename in filelist:
filename = os.path.join(mesh_folder, filename)
if os.path.exists(filename):
os.remove(filename)
# run virtualscanner
folders = os.listdir(mesh_folder)
for folder in folders:
curr_folder = os.path.join(mesh_folder, folder)
cmd = '%s %s 14' % (virtual_scanner, curr_folder)
print(cmd)
os.system(cmd)
# move points
points_folder = os.path.join(root_folder, 'points.dense')
for folder in folders:
src_folder = os.path.join(mesh_folder, folder)
des_folder = os.path.join(points_folder, folder)
if not os.path.exists(des_folder):
os.makedirs(des_folder)
filenames = os.listdir(src_folder)
for filename in filenames:
if filename.endswith('.points'):
os.rename(os.path.join(src_folder, filename),
os.path.join(des_folder, filename))
def shapenet_simplify_points(resolution=64):
# rename and backup the original folders
points_folder = os.path.join(root_folder, 'points')
original_folder = os.path.join(root_folder, 'points.dense')
# if os.path.exists(points_folder):
# os.rename(points_folder, original_folder)
folders = os.listdir(original_folder)
for folder in folders:
# write filelist to disk
curr_folder = os.path.join(original_folder, folder)
filenames = os.listdir(curr_folder)
filelist_name = os.path.join(curr_folder, 'list.txt')
with open(filelist_name, 'w') as fid:
for filename in filenames:
if filename.endswith('.points'):
fid.write(os.path.join(curr_folder, filename) + '\n')
# run simplify_points
output_path = os.path.join(points_folder, folder)
if not os.path.exists(output_path):
os.makedirs(output_path)
cmd = '%s --filenames %s --output_path %s --dim %d' % \
(simplify, filelist_name, output_path, resolution)
print(cmd)
os.system(cmd)
os.remove(filelist_name)
def point_cloud_clustering(point_cloud, n_clusters=100):
pt_num = point_cloud.pts_num()
pts = point_cloud.points()
normals = point_cloud.normals()
X = np.array(pts).reshape(pt_num, 3)
if pt_num < n_clusters:
n_clusters = pt_num
y_pred = KMeans(n_clusters=n_clusters, n_init=1).fit_predict(X)
succ = point_cloud.set_points(pts, normals, [], y_pred.tolist())
return point_cloud
def shapenet_clustering():
points_folder = os.path.join(root_folder, 'points')
output_folder = os.path.join(root_folder, 'points.64.cluster.100')
folders = os.listdir(points_folder)
for folder in folders:
src_folder = os.path.join(points_folder, folder)
des_folder = os.path.join(output_folder, folder)
if not os.path.exists(des_folder):
os.makedirs(des_folder)
print('Processing: ' + des_folder)
filenames = os.listdir(src_folder)
point_cloud = pyoctree.Points()
for filename in filenames:
if filename.endswith('.points'):
succ = point_cloud.read_points(os.path.join(src_folder, filename))
assert succ
point_cloud = point_cloud_clustering(point_cloud)
succ = point_cloud.write_points(os.path.join(des_folder, filename))
assert succ
def shapenet_generate_points_tfrecords():
points_folder = os.path.join(root_folder, 'points.64.cluster.100')
filelist = os.path.join(root_folder, 'filelist.txt')
folders = sorted(os.listdir(points_folder))
with open(filelist, 'w') as fid:
for i, folder in enumerate(folders):
filenames = os.listdir(os.path.join(points_folder, folder))
for filename in filenames:
if filename.endswith('.points'):
filename = os.path.join(folder, filename)
fid.write('%s %d\n' % (filename, i))
tfrecords_name = os.path.join(
root_folder, 'shapenet.points.64.cluster.100.tfrecords')
cmd = 'python %s --shuffle true --file_dir %s --list_file %s --records_name %s' % \
(converter, points_folder, filelist, tfrecords_name)
print(cmd)
os.system(cmd)
def shapenet_create_tfrecords():
shapenet_unzip()
shapenet_move_objs()
shapenet_convert_mesh_to_points()
shapenet_simplify_points()
shapenet_clustering()
shapenet_generate_points_tfrecords()
if __name__ == '__main__':
eval('%s()' % args.run)
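# Hedged usage sketch (script name and tool paths are assumptions):
#   python prepare_data.py --run download_data
#   python prepare_data.py --run shapenet_create_tfrecords \
#       --scanner /path/to/virtual_scanner --simplify_points /path/to/simplify_points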
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
.. module:: __init__
:synopsis: module that contains a class that encompasses the properties of the configuration file and maps it.
"""
import os
import sys
import logging
from collections import OrderedDict
from os import path, environ
from urllib.parse import parse_qs
from typing import (
List,
Union,
Tuple
)
import yaml
from jsonschema import validate
import graphql
from graphql import parse as graphql_parse
from graphql.language.printer import print_ast as graphql_print_ast
from mockintosh.constants import PROGRAM, WARN_GPUBSUB_PACKAGE, WARN_AMAZONSQS_PACKAGE
from mockintosh.builders import ConfigRootBuilder
from mockintosh.helpers import _detect_engine, _urlsplit, _graphql_escape_templating, _graphql_undo_escapes
from mockintosh.config import (
ConfigRoot,
ConfigHttpService,
ConfigAsyncService,
ConfigMultiProduce,
ConfigGlobals,
ConfigExternalFilePath
)
from mockintosh.recognizers import (
PathRecognizer,
HeadersRecognizer,
QueryStringRecognizer,
BodyTextRecognizer,
BodyUrlencodedRecognizer,
BodyMultipartRecognizer,
BodyGraphQLVariablesRecognizer,
AsyncProducerValueRecognizer,
AsyncProducerKeyRecognizer,
AsyncProducerHeadersRecognizer,
AsyncProducerAmqpPropertiesRecognizer
)
from mockintosh.services.http import (
HttpService,
HttpEndpoint,
HttpBody
)
from mockintosh.services.asynchronous.kafka import ( # noqa: F401
KafkaService,
KafkaActor,
KafkaConsumer,
KafkaProducer,
KafkaProducerPayloadList,
KafkaProducerPayload
)
from mockintosh.services.asynchronous.amqp import ( # noqa: F401
AmqpService,
AmqpActor,
AmqpConsumer,
AmqpProducer,
AmqpProducerPayloadList,
AmqpProducerPayload
)
from mockintosh.services.asynchronous.redis import ( # noqa: F401
RedisService,
RedisActor,
RedisConsumer,
RedisProducer,
RedisProducerPayloadList,
RedisProducerPayload
)
try:
from mockintosh.services.asynchronous.gpubsub import ( # noqa: F401
GpubsubService,
GpubsubActor,
GpubsubConsumer,
GpubsubProducer,
GpubsubProducerPayloadList,
GpubsubProducerPayload
)
except ModuleNotFoundError:
pass
try:
from mockintosh.services.asynchronous.amazonsqs import ( # noqa: F401
AmazonsqsService,
AmazonsqsActor,
AmazonsqsConsumer,
AmazonsqsProducer,
AmazonsqsProducerPayloadList,
AmazonsqsProducerPayload
)
except ModuleNotFoundError:
pass
from mockintosh.services.asynchronous.mqtt import ( # noqa: F401
MqttService,
MqttActor,
MqttConsumer,
MqttProducer,
MqttProducerPayloadList,
MqttProducerPayload
)
from mockintosh.exceptions import (
UnrecognizedConfigFileFormat,
AsyncProducerListQueueMismatch
)
from mockintosh.templating import RenderingQueue
from mockintosh.stats import Stats
from mockintosh.logs import Logs
graphql.language.printer.MAX_LINE_LENGTH = -1
stats = Stats()
logs = Logs()
class Definition:
def __init__(
self,
source: str,
schema: dict,
rendering_queue: RenderingQueue,
is_file: bool = True,
load_override: Union[dict, None] = None
):
self.source = source
self.source_text = None if is_file else source
data_dir_override = environ.get('%s_DATA_DIR' % PROGRAM.upper(), None)
if data_dir_override is not None:
self.source_dir = path.abspath(data_dir_override)
else:
self.source_dir = path.dirname(path.abspath(source)) if source is not None and is_file else None
self.data = None
self.schema = schema
self.rendering_queue = rendering_queue
if load_override is not None:
self.data = load_override
else:
self.load()
self.validate()
self.template_engine = _detect_engine(self.data, 'config')
self.stats = stats
self.logs = logs
self.services, self.config_root = self.analyze(self.data)
self.globals = self.config_root.globals
def load(self) -> None:
if self.source_text is None:
with open(self.source, 'r') as file:
logging.info('Reading configuration file from path: %s', self.source)
self.source_text = file.read()
logging.debug('Configuration text: %s', self.source_text)
try:
self.data = yaml.safe_load(self.source_text)
logging.info('Configuration file is a valid YAML file.')
except (yaml.scanner.ScannerError, yaml.parser.ParserError) as e:
raise UnrecognizedConfigFileFormat(
'Configuration file is neither a JSON file nor a YAML file!',
self.source,
str(e)
)
def validate(self):
validate(instance=self.data, schema=self.schema)
logging.info('Configuration file is valid according to the JSON schema.')
def analyze(self, data: dict) -> Tuple[
List[
Union[
HttpService,
KafkaService,
AmqpService,
RedisService,
MqttService
]
],
ConfigRoot
]:
config_root_builder = ConfigRootBuilder()
config_root = config_root_builder.build(data)
for service in ConfigAsyncService.services:
service.address_template_renderer(
self.template_engine,
self.rendering_queue
)
new_services = []
for service in config_root.services:
self.logs.add_service(service.get_name())
self.stats.add_service(service.get_hint())
if isinstance(service, ConfigAsyncService):
new_services.append(self.analyze_async_service(service))
elif isinstance(service, ConfigHttpService):
new_services.append(
self.analyze_http_service(
service,
self.template_engine,
self.rendering_queue,
performance_profiles=config_root.performance_profiles,
global_performance_profile=None if config_root.globals is None else config_root.globals.performance_profile
)
)
return new_services, config_root
def analyze_http_service(
self,
service: ConfigHttpService,
template_engine: str,
rendering_queue: RenderingQueue,
performance_profiles: Union[dict, None] = None,
global_performance_profile: Union[ConfigGlobals, None] = None,
internal_http_service_id: Union[int, None] = None
):
performance_profiles = {} if performance_profiles is None else performance_profiles
http_service = HttpService(
service.port,
service.name,
service.hostname,
service.ssl,
service.ssl_cert_file,
service.ssl_key_file,
service.management_root,
service.oas,
service.performance_profile,
service.fallback_to,
service.internal_service_id,
internal_http_service_id=internal_http_service_id
)
service._impl = http_service
        service_performance_profile = service.performance_profile if service.performance_profile is not None else global_performance_profile
for endpoint in service.endpoints:
orig_path = endpoint.path
params = {}
context = OrderedDict()
performance_profile = performance_profiles.get(
                endpoint.performance_profile if endpoint.performance_profile is not None else service_performance_profile,
None
)
if performance_profile is not None:
performance_profile = performance_profile.actuator
scheme, netloc, path, query, fragment = _urlsplit(endpoint.path)
query_string = {}
parsed_query = parse_qs(query, keep_blank_values=True)
query_string.update(endpoint.query_string)
query_string.update({k: parsed_query[k] for k, v in parsed_query.items()})
path_recognizer = PathRecognizer(
path,
params,
context,
template_engine,
rendering_queue
)
path, priority = path_recognizer.recognize()
headers_recognizer = HeadersRecognizer(
endpoint.headers,
params,
context,
template_engine,
rendering_queue
)
headers = headers_recognizer.recognize()
query_string_recognizer = QueryStringRecognizer(
query_string,
params,
context,
template_engine,
rendering_queue
)
query_string = query_string_recognizer.recognize()
http_body = None
if endpoint.body is not None:
graphql_query = None if endpoint.body.graphql_query is None else endpoint.body.graphql_query
if isinstance(graphql_query, ConfigExternalFilePath):
external_path = self.resolve_relative_path('GraphQL', graphql_query.path)
with open(external_path, 'r') as file:
logging.debug('Reading external file from path: %s', external_path)
graphql_query = file.read()
if graphql_query is not None:
graphql_query = _graphql_escape_templating(graphql_query)
logging.debug('Before GraphQL parse/unparse:\n%s', graphql_query)
graphql_ast = graphql_parse(graphql_query)
graphql_query = graphql_print_ast(graphql_ast).strip()
logging.debug('After GraphQL parse/unparse:\n%s', graphql_query)
graphql_query = _graphql_undo_escapes(graphql_query)
logging.debug('Rendered GraphQL:\n%s', graphql_query)
body_text_recognizer = BodyTextRecognizer(
graphql_query if graphql_query is not None else endpoint.body.text,
params,
context,
template_engine,
rendering_queue
)
text = body_text_recognizer.recognize()
body_urlencoded_recognizer = BodyUrlencodedRecognizer(
endpoint.body.urlencoded,
params,
context,
template_engine,
rendering_queue
)
urlencoded = body_urlencoded_recognizer.recognize()
body_multipart_recognizer = BodyMultipartRecognizer(
endpoint.body.multipart,
params,
context,
template_engine,
rendering_queue
)
multipart = body_multipart_recognizer.recognize()
body_graphql_variables_recognizer = BodyGraphQLVariablesRecognizer(
endpoint.body.graphql_variables,
params,
context,
template_engine,
rendering_queue
)
graphql_variables = body_graphql_variables_recognizer.recognize()
http_body = HttpBody(
endpoint.body.schema,
text,
urlencoded,
multipart,
graphql_variables,
is_grapql_query=True if graphql_query is not None else False
)
http_service.add_endpoint(
HttpEndpoint(
endpoint.id,
orig_path,
params,
context,
performance_profile,
priority,
path,
endpoint.comment,
endpoint.method,
query_string,
headers,
http_body,
endpoint.dataset,
endpoint.response,
endpoint.multi_responses_looped,
endpoint.dataset_looped
)
)
return http_service
def analyze_async_service(
self,
service: ConfigAsyncService
):
if service.type == 'gpubsub':
try:
import mockintosh.services.asynchronous.gpubsub # noqa: F401
except ModuleNotFoundError:
logging.error(WARN_GPUBSUB_PACKAGE)
raise
elif service.type == 'amazonsqs':
try:
import mockintosh.services.asynchronous.amazonsqs # noqa: F401
except ModuleNotFoundError:
logging.error(WARN_AMAZONSQS_PACKAGE)
raise
class_name_prefix = service.type.capitalize()
async_service = getattr(sys.modules[__name__], '%sService' % class_name_prefix)(
service.address,
name=service.name,
definition=self,
_id=service.internal_service_id,
ssl=service.ssl
)
service._impl = async_service
for i, actor in enumerate(service.actors):
async_actor = getattr(sys.modules[__name__], '%sActor' % class_name_prefix)(i, actor.name)
async_service.add_actor(async_actor)
if actor.consume is not None:
capture_limit = actor.consume.capture
value = actor.consume.value
key = actor.consume.key
headers = actor.consume.headers
amqp_properties = actor.consume.amqp_properties
params = async_actor.params
context = async_actor.context
async_producer_value_recognizer = AsyncProducerValueRecognizer(
value,
params,
context,
self.template_engine,
self.rendering_queue
)
value = async_producer_value_recognizer.recognize()
async_producer_key_recognizer = AsyncProducerKeyRecognizer(
key,
params,
context,
self.template_engine,
self.rendering_queue
)
key = async_producer_key_recognizer.recognize()
async_producer_headers_recognizer = AsyncProducerHeadersRecognizer(
{} if headers is None else headers.payload,
params,
context,
self.template_engine,
self.rendering_queue
)
headers = async_producer_headers_recognizer.recognize()
async_producer_amqp_properties_recognizer = AsyncProducerAmqpPropertiesRecognizer(
{} if amqp_properties is None else amqp_properties.__dict__,
params,
context,
self.template_engine,
self.rendering_queue
)
amqp_properties = async_producer_amqp_properties_recognizer.recognize()
async_consumer = getattr(sys.modules[__name__], '%sConsumer' % class_name_prefix)(
actor.consume.queue,
schema=actor.consume.schema,
value=value,
key=key,
headers=headers,
amqp_properties=amqp_properties,
capture_limit=capture_limit
)
async_actor.set_consumer(async_consumer)
async_actor.set_delay(actor.delay)
if actor.produce is not None:
queue = None
payload_list = getattr(sys.modules[__name__], '%sProducerPayloadList' % class_name_prefix)()
produce_list = []
if isinstance(actor.produce, ConfigMultiProduce):
queue = actor.produce.produce_list[0].queue
for _produce in actor.produce.produce_list:
if queue != _produce.queue:
raise AsyncProducerListQueueMismatch(async_actor.get_hint())
produce_list += actor.produce.produce_list
else:
queue = actor.produce.queue
produce_list += [actor.produce]
for produce in produce_list:
payload = getattr(sys.modules[__name__], '%sProducerPayload' % class_name_prefix)(
produce.value,
key=produce.key,
headers={} if produce.headers is None else produce.headers.payload,
amqp_properties={} if produce.amqp_properties is None else produce.amqp_properties.__dict__,
tag=produce.tag,
enable_topic_creation=produce.create
)
payload_list.add_payload(payload)
async_producer = getattr(sys.modules[__name__], '%sProducer' % class_name_prefix)(queue, payload_list)
async_actor.set_producer(async_producer)
async_actor.set_limit(actor.limit)
async_actor.set_dataset(actor.dataset)
async_actor.multi_payloads_looped = actor.multi_payloads_looped
async_actor.dataset_looped = actor.dataset_looped
return async_service
def resolve_relative_path(self, document_type, source_text):
relative_path = None
orig_relative_path = source_text[1:]
error_msg = 'External %s document %r couldn\'t be accessed or found!' % (document_type, orig_relative_path)
if orig_relative_path[0] == '/':
orig_relative_path = orig_relative_path[1:]
relative_path = os.path.join(self.source_dir, orig_relative_path)
if not os.path.isfile(relative_path):
raise Exception(error_msg)
relative_path = os.path.abspath(relative_path)
if not relative_path.startswith(self.source_dir):
raise Exception(error_msg)
return relative_path
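# Added sketch (not part of mockintosh): resolve_relative_path() above guards
# against path traversal by resolving the candidate path and requiring that it
# stays inside the configuration's source directory. The same check in
# isolation, with a hypothetical helper name:
def _is_inside_source_dir(source_dir: str, candidate: str) -> bool:
    resolved = os.path.abspath(os.path.join(source_dir, candidate.lstrip('/')))
    return resolved.startswith(os.path.abspath(source_dir))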
|
import datetime
import json
import unittest
from app import db
from app.models.bucketlist_models import Users
from app.test_config import GlobalTestCase
from flask import url_for
class BucketlistTest(GlobalTestCase):
def setUp(self):
db.drop_all()
db.create_all()
self.user = Users(
username='johndoe',
email='[email protected]',
password='johndoe123')
db.session.add(self.user)
db.session.commit()
response = self.client.post(
url_for('login'),
data=json.dumps({
'username': 'johndoe',
'password': 'johndoe123'}),
content_type='application/json')
data = json.loads(response.get_data(as_text=True))
self.token = {'Authorization': data['token']}
self.logged_in_user = Users.query.filter_by(username='johndoe').first()
def test_can_create_bucketlist(self):
response = self.client.post(
url_for('bucketlists'),
data=json.dumps({
'bucket_name': 'test_bucketlist',
'bucket_description': 'Test bucketlist',
'date_created': str(datetime.datetime.now()),
'creator_id': self.logged_in_user.user_id
}),
content_type='application/json',
headers=self.token)
self.assert_200(response)
data = json.loads(response.get_data(as_text=True))
self.assertIsNotNone(data)
def test_can_view_one_bucketlist(self):
self.client.post(
url_for('bucketlists'),
data=json.dumps({
'bucket_name': 'test_bucketlist',
'bucket_description': 'Test bucketlist',
'date_created': str(datetime.datetime.now()),
'creator_id': self.logged_in_user.user_id
}),
content_type='application/json',
headers=self.token)
response = self.client.get(
url_for('one_bucketlist', bucketlist_id=1),
headers=self.token)
self.assert_200(response)
data = json.loads(response.get_data(as_text=True))
self.assertIsNotNone(data)
def test_can_delete_bucketlist(self):
self.client.post(
url_for('bucketlists'),
data=json.dumps({
'bucket_name': 'test_bucketlist',
'bucket_description': 'Test bucketlist',
'date_created': str(datetime.datetime.now()),
'creator_id': self.logged_in_user.user_id
}),
content_type='application/json',
headers=self.token)
response = self.client.delete(
url_for('one_bucketlist', bucketlist_id=1),
headers=self.token)
self.assert_200(response)
data = json.loads(response.get_data(as_text=True))
self.assertIsNotNone(data)
response = self.client.get(
url_for('one_bucketlist', bucketlist_id=1),
headers=self.token)
self.assert_status(response, 400)
def test_can_search_for_bucketlist(self):
self.client.post(
url_for('bucketlists'),
data=json.dumps({
'bucket_name': 'test_bucketlist',
'bucket_description': 'Test bucketlist',
'date_created': str(datetime.datetime.now()),
'creator_id': self.logged_in_user.user_id
}),
content_type='application/json',
headers=self.token)
response = self.client.get(
'/bucketlists?q=bucketlist',
headers=self.token)
self.assert_200(response)
data = json.loads(response.get_data(as_text=True))
self.assertIsNotNone(data)
response = self.client.get(
'/bucketlists?q=none',
headers=self.token)
self.assert_status(response, 400)
result = json.loads(response.get_data(as_text=True))
self.assertIsNotNone(result)
self.assertIn("does not match any bucketlist names", result['message'])
def test_can_edit_bucketlist(self):
self.client.post(
url_for('bucketlists'),
data=json.dumps({
'bucket_name': 'test_bucketlist',
'bucket_description': 'Test bucketlist',
'date_created': str(datetime.datetime.now()),
'creator_id': self.logged_in_user.user_id
}),
content_type='application/json',
headers=self.token)
response = self.client.put(
url_for('one_bucketlist', bucketlist_id=1),
data=json.dumps({
'bucket_name': 'life_bucketlist',
'bucket_description': 'Life bucketlist',
'date_created': str(datetime.datetime.now()),
'creator_id': self.logged_in_user.user_id
}),
headers=self.token)
self.assert_200(response)
data = json.loads(response.get_data(as_text=True))
self.assertIsNotNone(data)
# def tearDown(self):
# db.session.close_all()
# db.drop_all()
if __name__ == '__main__':
unittest.main()
|
#!/usr/bin/python
# Copyright (c) 2017 David LePage
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: bgp_element_facts
short_description: Facts about BGP based elements in the SMC
description:
- BGP elements are the building blocks of a BGP configuration on
a layer 3 engine. Use this module to obtain available elements and their
values.
version_added: '2.5'
options:
element:
description:
- Type of bgp element to retrieve
required: true
choices:
- ip_access_list
- ip_prefix_list
- ipv6_access_list
- ipv6_prefix_list
- as_path_access_list
- community_access_list
- extended_community_access_list
- external_bgp_peer
- bgp_peering
- autonomous_system
type: str
extends_documentation_fragment:
- stonesoft
- stonesoft_facts
requirements:
- smc-python
author:
- David LePage (@gabstopper)
'''
EXAMPLES = '''
- name: BGP Facts
hosts: localhost
gather_facts: no
tasks:
- name: Retrieve all data about an external bgp peer
bgp_facts:
element: external_bgp_peer
filter: externalpeer
- name: BGP Facts
hosts: localhost
gather_facts: no
tasks:
- name: Return all data about specified autonomous system
bgp_facts:
element: autonomous_system
filter: remoteas
- name: Routing facts about an engine
hosts: localhost
gather_facts: no
tasks:
- name: Find details about specific profile
bgp_facts:
element: bgp_profile
filter: Default BGP Profile
case_sensitive: no
'''
RETURN = '''
elements:
description: List all BGP Profiles
returned: always
type: list
sample: [{
"name": "Default BGP Profile",
"type": "bgp_profile"
}]
elements:
description: Details of a specific autonomous system
returned: always
type: list
sample: [{
"as_number": 12000,
"comment": null,
"name": "myas",
"type": "autonomous_system"
}]
elements:
description: Details about BGP Peering profile
returned: always
type: list
sample: [{
"comment": null,
"connected_check": "disabled",
"connection_profile": {
"connect_retry": 120,
"name": "Default BGP Connection Profile",
"session_hold_timer": 180,
"session_keep_alive": 60,
"type": "bgp_connection_profile"
},
"default_originate": false,
"dont_capability_negotiate": false,
"local_as_option": "not_set",
"max_prefix_option": "not_enabled",
"name": "mypeering",
"next_hop_self": true,
"orf_option": "disabled",
"override_capability": false,
"read_only": false,
"remove_private_as": false,
"route_reflector_client": false,
"send_community": "no",
"soft_reconfiguration": true,
"system": false,
"ttl_option": "disabled",
"type": "bgp_peering"
}]
'''
from ansible.module_utils.stonesoft_util import StonesoftModuleBase
try:
from smc.base.model import lookup_class
except ImportError:
pass
bgp_elements = (
'ip_access_list', 'ip_prefix_list', 'ipv6_access_list',
'ipv6_prefix_list', 'as_path_access_list', 'community_access_list',
'extended_community_access_list', 'external_bgp_peer', 'bgp_peering',
'autonomous_system'
)
def serialize_namedtuple_obj(element):
"""
Pass in an instance of the access or prefix list class obtained
from Element.from_href and iterate through the entries, returning
them as a dict
"""
return {element.typeof:
{'name': element.name,
'comment': element.comment,
'entries': [entry._asdict() for entry in element]
}
}
def convert_to_dict(element):
"""
Convert an element instance returned from the search query
into a dict.
:rtype: dict
"""
if 'access_list' in element.typeof or 'prefix_list' in element.typeof:
return serialize_namedtuple_obj(element)
elif 'autonomous_system' in element.typeof:
return as_system_dict(element)
elif 'bgp_peering' in element.typeof:
return bgp_peering_dict(element)
elif 'external_bgp_peer' in element.typeof:
return bgp_peer_dict(element)
return {}
def as_system_dict(element):
"""
Autonomous System representation.
"""
return {'autonomous_system': {
'name': element.name,
'as_number': element.as_number,
'comment': element.comment
}
}
def bgp_peer_dict(element):
"""
External BGP Peer representation
"""
return {'external_bgp_peer': {
'name': element.name,
'neighbor_ip': element.neighbor_ip,
'neighbor_as': element.neighbor_as.name,
'neighbor_port': element.neighbor_port,
'comment': element.comment
}
}
def bgp_peering_dict(element):
"""
Representation of a BGP Peering
"""
return {'bgp_peering': {
'name': element.name,
'comment': element.comment}}
class BGPElementFacts(StonesoftModuleBase):
def __init__(self):
self.module_args = dict(
element=dict(required=True, type='str', choices=list(bgp_elements))
)
self.element = None
self.limit = None
self.filter = None
self.as_yaml = None
self.exact_match = None
self.case_sensitive = None
required_if = [('as_yaml', True, ['filter'])]
self.results = dict(
ansible_facts=dict(
bgp_element=[]
)
)
super(BGPElementFacts, self).__init__(self.module_args, required_if=required_if,
is_fact=True)
def exec_module(self, **kwargs):
for name, value in kwargs.items():
setattr(self, name, value)
result = self.search_by_type(lookup_class(self.element))
if self.filter:
if self.as_yaml:
elements = [convert_to_dict(element) for element in result
if element.name == self.filter]
else:
elements = [convert_to_dict(element) for element in result]
else:
elements = [{'name': element.name, 'type': element.typeof} for element in result]
self.results['ansible_facts']['bgp_element'] = [{'elements': elements}]\
if elements else []
return self.results
def main():
BGPElementFacts()
if __name__ == '__main__':
main()
|
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
def __repr__(self):
return '{ ' + str(self.val) +': ' + repr(self.left) + ', ' + repr(self.right) + '}'
class Solution:
def getDepth(self, S: str):
end = self.idx
while end < self.len:
if S[end] == '-':
end += 1
else:
break
res = end - self.idx
self.idx = end
return res
def getVal(self, S: str):
end = self.idx
while end < self.len:
if S[end] != '-':
end += 1
else:
break
res = 0 if end - self.idx < 1 else int(S[self.idx:end])
self.idx = end
return res
def recoverFromPreorder(self, S: str) -> TreeNode:
self.len = len(S)
self.idx = 0
if self.len == 0:
return None
self.root = TreeNode(self.getVal(S))
self.stack = [self.root]
while self.idx < self.len:
d = self.getDepth(S)
v = self.getVal(S)
n = TreeNode(v)
while d < len(self.stack):
self.stack.pop()
if self.stack[-1].left is None:
self.stack[-1].left = n
self.stack.append(n)
elif self.stack[-1].right is None:
self.stack[-1].right = n
self.stack.append(n)
return self.root
def main():
s = Solution()
print(s.recoverFromPreorder("1-2--3--4-5--6--7"))
main()
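# Added sanity check (not in the original file): "1-2--3--4-5--6--7" encodes
# each node's depth with leading dashes, so the recovered tree should have 1 at
# the root, 2 and 5 as its children, and 3/4 and 6/7 below them.
def _check_example():
    root = Solution().recoverFromPreorder("1-2--3--4-5--6--7")
    assert root.val == 1 and root.left.val == 2 and root.right.val == 5
    assert root.left.left.val == 3 and root.left.right.val == 4
    assert root.right.left.val == 6 and root.right.right.val == 7
_check_example()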
|
#!/usr/bin/env python
#
# License: BSD
# https://raw.githubusercontent.com/splintered-reality/py_trees/devel/LICENSE
#
##############################################################################
# Documentation
##############################################################################
"""
Demonstrates the characteristics of a typical 'action' behaviour.
.. argparse::
:module: py_trees.demos.action
:func: command_line_argument_parser
:prog: py-trees-demo-action-behaviour
.. image:: images/action.gif
"""
##############################################################################
# Imports
##############################################################################
import argparse
import atexit
import multiprocessing
import py_trees.common
import time
import py_trees.console as console
##############################################################################
# Classes
##############################################################################
def description():
content = "Demonstrates the characteristics of a typical 'action' behaviour.\n"
content += "\n"
content += "* Mocks an external process and connects to it in the setup() method\n"
content += "* Kickstarts new goals with the external process in the initialise() method\n"
content += "* Monitors the ongoing goal status in the update() method\n"
content += "* Determines RUNNING/SUCCESS pending feedback from the external process\n"
if py_trees.console.has_colours:
banner_line = console.green + "*" * 79 + "\n" + console.reset
s = "\n"
s += banner_line
s += console.bold_white + "Action Behaviour".center(79) + "\n" + console.reset
s += banner_line
s += "\n"
s += content
s += "\n"
s += banner_line
else:
s = content
return s
def epilog():
if py_trees.console.has_colours:
return console.cyan + "And his noodly appendage reached forth to tickle the blessed...\n" + console.reset
else:
return None
def command_line_argument_parser():
return argparse.ArgumentParser(description=description(),
epilog=epilog(),
formatter_class=argparse.RawDescriptionHelpFormatter,
)
def planning(pipe_connection):
"""Emulate a (potentially) long running external process."""
idle = True
percentage_complete = 0
try:
while(True):
if pipe_connection.poll():
pipe_connection.recv()
percentage_complete = 0
idle = False
if not idle:
percentage_complete += 10
pipe_connection.send([percentage_complete])
if percentage_complete == 100:
idle = True
time.sleep(0.5)
except KeyboardInterrupt:
pass
class Action(py_trees.behaviour.Behaviour):
"""Demonstrates the at-a-distance style action behaviour.
This behaviour connects to a separately running process
(initiated in setup()) and proceeds to work with that subprocess to
initiate a task and monitor the progress of that task at each tick
until completed. While the task is running the behaviour returns
:data:`~py_trees.common.Status.RUNNING`.
On completion, the behaviour returns with success or failure
(depending on success or failure of the task itself).
Key point - this behaviour itself should not be doing any work!
"""
def __init__(self, name="Action"):
"""Configure the name of the behaviour."""
super(Action, self).__init__(name)
self.logger.debug("%s.__init__()" % (self.__class__.__name__))
def setup(self):
"""Kickstart the separate process this behaviour will work with.
Ordinarily this process will be already running. In this case,
setup is usually just responsible for verifying it exists.
"""
self.logger.debug("%s.setup()->connections to an external process" % (self.__class__.__name__))
self.parent_connection, self.child_connection = multiprocessing.Pipe()
self.planning = multiprocessing.Process(target=planning, args=(self.child_connection,))
atexit.register(self.planning.terminate)
self.planning.start()
def initialise(self):
"""Reset a counter variable."""
self.logger.debug("%s.initialise()->sending new goal" % (self.__class__.__name__))
self.parent_connection.send(['new goal'])
self.percentage_completion = 0
def update(self):
"""Increment the counter, monitor and decide on a new status."""
new_status = py_trees.common.Status.RUNNING
if self.parent_connection.poll():
self.percentage_completion = self.parent_connection.recv().pop()
if self.percentage_completion == 100:
new_status = py_trees.common.Status.SUCCESS
if new_status == py_trees.common.Status.SUCCESS:
self.feedback_message = "Processing finished"
self.logger.debug(
"%s.update()[%s->%s][%s]" % (
self.__class__.__name__,
self.status, new_status,
self.feedback_message
)
)
else:
self.feedback_message = "{0}%".format(self.percentage_completion)
self.logger.debug(
"%s.update()[%s][%s]" % (
self.__class__.__name__,
self.status,
self.feedback_message
)
)
return new_status
def terminate(self, new_status):
"""Nothing to clean up in this example."""
self.logger.debug(
"%s.terminate()[%s->%s]" % (
self.__class__.__name__,
self.status, new_status
)
)
##############################################################################
# Main
##############################################################################
def main():
"""Entry point for the demo script."""
command_line_argument_parser().parse_args()
print(description())
py_trees.logging.level = py_trees.logging.Level.DEBUG
action = Action()
action.setup()
try:
for _unused_i in range(0, 12):
action.tick_once()
time.sleep(0.5)
print("\n")
except KeyboardInterrupt:
pass
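# Added entry-point guard (the original demo is normally launched via the
# py-trees-demo-action-behaviour console script rather than run directly):
if __name__ == '__main__':
    main()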
|
from PyPDF2 import PdfFileReader
def getTextPDF(pdfFileName, password=''):
    """Extract the text of every page of a PDF file, decrypting it first if a password is given."""
    with open(pdfFileName, 'rb') as pdf_file:
        read_pdf = PdfFileReader(pdf_file)
        if password != '':
            read_pdf.decrypt(password)
        text = []
        for i in range(0, read_pdf.getNumPages()):
            text.append(read_pdf.getPage(i).extractText())
    return '\n'.join(text)
|
from mani_skill_learn.utils.data import (dict_to_seq, recursive_init_dict_array, map_func_to_dict_array,
store_dict_array_to_h5,
sample_element_in_dict_array, assign_single_element_in_dict_array, is_seq_of)
from mani_skill_learn.utils.fileio import load_h5s_as_list_dict_array, load, check_md5sum
import glob
# demo_dir = '/home/quan/example_mani_skill_data/OpenCabinetDrawer_1045_link_0-v0_pcd.h5'
# demo_dir = '/home/quan/ManiSkill-Learn/demonstrations/drawer/'
demo_dir = '/home/liuchi/zhaoyinuo/ManiSkill-Learn/full_mani_skill_data/openCabinetDrawer/'
init_buffers = glob.glob("{}/*.h5".format(demo_dir))
# print(init_buffers)
replicate_init_buffer = 1
num_trajs_per_demo_file = -1
init_buffer_size = 1
init_buffers = init_buffers[0:init_buffer_size]
buffer_keys = ['obs', 'actions', 'next_obs', 'rewards', 'dones']
if isinstance(init_buffers, str):
init_buffers = [init_buffers]
if is_seq_of(init_buffers, str):
init_buffers = [load_h5s_as_list_dict_array(_) for _ in init_buffers]
if isinstance(init_buffers, dict):
init_buffers = [init_buffers]
print('Num of datasets', len(init_buffers))
recordings = []
enough = False
gripper_coefficient = -1
for _ in range(replicate_init_buffer):
cnt = 0
if enough: break
for init_buffer in init_buffers:
if enough: break
for item in init_buffer:
if cnt >= num_trajs_per_demo_file and num_trajs_per_demo_file != -1:
break
print('================================================')
print(item['rewards'])
# for i in range(item['rewards'].shape[0]):
# print(item['rewards'][i])
# # rew_ee_handle = item['rew_ee_handle'] / 2.
# # ee_close_to_handle = item['stage_reward'] >= (-(5 + 1.5 + 0.5) + 0.5)
# # gripper_angle_reward = item['gripper_angle_rew'] / 3.
# # dist_reward = 1 - (max(0, (item['target_qpos'] - item['qpos']) / item['target_qpos']))
# # if ee_close_to_handle:
# # self_design_reward = rew_ee_handle + gripper_angle_rew + dist_reward
# # else:
# # self_design_reward = rew_ee_handle + gripper_coefficient + dist_reward
# print(self_design_reward)
#
cnt += 1  # count trajectories read from this demo file
init_buffer_size -= 1
if init_buffer_size <= 0:
enough = True
break
|
#!/usr/bin/env python3
from cmd import Cmd
from collections import namedtuple
from concurrent.futures import ThreadPoolExecutor
from datetime import datetime
from datetime import timedelta
from enum import Enum
from threading import Lock
import argparse
import configparser
import lxml.html
import os
import json
import re
import requests
import sys
import traceback
class Sorting(Enum):
PRICE = 1
RENEW = 2
ORDER = 3
ALPHABETIC = 4
class DomainInfo:
def __init__(self, name, order, renew):
self.name = name
self.order = order
self.renew = renew
class DomainCmd(Cmd):
prompt = '(dq) '
BOOLEANS = {
'yes': True,
'enabled': True,
'true': True,
'on': True,
'no': False,
'disabled': False,
'false': False,
'off': False
}
SORTING_NAMES = {
'price': Sorting.PRICE,
'renew': Sorting.RENEW,
'order': Sorting.ORDER,
'alphabetic': Sorting.ALPHABETIC
}
SORTING_DIRECTION = {
'ascending': True,
'descending': False
}
RESETLINE = "\x1b[1K\r"
CART_TIMEOUT = timedelta(minutes=5)
def __init__(self, config_file):
super().__init__()
self.config_file = config_file
# Full list of TLDs
self.all_tlds = None
# Domain filtering
self.include_intl = False
self.include_sld = False
self.max_length = None
self.max_renew = None
self.max_order = None
# Sorting
self.sorting = Sorting.ALPHABETIC
self.sort_ascending = True
# Cart id
self.cart_id = None
self.cart_time = None
self.cart_lock = Lock()
# Status querying
self.data_lock = Lock()
self.print_lock = Lock()
self.domain_info = None
self.failed_domains = 0
self.check_aborted = False
def load_config(self):
if self.config_file is not None:
parser = configparser.ConfigParser()
parser.read(self.config_file)
if parser.has_section('filter'):
self.include_intl = parser.getboolean('filter', 'include_intl', fallback=self.include_intl)
self.include_sld = parser.getboolean('filter', 'include_sld', fallback=self.include_sld)
self.max_length = parser.getint('filter', 'max_length', fallback=self.max_length)
self.max_renew = parser.getfloat('filter', 'max_renew', fallback=self.max_renew)
self.max_order = parser.getfloat('filter', 'max_order', fallback=self.max_order)
if parser.has_section('sorting'):
if parser.has_option('sorting', 'sorting'):
self.sorting = Sorting[parser.get('sorting', 'sorting')]
self.sort_ascending = parser.getboolean('sorting', 'ascending', fallback=self.sort_ascending)
def save_config(self):
if self.config_file is not None:
writer = configparser.ConfigParser()
writer.add_section('filter')
writer.set('filter', 'include_intl', str(self.include_intl))
writer.set('filter', 'include_sld', str(self.include_sld))
if self.max_length is not None:
writer.set('filter', 'max_length', str(self.max_length))
if self.max_renew is not None:
writer.set('filter', 'max_renew', str(self.max_renew))
if self.max_order is not None:
writer.set('filter', 'max_order', str(self.max_order))
writer.add_section('sorting')
writer.set('sorting', 'sorting', self.sorting.name)
writer.set('sorting', 'ascending', str(self.sort_ascending))
try:
with open(self.config_file, 'w') as f:
writer.write(f)
except Exception as e:
print('Failed to save configuration', file=sys.stderr)
def cmdloop(self, *args):
while True:
try:
super().cmdloop(*args)
except KeyboardInterrupt:
print('', file=sys.stderr)
pass
# Do nothing on empty line
def emptyline(self):
return
def default(self, arg):
if arg == 'EOF':
self.save_config()
sys.exit(0)
self.do_check(arg)
def do_maxorder(self, arg):
self._update_optional_number('max_order', int, 'Max order price', 0, arg)
def do_maxrenew(self, arg):
self._update_optional_number('max_renew', int, 'Max renew price', 0, arg)
def do_maxlen(self, arg):
self._update_optional_number('max_length', int, 'Max TLD length', 2, arg)
def _update_optional_number(self, field, fieldType, fieldName, minValue, arg):
arg = arg.strip().casefold()
if arg != '':
enabled = True
try:
enabled = self._parse_bool(arg)
except:
pass
if enabled:
try:
value = fieldType(arg)
except ValueError:
print('Cannot parse "%s"' % arg, file=sys.stderr)
return
if value < minValue:
print('%s may not be less than %s' % (fieldName, minValue), file=sys.stderr)
return
else:
value = None
self.__dict__[field] = value
else:
value = self.__dict__[field]
if value is None:
print('%s is disabled' % fieldName, file=sys.stderr)
else:
print('%s is %s' % (fieldName, str(value)), file=sys.stderr)
def do_intl(self, arg):
arg = arg.strip()
if arg != '':
try:
self.include_intl = self._parse_bool(arg)
except ValueError as e:
print('Cannot set internationalized domain status. %s' % str(e), file=sys.stderr)
return
print('Internationalized domains are %s' % ('enabled' if self.include_intl else 'disabled'), file=sys.stderr)
def do_sld(self, arg):
arg = arg.strip()
if arg != '':
try:
self.include_sld = self._parse_bool(arg)
except ValueError as e:
print('Cannot set second-level domain status. %s' % str(e), file=sys.stderr)
return
print('Second-level domains are %s' % ('enabled' if self.include_sld else 'disabled'), file=sys.stderr)
def _partial_key_match(self, map, partial):
partial = partial.casefold()
keys = list()
values = set()
for key, value in map.items():
if key.startswith(partial):
keys.append(key)
values.add(value)
if len(values) == 0:
raise ValueError('Expected any of %s' % str(list(map.keys())))
elif len(values) > 1:
raise ValueError('"%s" may refer to any of %s' % (partial, str(keys)))
return next(iter(values))
def _parse_bool(self, text):
return self._partial_key_match(DomainCmd.BOOLEANS, text)
def do_sort(self, args):
args = args.split()
if len(args) > 2:
print('Too many arguments', file=sys.stderr)
return
if len(args) > 0:
try:
new_sorting = self._partial_key_match(DomainCmd.SORTING_NAMES, args[0])
except ValueError as e:
print('Cannot parse mode. %s' % str(e), file=sys.stderr)
return
new_ascending = True
if len(args) > 1:
try:
new_ascending = self._partial_key_match(DomainCmd.SORTING_DIRECTION, args[1])
except ValueError as e:
print('Cannot parse direction. %s' % str(e), file=sys.stderr)
return
self.sorting = new_sorting
self.sort_ascending = new_ascending
print('Sorting by %s %s' % (self.sorting.name.lower(), 'ascending' if self.sort_ascending else 'descending'))
def do_updatetld(self, arg):
self._fetch_tlds()
def _fetch_tlds(self):
print('Fetching TLD list... ', file=sys.stderr, end='', flush=True)
try:
page = lxml.html.fromstring(requests.get('https://www.ovh.es/dominios/precios/').content)
tlds = []
for extensionTr in page.xpath("//table[@id='dataTable']/tbody/tr"):
tldTd, buyTd, renewTd = extensionTr.findall("td")[:3]
tldName = tldTd.find("a").text_content().strip().strip('.').lower()
buyPrice = float(buyTd.attrib['data-order'])
renewPrice = float(renewTd.attrib['data-order'])
tlds.append(DomainInfo(tldName, buyPrice, renewPrice))
tlds.sort(key=lambda x: x.name)
print('got %d' % len(tlds), file=sys.stderr)
self.all_tlds = tlds
return True
except Exception as e:
print('cannot fetch', file=sys.stderr)
traceback.print_exc()
return False
def do_tld(self, arg):
self.do_tlds(None)
def do_tlds(self, arg):
tlds = self._get_valid_tlds()
if tlds:
self._sort_domain_list(tlds)
self._print_domain_header()
for tld in tlds:
self._print_domain_entry(tld)
def _get_valid_tlds(self):
if self.all_tlds is None:
if not self._fetch_tlds():
return None
return [tld for tld in self.all_tlds if self._tld_valid(tld)]
def _tld_valid(self, tld):
if not self.include_sld and '.' in tld.name:
return False
if not self.include_intl and re.search(r'[^a-z.]', tld.name):
return False
if self.max_length is not None and len(tld.name) > self.max_length:
return False
if self.max_order is not None and tld.order > self.max_order:
return False
if self.max_renew is not None and tld.renew > self.max_renew:
return False
return True
def do_hack(self, arg):
names = arg.split()
if len(names) == 0:
print('At least one argument should be provided', file=sys.stderr)
return
to_check = self._domain_hack_list(names)
if to_check is not None:
print(', '.join(to_check))
def _domain_hack_list(self, names):
valid_tlds = self._get_valid_tlds()
if not valid_tlds:
print('Unable to get valid TLDs', file=sys.stderr)
return None
to_check = set()
for tld in valid_tlds:
tldend = tld.name.replace('.', '')
for name in names:
name = name.casefold()
if len(name) > len(tldend) and name.endswith(tldend):
to_check.add('%s.%s' % (name[:-len(tldend)], tld.name))
return sorted(to_check)
def do_check(self, arg):
to_check = self._domain_check_list(arg.split())
if not to_check:
return
self._check_list(to_check)
def do_hackcheck(self, arg):
to_check = self._domain_hack_list(arg.split())
if not to_check:
return
self._check_list(to_check)
def _check_list(self, to_check):
# Reset variables
if self.sorting == Sorting.ALPHABETIC:
self._print_domain_header()
self._run_domain_threads(self._check_and_update, to_check)
else:
self.domain_info = []
self.failed_domains = 0
self._run_domain_threads(self._check_and_update_sorted, to_check)
if not self.check_aborted:
self._sort_domain_list(self.domain_info)
self._print_process()
self._print_domain_header()
for info in self.domain_info:
self._print_domain_entry(info)
def _run_domain_threads(self, func, to_check):
self.check_aborted = False
executor = ThreadPoolExecutor(max_workers=10)
for domain in to_check:
try:
executor.submit(func, domain)
except KeyboardInterrupt:
print('Aborting, hold on...', file=sys.stderr)
self.check_aborted = True
break
while True:
try:
executor.shutdown()
break
except KeyboardInterrupt:
print('Aborting, hold on...', file=sys.stderr)
self.check_aborted = True
pass
def _print_process(self, line=None):
print(DomainCmd.RESETLINE, file=sys.stderr, end='', flush=True)
if line:
print(line, file=sys.stderr, end='', flush=True)
def _print_domain_header(self):
print('domain\trenew\torder')
def _print_domain_entry(self, info):
print('%s\t%.2f\t%.2f' % (info.name, info.renew, info.order))
def _domain_check_list(self, domains):
to_check = set()
valid_tlds = None
for domain in domains:
encoded = domain.encode('idna').decode('ascii').lower()
if not re.match(r'([a-z0-9]([a-z0-9-]*[a-z0-9])?\.)*[a-z0-9]([a-z0-9-]*[a-z0-9])?$', encoded):
print('Invalid domain "%s"' % domain, file=sys.stderr)
continue
if '.' in encoded:
to_check.add(encoded)
continue
if valid_tlds is None:
valid_tlds = self._get_valid_tlds()
if valid_tlds is None:
print('Cannot check %s' % domain, file=sys.stderr)
return None
for tld in valid_tlds:
to_check.add('%s.%s' % (encoded, tld.name))
return sorted(to_check)
def _check_and_update(self, domain):
if self.check_aborted:
return
try:
info = self._check_domain_status(domain)
except Exception as e:
with self.print_lock:
traceback.print_exc()
info = None
with self.print_lock:
if info is not None:
self._print_domain_entry(info)
def _check_and_update_sorted(self, domain):
if self.check_aborted:
return
with self.print_lock:
self._print_process('%i/%i: %s' % (len(self.domain_info), self.failed_domains, domain))
try:
info = self._check_domain_status(domain)
except Exception as e:
with self.print_lock:
traceback.print_exc()
info = None
with self.data_lock:
if info is not None:
self.domain_info.append(info)
else:
self.failed_domains += 1
def _check_domain_status(self, domain):
if not self._refresh_cart_id():
return None
params = {
'domain': domain
}
info = requests.get('https://www.ovh.es/engine/apiv6/order/cart/%s/domain' % self.cart_id, params=params).json()
# Get first (and only) offer
try:
info = info[0]
except:
return None
# Skip if not available
if not info['orderable'] or info['action'] != 'create':
return None
# Extract price
orderprice, renewprice = None, None
for price in info['prices']:
if price['label'] == 'TOTAL':
orderprice = price['price']['value']
elif price['label'] == 'RENEW':
renewprice = price['price']['value']
# Skip if any pricing information is not available
if orderprice is None or renewprice is None:
return None
return DomainInfo(domain, orderprice, renewprice)
def _refresh_cart_id(self):
if self.cart_time is None or datetime.utcnow() - self.cart_time >= DomainCmd.CART_TIMEOUT:
with self.cart_lock:
if self.cart_time is None or datetime.utcnow() - self.cart_time >= DomainCmd.CART_TIMEOUT:
return self._fetch_cart_id()
return True
def _fetch_cart_id(self):
cart_id_response = requests.post('https://www.ovh.es/engine/apiv6/order/cart', json={'description': '_ovhcom_legacy_order_cart_', 'ovhSubsidiary': 'ES'})
try:
cart_id_response.raise_for_status()
except Exception as e:
print('Could not get cart ID', file=sys.stderr)
traceback.print_exc()
return False
self.cart_id = cart_id_response.json()['cartId']
self.cart_time = datetime.utcnow()
return True
def _sort_domain_list(self, domains):
if self.sorting == Sorting.ALPHABETIC:
func = lambda x: x.name
elif self.sorting == Sorting.PRICE:
func = lambda x: max(x.renew, x.order)
elif self.sorting == Sorting.RENEW:
func = lambda x: x.renew
elif self.sorting == Sorting.ORDER:
func = lambda x: x.order
else:
raise Exception('Unexpected sorting mode %s' % str(self.sorting))
domains.sort(key=func)
if not self.sort_ascending:
domains.reverse()
def do_exit(self, arg):
self.save_config()
sys.exit(0)
def do_quit(self, arg):
self.save_config()
sys.exit(0)
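# Added illustration (not part of the original tool, never called at runtime):
# _partial_key_match() resolves an unambiguous prefix against a mapping, which
# is what lets commands accept abbreviations such as "asc"/"desc" or "y"/"n".
def _demo_partial_key_match():
    cmd = DomainCmd(None)
    assert cmd._parse_bool('en') is True      # unique prefix of 'enabled'
    assert cmd._parse_bool('off') is False
    assert cmd._partial_key_match(DomainCmd.SORTING_DIRECTION, 'desc') is False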
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Queries domain status using OVH\'s API.')
parser.add_argument('--noconfig', help='Disables user configuration', action='store_true')
args = parser.parse_args()
configFile = None
if not args.noconfig:
if os.name == 'nt':
configFile = os.path.join(os.getenv('APPDATA'), 'ovhdomainquery.ini')
else:
configFile = os.path.expanduser('~/.ovhdomainquery.ini')
cmd = DomainCmd(configFile)
cmd.load_config()
cmd.cmdloop()
|
import logging
import pandas as pd
from utils.file_handling import write_debug_file
from expworkup.handlers.cleaner import cleaner
from expworkup.handlers.chemical_types import get_unique_chemicals_types_byinstance, runuid_feat_merge
from utils.globals import get_debug_simple
modlog = logging.getLogger(f'mainlog.{__name__}')
warnlog = logging.getLogger(f'warning.{__name__}')
def construct_2d_view(report_df,
amounts_df,
inchi_key_indexed_features_df,
ratios_df,
calcs_df,
debug_bool,
raw_bool):
""" Combines the generated dataframes into a single 2d csv for export
Parameters
----------
report_df : pandas.DataFrame
2d dataframe returned after parsing all content from google drive
returned from expworkup.json_pipeline
amounts_df : pandas.DataFrame of concatenated calculations
does not include the report_df. Includes chemical types, values
indexed on runUID
inchi_key_indexed_features_df : pandas.DataFrame
all features selected by user in type_command.csv indexed on InchiKey
headers will conform to the type_command.csv unless mismatched
(mismatch occurs when the requested chemaxon features generate multiples)
ratios_df : pandas.DataFrame
calculated ratios of molarity from the _calc_ pipeline
indexed on runUID ('name')
columns are the ratio headers e.g. '_calc_ratio_acid_molarity_inorganic_molarity'
calcs_df : pd.DataFrame
completed _calcs_ specified by the ./utils/calc_command.py file
indexed on runUID ('name')
columns are the values return from _calcs_
debug_bool : CLI argument, True=Enable debugging
if toggled on, code will export CSV files of each dataframe
raw_bool : bool, from CLI, include all columns?
True will enable even improperly labeled columns to be exported
proper labels can be defined in 'dataset_rename.json'
Returns
-------
final_df : pandas.DataFrame with default view of data
default view also removes all nan columns and all '0' columns
TODO: add additional views (likely better in v3 though...)
Notes
-----
NOTE: An easy way to get a view of each of the large dataframes is to add
'--debug 1' to the CLI! Each render will be cast to a similarly named csv.
Search for the name for the associated code or vice-versa.
"""
modlog.info("Generating 2d dataframe")
print(f'Exporting 2d Dataframe...')
# Some final exports for ETL in V3
res = amounts_df.pivot_table(index=['name','inchikey'],
values=['molarity'],
columns=['main_type'],
aggfunc='sum')
res.columns = res.columns.droplevel(0) # remove molarity top level
sumbytype_molarity_df = res.groupby(level=0).sum() ##**
if debug_bool:
sumbytype_molarity_df_file = 'REPORT_MOLARITY_BYTYPE_CALCS.csv'
write_debug_file(sumbytype_molarity_df, sumbytype_molarity_df_file)
sumbytype_molarity_df = sumbytype_molarity_df.add_prefix('_rxn_molarity_')
sumbytype_byinstance_molarity_df = get_unique_chemicals_types_byinstance(res) ##**
sumbytype_byinstance_molarity_df = \
sumbytype_byinstance_molarity_df.add_prefix('_raw_')
if debug_bool:
sumbytype_byinstance_molarity_df_file = \
'REPORT_MOLARITY_BYTYPE_BYINSTANCE_CALCS.csv'
write_debug_file(sumbytype_byinstance_molarity_df,
sumbytype_byinstance_molarity_df_file)
feats_df = runuid_feat_merge(sumbytype_byinstance_molarity_df,
inchi_key_indexed_features_df)
# Generate a _raw_mmol_inchikey value for each inchikey in dataset
mmol_inchi_df = amounts_df.pivot_table(index=['name'],
values=['mmol'],
columns=['inchikey'],
aggfunc='sum')
mmol_inchi_df.columns = mmol_inchi_df.columns.droplevel(0) # remove 'mmol' top level
mmol_inchi_df = mmol_inchi_df.add_prefix('_raw_mmol_')
mmol_inchi_df.fillna(value=0, inplace=True, axis=1)
molarity_inchi_df = amounts_df.pivot_table(index=['name'],
values=['molarity'],
columns=['inchikey'],
aggfunc='sum')
molarity_inchi_df.columns = molarity_inchi_df.columns.droplevel(0) # remove 'molarity' top level
molarity_inchi_df = molarity_inchi_df.add_prefix('_raw_molarity_')
molarity_inchi_df.fillna(value=0, inplace=True, axis=1)
# add new targets as validated through pipeline and prepared
# Should ideally have runid_vial ('name') as the index
additional_default_dfs = [mmol_inchi_df,
molarity_inchi_df,
sumbytype_molarity_df,
feats_df,
ratios_df,
calcs_df]
escalate_final_df = report_df
escalate_final_df.set_index('name', drop=True, inplace=True)
for num, dataframe in enumerate(additional_default_dfs):
try:
dataframe.set_index('name', drop=True, inplace=True)
modlog.info(f'{num} in additional dataframes reindexed by runid_vial')
except KeyError:
modlog.info(f'{num} in additional dataframes already correctly indexed')
escalate_final_df = escalate_final_df.join(dataframe)
escalate_final_df.drop_duplicates(keep='first', inplace=True)
final_df = cleaner(escalate_final_df, raw_bool)
start_count = final_df.shape[1]
if get_debug_simple():
# Remove all columns that are entirely '0' or 'null'
# Even if all the values are actually 0, the column has no variance and adds nothing.
modlog.info(f'ETL was enabled through the CLI "--etl" option, no columns were removed from final dataframe')
print(f'ETL was enabled through the CLI "--etl" option, no columns were removed from final dataframe')
else:
condition_1 = (final_df == 0).all()
final_df = final_df.loc[:, ~condition_1]
condition_2 = (final_df.astype(str) == 'null').all()
final_df = final_df.loc[:, ~condition_2]
end_count = final_df.shape[1]
modlog.info(f'Removed {start_count-end_count} of an original {start_count} columns which contained only "0" or "null"')
print(f'Removed {start_count-end_count} of an original {start_count} columns which contained only "0" or "null"')
modlog.info('successfully generated mmol and molarity dataframes for calcs')
# TODO: cleanup documentation and export pipeline for statesets
# TODO: create final export of a 2d CSV file from the data above
return final_df
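# Added toy illustration (not part of the expworkup pipeline): the molarity
# summary above hinges on pandas pivot_table with aggfunc='sum', followed by
# dropping the 'molarity' column level and summing per run.
if __name__ == '__main__':
    _toy = pd.DataFrame({'name': ['run1', 'run1', 'run2'],
                         'inchikey': ['AAA', 'BBB', 'AAA'],
                         'main_type': ['acid', 'solvent', 'acid'],
                         'molarity': [0.5, 1.0, 0.25]})
    _res = _toy.pivot_table(index=['name', 'inchikey'], values=['molarity'],
                            columns=['main_type'], aggfunc='sum')
    _res.columns = _res.columns.droplevel(0)
    print(_res.groupby(level=0).sum())  # one row per run, one column per main_type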
|
from random import randint
jog = int(input('Guess a number: '))
comp = randint(1, 10)
tot = 1
while jog != comp:
    print('Wrong, try again!!')
    jog = int(input('Guess a number: '))
    tot += 1
    if jog < comp:
        print('A bit higher')
    if jog > comp:
        print('A bit lower')
print(f'Congratulations, you got it! The computer picked {comp} and you guessed {jog}')
print(f'You got it in {tot} attempts')
|
import xml.parsers.expat
NC_NS_URI ="urn:ietf:params:xml:ns:netconf:base:1.0"
CAPABILITIES = {
"urn:ietf:params:xml:ns:netconf:base:1.0" : "base",
"urn:ietf:params:netconf:base:1.1" : "base",
"urn:ietf:params:netconf:capability:writable-running:1.0" : "writable-running",
"urn:ietf:params:netconf:capability:candidate:1.0" : "candidate",
"urn:ietf:params:netconf:capability:startup:1.0" : "startup",
"urn:ietf:params:netconf:capability:url:1.0" : "url",
"urn:ietf:params:netconf:capability:xpath:1.0" : "xpath",
"urn:ietf:params:netconf:capability:notification:1.0" : "notification",
"urn:ietf:params:netconf:capability:with-defaults:1.0" : "with-defaults",
}
class Capability:
def __init__(self, uri):
self.parameters = {}
if "?" in uri:
id_, pars = uri.split("?")
self.parse_pars(pars)
else:
id_ = uri
self.id = id_
def parse_pars(self,pars):
for p in pars.split("&"):
name, value=p.split("=")
self.parameters[name] = value
class HelloParser:
def __init__(self):
self.capabilities = []
self.depth = self.state = 0
self.buffer = ""
self.parser = xml.parsers.expat.ParserCreate(namespace_separator=' ')
self.parser.CharacterDataHandler = self.handleCharData
self.parser.StartElementHandler = self.handleStartElement
self.parser.EndElementHandler = self.handleEndElement
def handleCharData(self, data):
if self.state == self.depth == 3:
self.buffer += data
def handleStartElement(self, data, attrs):
ns_uri, tag = data.split()
if ns_uri == NC_NS_URI:
if self.state == self.depth == 0 and tag == "hello":
self.state = 1
elif self.state == self.depth == 1 and tag == "capabilities":
self.state = 2
elif self.state == self.depth == 2 and tag == "capability":
self.state = 3
self.depth += 1
def handleEndElement(self, data):
ns_uri, tag = data.split()
if ns_uri == NC_NS_URI:
if self.state == self.depth == 1 and tag == "hello":
self.state = 0
elif self.state == self.depth == 2 and tag == "capabilities":
self.state = 1
elif self.state == self.depth == 3 and tag == "capability":
self.capabilities.append(Capability(self.buffer))
self.buffer = ""
self.state = 2
self.depth -= 1
def parse(self, fd):
self.parser.ParseFile(fd)
return self
def yang_modules(self):
"""
Return the advertised YANG module names with their revisions,
skipping repeated modules.
"""
res = {}
for c in self.capabilities:
m = c.parameters.get("module")
if m is None or m in res.keys():
continue
res[m] = c.parameters.get("revision")
return res.items()
def yang_implicit_deviation_modules(self):
"""
Return an iterable of deviations to YANG modules which are referenced
but not explicitly advertised as a module.
"""
deviations = set()
advertised_modules = set(dict(self.yang_modules()).keys())
for c in self.capabilities:
deviation_string = c.parameters.get("deviations")
if not deviation_string:
continue
for deviation in deviation_string.split(","):
if not deviation or deviation in advertised_modules:
continue
deviations.add(deviation)
return deviations
def get_features(self, yam):
"""Return list of features declared for module `yam`."""
mcap = [ c for c in self.capabilities
if c.parameters.get("module", None) == yam ][0]
features = mcap.parameters.get("features")
return features.split(",") if features else []
def registered_capabilities(self):
"""Return dictionary of non-YANG capabilities.
Only capabilities from the `CAPABILITIES` dictionary are taken
into account.
"""
return dict ([ (CAPABILITIES[c.id],c) for c in self.capabilities
if c.id in CAPABILITIES ])
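# Added usage sketch (not part of the original module): parse a small <hello>
# message from a byte stream and inspect the advertised capabilities. The
# sample URIs below are illustrative.
if __name__ == "__main__":
    import io
    sample = (
        b'<hello xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">'
        b'<capabilities>'
        b'<capability>urn:ietf:params:netconf:base:1.1</capability>'
        b'<capability>urn:example:yang:acme-system?module=acme-system&amp;revision=2023-01-01</capability>'
        b'</capabilities>'
        b'</hello>'
    )
    hp = HelloParser().parse(io.BytesIO(sample))
    print(hp.registered_capabilities())  # {'base': <Capability instance>}
    print(list(hp.yang_modules()))       # [('acme-system', '2023-01-01')]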
|
import argparse
import numpy as np
import matplotlib.pyplot as plt
import random
from matplotlib.patches import Rectangle
# Usage: python print_solution.py -f <filename>
parser = argparse.ArgumentParser(description='Argument parser')
parser.add_argument("-f", "--file_name", help="Filename", required = True, type=str)
args = parser.parse_args()
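# Added note (assumed input layout, inferred from the parsing in main() below):
#
#   9 12        <- width and height of the paper roll
#   3           <- number of pieces
#   3 4 0 0     <- piece width, piece height, x offset, y offset
#   6 4 3 0
#   9 8 0 4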
def main():
file = open(args.file_name,"r")
# Read the first line which contains the width and the height of the paper roll
first_line = file.readline().strip().split(" ")
width = int(first_line[0])
height = int(first_line[1])
# Read the second line which contains the number of necessary pieces of paper to cut off
number_of_pieces = int(file.readline().strip())
# Read all the remaining lines which contain the horizontal and vertical dimension of the i-th piece of paper
remaining_lines = file.readlines()
# To remove empty lines
remaining_lines = [line.strip() for line in remaining_lines if line.strip()]
pieces = []
colors = ['blue','yellow','red','darkorange','forestgreen','cornflowerblue','silver','tan','lime','dimgrey','aqua','olive','deeppink','violet','chocolate','skyblue','greenyellow','sandybrown','springgreen','orange','brown','darkred','purple','pink','indigo','slateblue','lightsteelblue','lightslategray','moccasin','burlywood']
for i,line in enumerate(remaining_lines):
line = line.split()
pieces.append([int(line[0]),int(line[1]),int(line[2]),int(line[3])])
fig = plt.figure(figsize=(5 + (width//8) ,5 + (height//8)))
ax = fig.gca()
ax.set_title("Solution")
for i in range(number_of_pieces):
color = colors[i]
sq = Rectangle((pieces[i][2],pieces[i][3]),pieces[i][0],pieces[i][1],fill = True,color=color, alpha=.3 )
ax.add_patch(sq)
plt.plot()
plt.xticks(ticks=range(0,width+1))
plt.yticks(ticks=range(0,height+1))
plt.grid(color='black')
plt.show()
file.close()
if __name__ == "__main__":
main()
|
# @l2g 1760 python3
# [1760] Minimum Limit of Balls in a Bag
# Difficulty: Medium
# https://leetcode.com/problems/minimum-limit-of-balls-in-a-bag
#
# You are given an integer array nums where the ith bag contains nums[i] balls.
# You are also given an integer maxOperations.
# You can perform the following operation at most maxOperations times:
#
# Take any bag of balls and divide it into two new bags with a positive number of balls.
#
#
# For example,a bag of 5 balls can become two new bags of 1 and 4 balls,
# or two new bags of 2 and 3 balls.
#
#
#
# Your penalty is the maximum number of balls in a bag.
# You want to minimize your penalty after the operations.
# Return the minimum possible penalty after performing the operations.
#
# Example 1:
#
# Input: nums = [9], maxOperations = 2
# Output: 3
# Explanation:
# - Divide the bag with 9 balls into two bags of sizes 6 and 3. [9] -> [6,3].
# - Divide the bag with 6 balls into two bags of sizes 3 and 3. [6,3] -> [3,3,3].
# The bag with the most number of balls has 3 balls, so your penalty is 3 and you should return 3.
#
# Example 2:
#
# Input: nums = [2,4,8,2], maxOperations = 4
# Output: 2
# Explanation:
# - Divide the bag with 8 balls into two bags of sizes 4 and 4. [2,4,8,2] -> [2,4,4,4,2].
# - Divide the bag with 4 balls into two bags of sizes 2 and 2. [2,4,4,4,2] -> [2,2,2,4,4,2].
# - Divide the bag with 4 balls into two bags of sizes 2 and 2. [2,2,2,4,4,2] -> [2,2,2,2,2,4,2].
# - Divide the bag with 4 balls into two bags of sizes 2 and 2. [2,2,2,2,2,4,2] -> [2,2,2,2,2,2,2,2].
# The bag with the most number of balls has 2 balls, so your penalty is 2 an you should return 2.
#
# Example 3:
#
# Input: nums = [7,17], maxOperations = 2
# Output: 7
#
#
# Constraints:
#
# 1 <= nums.length <= 10^5
# 1 <= maxOperations, nums[i] <= 10^9
#
#
from typing import List
class Solution:
def get_min_operation(self, nums: List[int], cost: int) -> int:
ret: int = 0
for n in nums:
ret += (n // cost) - int((n % cost) == 0)
return ret
def minimumSize(self, nums: List[int], maxOperations: int) -> int:
start, end = 1, max(nums) # min and maximum cost possible
ret: int = end
# binary search
while start < end:
mid = (start + end) // 2
if self.get_min_operation(nums, mid) <= maxOperations:
end = ret = mid
else:
start = (mid) + 1
return ret
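# Added sanity checks (not part of the original solution file): the three
# examples from the problem statement above should all hold for this
# binary search over the penalty.
assert Solution().minimumSize([9], 2) == 3
assert Solution().minimumSize([2, 4, 8, 2], 4) == 2
assert Solution().minimumSize([7, 17], 2) == 7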
if __name__ == "__main__":
import os
import pytest
pytest.main([os.path.join("tests", "test_1760.py")])
|
from tenark.identifier import Identifier
def test_identifier_methods():
abstract_methods = Identifier.__abstractmethods__ # type: ignore
assert 'generate_id' in abstract_methods
|
import json
import threading as thread
import scapy.all as sc
def get_net_config():
"""
Get json configuration file and trusted mac addresses
Returns:
dictionary: list of mac_address and corresponding given name
String : IP configuration for your network with CIDR
String : Broadcast address default is ff:ff:ff:ff:ff:ff
String : Ethernet interface name
"""
with open('data.json') as load_file:
data = json.load(load_file)
return data["mac_list"], data["ip"], data["broadcast"], data["iface"], data["router_data"]
def check_connected(ip_search: str, mac_list, mac_broad: str = "ff:ff:ff:ff:ff:ff"):
"""
Check all devices connected on your WiFi
Args:
ip_search (str): IP Range. Format: ip/cidr
mac_list (dictionary): List of Trusted MAC Addresses.
mac_broad (str, optional): Broadcast Address. Defaults to "ff:ff:ff:ff:ff:ff".
Returns:
dictionary: List of unknown MAC addresses
"""
unknown = {}
req = sc.ARP(pdst=ip_search)
etherpacket = sc.Ether(dst=mac_broad)
broadcast_packet = etherpacket / req
# or you can use this: sc.arping("ip/cidr", verbose=0)
recv_data = sc.srp(broadcast_packet, timeout=2, verbose=False)[0]
for sent_recv in recv_data:
return_packet = sent_recv[1]
if return_packet.hwsrc not in mac_list.values():
unknown[return_packet.psrc] = return_packet.hwsrc
return unknown
def block_mac(target_ip: str, target_mac: str, gateway_ip: str):
"""
Completely Block a Mac Address from connecting.
Args:
target_ip (str): IP of target device.
target_mac (str): MAC of target device.
gateway_ip (str): Gateway IP or your Router.
"""
bad_mac = "12:34:56:78:9A:BC"
packet = sc.ARP(op=2, psrc=gateway_ip, hwsrc=bad_mac,
pdst=target_ip, hwdst=target_mac)
sc.send(packet, verbose=0)
def allow_mac(target_ip: str, target_mac: str, router_ip: str, router_mac: str):
"""
Restore connection of the blocked MAC address.
Args:
target_ip (str): IP of target device.
target_mac (str): MAC address of target device.
router_ip (str): Gateway IP.
router_mac (str): Gateway MAC address.
"""
packet = sc.ARP(op=2, psrc=router_ip, hwsrc=router_mac,
pdst=target_ip, hwdst=target_mac)
sc.send(packet, verbose=1)
def disconnect_device(router_mac: str, target_mac: str, iface: str, count: int):
"""
Force deauthenticate a device.
Args:
router_mac (str): Gateway MAC address.
target_mac (str): MAC address of target device.
iface (str): Ethernet Interface Name.
count (int): Number of packets to be sent.
"""
if count == 0:
count = None
dot11 = sc.Dot11(type=0, subtype=12, addr1=target_mac,
addr2=router_mac, addr3=router_mac)
packet = sc.RadioTap()/dot11/sc.Dot11Deauth(reason=7)
sc.sendp(packet, inter=0.1, count=count, iface=iface, verbose=0)
def is_int(check: int):
"""
Check if value is int
Args:
check (int): value to be checked
Returns:
Bool: If value is int true, else false.
"""
try:
int(check)
return True
except ValueError:
return False
def main():
"""
Run through all connected devices and ask whether to block, poison, or disregard each unknown one.
"""
mac_adds, ip, mac_broadcast, iface, router_data = get_net_config()
unknown_mac = check_connected(ip, mac_adds, mac_broadcast)
for ip, mac in unknown_mac.items():
disconnect = input(f'Block this mac {mac}? (Y to block / P to poison / N to skip) ')
if disconnect.upper() == "Y":
print("Blocking~")
count = input("How many packets?(1 - 100) ")
if is_int(count):
blocking_thread = thread.Thread(
target=disconnect_device, name="Blocking", args=(router_data[0], mac, iface, int(count)))
# blocking_thread.start()
else:
print("Not int")
continue
elif disconnect.upper() == "P":
print("Poisoning~")
poison_thread = thread.Thread(
target=block_mac, name="Aurora", args=(ip, mac, router_data[1]))
# poison_thread.start()
else:
print("Ooookay~")
if __name__ == "__main__":
main()
|
# -*- coding: utf-8 -*-
"""
checkboxes
~~~~~~~~~
TODO: explain this :)
"""
import re
from UserList import UserList
import vim
from orgmode.liborgmode.base import MultiPurposeList, flatten_list
from orgmode.liborgmode.orgdate import OrgTimeRange
from orgmode.liborgmode.orgdate import get_orgdate
from orgmode.liborgmode.dom_obj import DomObj, DomObjList, REGEX_SUBTASK, REGEX_SUBTASK_PERCENT, REGEX_HEADING, REGEX_CHECKBOX
class Checkbox(DomObj):
u""" Structural checkbox object """
STATUS_ON = u'[X]'
STATUS_OFF = u'[ ]'
# intermediate status
STATUS_INT = u'[-]'
def __init__(self, level=1, type=u'-', title=u'', status=u'[ ]', body=None):
u"""
:level: Indent level of the checkbox
:type: Type of the checkbox list (-, +, *)
:title: Title of the checkbox
:status: Status of the checkbox ([ ], [X], [-])
:body: Body of the checkbox
"""
DomObj.__init__(self, level=level, title=title, body=body)
# heading
self._heading = None
self._children = CheckboxList(obj=self)
self._dirty_checkbox = False
# list type
self._type = u'-'
if type:
self.type = type
# status
self._status = Checkbox.STATUS_OFF
if status:
self.status = status
def __unicode__(self):
return u' ' * self.level + self.type + u' ' + \
(self.status + u' ' if self.status else u'') + self.title
def __str__(self):
return self.__unicode__().encode(u'utf-8')
def __len__(self):
# 1 is for the heading's title
return 1 + len(self.body)
def copy(self, including_children=True, parent=None):
u"""
Create a copy of the current checkbox. The checkbox will be completely
detached and not even belong to a document anymore.
:including_children: If True a copy of all children is create as
well. If False the returned checkbox doesn't
have any children.
:parent: Don't use this parameter. It's set
automatically.
"""
checkbox = self.__class__(
level=self.level, title=self.title,
body=self.body[:])
if parent:
parent.children.append(checkbox)
if including_children and self.children:
for item in self.children:
item.copy(
including_children=including_children,
parent=checkbox)
checkbox._orig_start = self._orig_start
checkbox._orig_len = self._orig_len
checkbox._dirty_heading = self.is_dirty_checkbox
return checkbox
@classmethod
def parse_checkbox_from_data(cls, data, heading=None, orig_start=None):
u""" Construct a new checkbox from the provided data
:data: List of lines
:heading: The heading object this checkbox belongs to
:orig_start: The original start of the heading in case it was read
from a document. If orig_start is provided, the
resulting heading will not be marked dirty.
:returns: The newly created checkbox
"""
def parse_title(heading_line):
# checkbox is not heading
if REGEX_HEADING.match(heading_line) is not None:
return None
m = REGEX_CHECKBOX.match(heading_line)
if m:
r = m.groupdict()
return (len(r[u'level']), r[u'type'], r[u'status'], r[u'title'])
return None
if not data:
raise ValueError(u'Unable to create checkbox, no data provided.')
# create new checkbox
nc = cls()
nc.level, nc.type, nc.status, nc.title = parse_title(data[0])
nc.body = data[1:]
if orig_start is not None:
nc._dirty_heading = False
nc._dirty_body = False
nc._orig_start = orig_start
nc._orig_len = len(nc)
if heading:
nc._heading = heading
return nc
def update_subtasks(self, total=0, on=0):
if total != 0:
percent = (on * 100) / total
else:
percent = 0
count = "%d/%d" % (on, total)
self.title = REGEX_SUBTASK.sub("[%s]" % (count), self.title)
self.title = REGEX_SUBTASK_PERCENT.sub("[%d%%]" % (percent), self.title)
d = self._heading.document.write_checkbox(self, including_children=False)
@classmethod
def identify_checkbox(cls, line):
u""" Test if a certain line is a checkbox or not.
:line: the line to check
:returns: indent_level
"""
# checkbox is not heading
if REGEX_HEADING.match(line) is not None:
return None
m = REGEX_CHECKBOX.match(line)
if m:
r = m.groupdict()
return len(r[u'level'])
return None
@property
def is_dirty(self):
u""" Return True if the heading's body is marked dirty """
return self._dirty_checkbox or self._dirty_body
@property
def is_dirty_checkbox(self):
u""" Return True if the heading is marked dirty """
return self._dirty_checkbox
def get_index_in_parent_list(self):
""" Retrieve the index value of current checkbox in the parents list of
checkboxes. This works also for top level checkboxes.
:returns: Index value or None if heading doesn't have a
parent/document or is not in the list of checkboxes
"""
if self.parent:
return super(Checkbox, self).get_index_in_parent_list()
elif self.document:
l = self.get_parent_list()
if l:
return l.index(self)
def get_parent_list(self):
""" Retrieve the parents' list of headings. This works also for top
level headings.
:returns: List of headings or None if heading doesn't have a
parent/document or is not in the list of headings
"""
if self.parent:
return super(Checkbox, self).get_parent_list()
elif self.document:
if self in self.document.checkboxes:
return self.document.checkboxes
def set_dirty(self):
u""" Mark the heading and body dirty so that it will be rewritten when
saving the document """
self._dirty_checkbox = True
self._dirty_body = True
if self._document:
self._document.set_dirty_document()
def set_dirty_checkbox(self):
u""" Mark the checkbox dirty so that it will be rewritten when saving the
document """
self._dirty_checkbox = True
if self._document:
self._document.set_dirty_document()
@property
def previous_checkbox(self):
u""" Serialized access to the previous checkbox """
return super(Checkbox, self).previous_item
@property
def next_checkbox(self):
u""" Serialized access to the next checkbox """
return super(Checkbox, self).next_item
@property
def first_checkbox(self):
u""" Access to the first child heading or None if no children exist """
if self.children:
return self.children[0]
@property
def start(self):
u""" Access to the starting line of the checkbox """
if self.document is None:
return self._orig_start
# static computation of start
if not self.document.is_dirty:
return self._orig_start
# dynamic computation of start, really slow!
        def compute_start(h):
            if h:
                return len(h) + compute_start(h.previous_checkbox)
            # base case: no previous checkbox left; returning None here would
            # raise a TypeError in the addition above
            return 0
        return compute_start(self.previous_checkbox)
def toggle(self):
u""" Toggle status of this checkbox """
if self.status == Checkbox.STATUS_OFF:
self.status = Checkbox.STATUS_ON
else:
self.status = Checkbox.STATUS_OFF
self.set_dirty()
def all_siblings(self):
if not self.parent:
p = self._heading
else:
p = self.parent
if not p.children:
            return
c = p.first_checkbox
while c:
yield c
c = c.next_sibling
        return
def all_children(self):
if not self.children:
            return
c = self.first_checkbox
while c:
yield c
for d in c.all_children():
yield d
c = c.next_sibling
        return
def all_siblings_status(self):
u""" Return checkboxes status for currnet checkbox's all siblings
:return: (total, on)
total: total # of checkboxes
on: # of checkboxes which are on
"""
total, on = 0, 0
for c in self.all_siblings():
if c.status is not None:
total += 1
if c.status == Checkbox.STATUS_ON:
on += 1
return (total, on)
def are_children_all(self, status):
u""" Check all children checkboxes status """
clen = len(self.children)
for i in range(clen):
if self.children[i].status != status:
return False
# recursively check children's status
if not self.children[i].are_children_all(status):
return False
return True
def is_child_one(self, status):
u""" Return true, if there is one child with given status """
clen = len(self.children)
for i in range(clen):
if self.children[i].status == status:
return True
return False
def are_siblings_all(self, status):
u""" Check all sibling checkboxes status """
for c in self.all_siblings():
if c.status != status:
return False
return True
def level():
u""" Access to the checkbox indent level """
def fget(self):
return self._level
def fset(self, value):
self._level = int(value)
self.set_dirty_checkbox()
def fdel(self):
            self._level = None
return locals()
level = property(**level())
def title():
u""" Title of current checkbox """
def fget(self):
return self._title.strip()
def fset(self, value):
if type(value) not in (unicode, str):
raise ValueError(u'Title must be a string.')
v = value
if type(v) == str:
v = v.decode(u'utf-8')
self._title = v.strip()
self.set_dirty_checkbox()
def fdel(self):
self.title = u''
return locals()
title = property(**title())
def status():
u""" status of current checkbox """
def fget(self):
return self._status
def fset(self, value):
self._status = value
self.set_dirty()
def fdel(self):
self._status = u''
return locals()
status = property(**status())
def type():
u""" type of current checkbox list type """
def fget(self):
return self._type
def fset(self, value):
self._type = value
def fdel(self):
self._type = u''
return locals()
type = property(**type())
class CheckboxList(DomObjList):
u"""
Checkbox List
"""
def __init__(self, initlist=None, obj=None):
"""
:initlist: Initial data
:obj: Link to a concrete Checkbox or Document object
"""
        # it's not necessary to register an on_change hook because the checkbox
        # list will itself take care of marking checkboxes dirty or adding
        # checkboxes to the deleted checkboxes list
DomObjList.__init__(self, initlist, obj)
@classmethod
def is_checkbox(cls, obj):
return CheckboxList.is_domobj(obj)
def _get_heading(self):
if self.__class__.is_checkbox(self._obj):
return self._obj._document
return self._obj
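# Minimal usage sketch (assumptions: REGEX_CHECKBOX accepts org-mode style lines
# such as u' - [ ] buy milk'; the checkbox is used standalone, i.e. without a
# heading or document, so sibling/child helpers are not exercised here).
def _example_checkbox_toggle():
    cb = Checkbox.parse_checkbox_from_data([u' - [ ] buy milk'])
    cb.toggle()  # switches STATUS_OFF -> STATUS_ON and marks the checkbox dirty
    return cb.status, cb.title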
# vim: set noexpandtab:
|
def main():
message = 'Hello World'
print(message)
if __name__ == '__main__':
main()
|
print('* ' * 4)
print('* ', ' ' * 2, sep='', end='*\n')
print('* ', ' ' * 2, sep='', end='*\n')
print('* ' * 4)
|
# =============================================================================
# Adds features to the data obtained from the simulation to train a machine
# learning model.
# =============================================================================
import pandas as pd
import json
import networkx as nx
import os
import re
import numpy as np
root_project = re.findall(r'(^\S*TFM)', os.getcwd())[0]
def get_data():
"""
    Gets the necessary data for computing the features.
Returns
-------
graph : networkx.graph
df_info : pandas.DataFrame
alpha3_to_alpha2 : dictionary
"""
graph = nx.read_gpickle(
f'{root_project}/data/interim/routes_countries.gpickle')
df_info = pd.read_pickle(
f'{root_project}/data/interim/country_info_final.pickle')
with open(f'{root_project}/data/interim/alpha3_to_alpha2.txt', 'r') as file:
alpha3_to_alpha2 = json.load(file)
return graph, df_info, alpha3_to_alpha2
def feature_graph(df):
"""
    Adds to the dataframe features derived from the graph that represents the
    connections between countries.
Parameters
----------
df : pandas.DataFrame
Returns
-------
df : pandas.DataFrame
"""
data = get_data()
graph = data[0]
alpha3_to_alpha2 = data[2]
degree = dict(nx.degree_centrality(graph))
betw = nx.betweenness_centrality(graph)
closeness = nx.closeness_centrality(graph)
df['iso2'] = df['i_country'].map(alpha3_to_alpha2)
df['betweenness'] = df['iso2'].map(betw)
df['degree'] = df['iso2'].map(degree)
df['closeness'] = df['iso2'].map(closeness)
df.drop(labels='iso2', axis=1, inplace=True)
return df
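# Minimal sketch (illustrative only, hypothetical ISO2 codes): the same centrality
# maps used in feature_graph, applied to a toy three-country graph.
def _example_centrality_features():
    toy_graph = nx.Graph([('ES', 'FR'), ('FR', 'DE')])
    toy_df = pd.DataFrame({'iso2': ['ES', 'FR', 'DE']})
    toy_df['degree'] = toy_df['iso2'].map(nx.degree_centrality(toy_graph))
    toy_df['betweenness'] = toy_df['iso2'].map(nx.betweenness_centrality(toy_graph))
    toy_df['closeness'] = toy_df['iso2'].map(nx.closeness_centrality(toy_graph))
    return toy_df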
def feature_pop(df):
"""
    Adds to the dataframe the population of the initial country where the
    disease begins.
Parameters
----------
df : pandas.DataFrame
Returns
-------
df : pandas.DataFrame
"""
df_info = get_data()[1]
dict_pop_country = df_info[['country_code', 'total_pop']].set_index(
'country_code').iloc[:, 0].to_dict()
df['country_pop'] = df['i_country'].map(dict_pop_country)
return df
def feature_total_dep(df):
"""
    Adds to the dataframe the total daily departures from the initial country
    where the disease begins.
Parameters
----------
df : pandas.DataFrame
Returns
-------
df : pandas.DataFrame
"""
df_info = get_data()[1]
df_info['total_departures'] = df_info['departures/day'].apply(
lambda x: np.array(list(x.values())).sum())
dict_total_dep = df_info[['country_code', 'total_departures']].set_index(
'country_code').iloc[:, 0].to_dict()
df['country_departures'] = df['i_country'].map(dict_total_dep)
return df
def feature_exposed_pop(df):
"""
Adds the total population of the countries to which an individual can travel
    from the initial country. This is the population most exposed to the disease
    apart from the initial country.
Parameters
----------
df : pandas.DataFrame
Returns
-------
df : pandas.DataFrame
"""
df_info = get_data()[1]
pop_dict = df_info[['country_code', 'total_pop']].set_index(
'country_code').iloc[:, 0].to_dict()
df_info['exposed_population'] = df_info['departures/day'].apply(
lambda x: np.array([pop_dict[country] for country in x.keys()]).sum())
exposed_dict = df_info[['country_code', 'exposed_population']].set_index(
'country_code').iloc[:, 0].to_dict()
df['exposed_pop'] = df['i_country'].map(exposed_dict)
return df
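# Minimal sketch (hypothetical numbers): the exposed population is the sum of the
# populations of all destinations reachable from the initial country, exactly as
# feature_exposed_pop derives it from the departures/day dictionaries.
def _example_exposed_population():
    pop_dict = {'FR': 67.0e6, 'DE': 83.0e6}
    departures = {'FR': 120.0, 'DE': 80.0}
    return np.array([pop_dict[country] for country in departures.keys()]).sum()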
def feature_transf_log(df):
"""
    Applies a logarithmic transformation to the disease features.
Parameters
----------
df : pandas.DataFrame
Returns
-------
df : pandas.DataFrame
"""
    # Replace 0 by an infinitesimal number to avoid -infinity
df['inf_pow_1_log'] = np.log(
df['inf_pow_1'].replace(
0, np.finfo(float).eps))
df['inf_pow_2_log'] = np.log(
df['inf_pow_2'].replace(
0, np.finfo(float).eps))
df['mort_pow_1_log'] = np.log(
df['mort_pow_1'].replace(
0, np.finfo(float).eps))
df['mort_pow_2_log'] = np.log(
df['mort_pow_2'].replace(
0, np.finfo(float).eps))
df['mort_pow_3_log'] = np.log(
df['mort_pow_3'].replace(
0, np.finfo(float).eps))
return df
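# Minimal sketch: replacing 0 with machine epsilon keeps np.log finite, since
# np.log(0) would evaluate to -inf and break the transformed features.
def _example_log_transform():
    s = pd.Series([0.0, 0.5, 2.0])
    return np.log(s.replace(0, np.finfo(float).eps))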
def add_features(df):
"""
Adds all the features to the dataframe
Parameters
----------
df : pandas.DataFrame
Returns
-------
df : pandas.DataFrame
"""
df = feature_graph(df)
df = feature_pop(df)
df = feature_total_dep(df)
df = feature_exposed_pop(df)
df = feature_transf_log(df)
return df
if __name__ == '__main__':
df_v1 = pd.read_csv(
f"{root_project}/data/processed/simulation_results_v1.csv")
df_v2 = pd.read_csv(
f"{root_project}/data/processed/simulation_results_v2.csv")
# more simulated data
df_v1_rev = pd.read_csv(
f"{root_project}/data/processed/simulation_results_v1_rev.csv")
df_v2_rev = pd.read_csv(
f"{root_project}/data/processed/simulation_results_v2_rev.csv")
print(f"Total number of samples: {len(df_v1) + len(df_v2)}")
print(f"Total number of samples (rev): {len(df_v1) + len(df_v2) + len(df_v1_rev) + len(df_v2_rev)} ")
df_v1 = add_features(df_v1)
test_size= 120000
    df_test = df_v1.iloc[:test_size]  # Test set composed of v1
df_v1_train_val = df_v1.iloc[test_size:]
df_v2_train_val = add_features(df_v2)
df_train_val = pd.concat([df_v1_train_val,
df_v2_train_val],
ignore_index=True)
# Train-validation set with new data
df_train_val_rev = pd.concat([df_train_val,
add_features(df_v1_rev),
add_features(df_v2_rev)],
ignore_index=True)
    # Train and validation set composed of v1 and v2
df_train_val = df_train_val.sample(frac=1).reset_index(drop=True)
df_train_val_rev = df_train_val_rev.sample(frac=1).reset_index(drop=True)
print(f"Test size: {df_test.shape[0]}")
print(f"Train validation size (v1): {df_v1_train_val.shape[0]}")
print(f"Train validation size (v2): {df_v2_train_val.shape[0]}")
df_test.to_pickle(
f"{root_project}/data/processed/test_set.pickle")
df_v1_train_val.to_pickle(
f"{root_project}/data/processed/train_val_set_v1.pickle")
df_v2_train_val.to_pickle(
f"{root_project}/data/processed/train_val_set_v2.pickle")
df_train_val.to_pickle(
f"{root_project}/data/processed/train_val_set.pickle")
df_train_val_rev.to_pickle(
f"{root_project}/data/processed/train_val_set_rev.pickle")
# Uncomment to make .csv files additionally to pickle
# df_test.to_csv(
# f"{root_project}/data/processed/test_set.csv", index=False)
# df_v1_train_val.to_csv(
# f"{root_project}/data/processed/train_val_set_v1.csv", index=False)
# df_v2_train_val.to_csv(
# f"{root_project}/data/processed/train_val_set_v2.csv", index=False)
    # df_train_val.to_csv(
# f"{root_project}/data/processed/train_val_set.csv", index=False)
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Testcode for Android RPC.
To use it, start an RPC tracker with "python -m tvm.exec.rpc_tracker".
Use the tracker's address and port when configuring the RPC app.
Use "Adreno640" as the key if you wish to avoid modifying this script.
"""
import tvm
from tvm import te
import os
from tvm import rpc
from tvm.contrib import utils, ndk
import numpy as np
from tvm import te, topi, testing
from tvm.topi.testing import conv2d_nchw_python
import extern_op
# Set to be address of tvm proxy.
tracker_host = '127.0.0.1'
tracker_port = 9090
key = "MaliG76"
# Change target configuration.
# Run `adb shell cat /proc/cpuinfo` to find the arch.
arch = "arm64"
target_host = "llvm -mtriple=%s-linux-android" % arch
# whether to execute the test on the OpenCL target
test_opencl = True
# whether to execute the test on the Vulkan target
test_vulkan = False
print(os.getpid())
def conv2d_no_batching(N, H, W, CO, CI, KH, KW, stride, padding):
assert N == 1, "Only consider batch_size = 1 in this template"
#cfg = autotvm.get_config()
#cfg.define_knob("idtype",[0,1])
#cfg.define_knob("kdtype",[0,1])
#cfg.define_knob("wdtype",[0,2])
typedict = {0:"float32",1:"climgfloatr32",2:"climgfloatw32"}
# Algorithm
ddtype = "climgfloatr32"
batch = N
in_channel = CI
out_channel = CO
in_size = H
kernel = KW
pad = padding[0]
stride = stride[0]
open_image=1
ddtype='float32'
if open_image == 1:
ddtype = "climgfloatr32"
# Algorithm
PACK4 = 4
#PACK2=2
PACK2=1
W_P = in_size//PACK2
H_P = in_size//PACK2
C_P = in_channel//PACK4
K_P=out_channel//PACK4
A = te.placeholder((C_P,H_P,W_P*PACK4), dtype=ddtype,name="A")
W = te.placeholder((in_channel,out_channel), dtype=ddtype, name="W")
out_size = in_size
# Pad input
Apad = A
# Create reduction variables
rc = te.reduce_axis((0, in_channel), name="rc")
# Compute the convolution
idxdiv = tvm.tir.indexdiv
idxmod = tvm.tir.indexmod
B = te.compute(
(K_P, H_P,W_P*PACK4),
lambda ff,yy,xx_p4: extern_op.mysum(
#lambda ff,yy,xx_p4: te.sum(
#Apad[idxdiv(rc,4),yy,idxmod(rc,4)+idxdiv(xx_p4,4)*4] * W[rc,ff*PACK4+idxmod(xx_p4,4)], axis=[rc]
extern_op.mymul(Apad[idxdiv(rc,4),yy,idxmod(rc,4)+idxdiv(xx_p4,4)*4] , W[rc,ff*PACK4+idxmod(xx_p4,4)]), axis=[rc]
),
name="B",
)
if open_image ==1:
B.dtype="climgfloatw32"
# Designate the memory hierarchy
s = te.create_schedule(B.op)
#s[Apad].compute_inline() # compute Apad inline
#AA = s.cache_read(Apad, "shared", [B])
#WW = s.cache_read(W, "shared", [B])
AL = s.cache_read(Apad, "local", [B])
WL = s.cache_read(W, "local", [B])
BL = s.cache_write(B, "local")
# tile consts
tile = 1
num_thread = 4
block_factor = tile * num_thread
step = 4
vthread = 2
# Get the GPU thread indices
block_x = te.thread_axis("blockIdx.x")
block_y = te.thread_axis("blockIdx.y")
block_z = te.thread_axis("blockIdx.z")
thread_x = te.thread_axis( "threadIdx.x")
thread_y = te.thread_axis("threadIdx.y")
thread_z = te.thread_axis("threadIdx.z")
thread_xz = te.thread_axis("vthread", name="vx")
thread_yz = te.thread_axis("vthread", name="vy")
# Split the workloads
kp, hp, wp_p4 = s[B].op.axis
wp,wp4=s[B].split(wp_p4,factor=4*2)
hpo,hpii=s[B].split(hp,factor=2)
########
hpo,hpi=s[B].split(hpo,factor=block_factor)
wpo,wpi=s[B].split(wp,factor=block_factor//2)
kpo,kpi=s[B].split(kp,factor=block_factor*16)
#target = 'llvm -mcpu=core-avx2'
#func = tvm.build(s, [A, W, B], target=target, name='mmult')
#np.random.seed(5)
#a_np = np.random.uniform(size=(C_P,H_P,W_P,PACK4)).astype("float32")
#w_np = np.random.uniform(size=(in_channel,out_channel)).astype("float32")
#ctx = tvm.context(target, 0)
#a = tvm.nd.array(a_np, ctx,dtype=A.dtype)
#w = tvm.nd.array(w_np, ctx,dtype=W.dtype)
#b = tvm.nd.array(np.zeros((K_P, H_P,W_P,PACK4), dtype="float32"), ctx, dtype=B.dtype)
#func(a, w, b)
#exit(0)
#
# Bind the iteration variables to GPU thread indices
s[B].bind(hpo, block_y)
s[B].bind(wpo, block_x)
s[B].bind(kpo, block_z)
#tyz, fi = s[B].split(fi, nparts=vthread) # virtual thread split
#txz, wi = s[B].split(wi, nparts=vthread) # virtual thread split
#ty, fi = s[B].split(hpi, nparts=num_thread)
#tx, wi = s[B].split(wpi, nparts=num_thread)
#tx, wi = s[B].split(kpi, nparts=num_thread)
#s[B].reorder(bz, by, bx, tyz, txz, ty, fi, ni)
#s[B].reorder(bz,by,bx,wi, fi,ty)
#s[B].reorder(bz,by,bx,ty,wi, fi)
#s[B].bind(tyz, thread_yz)
#s[B].bind(txz, thread_xz)
s[B].bind(hpi, thread_y)
s[B].bind(wpi, thread_x)
s[B].bind(kpi, thread_z)
s[B].reorder(wpi,hpo,kpo,hpi,wpo,hpii,kpi)
# Schedule BL local write
s[BL].compute_at(s[B],kpi)
s[B].reorder(kpi,hpii)
kp, hp, wp_p4 = s[BL].op.axis
#wpo,wpi=s[BL].split(wp,factor=2)
#hpo,hpi=s[BL].split(hp,factor=2)
#s[B].reorder(wpo,hpo,kp,wpi,hpi,p4)
wp,p4 = s[BL].split(wp_p4, factor=4)
s[BL].reorder(hp,wp,p4)
whp = s[BL].fuse(wp,hp)
rc, = s[BL].op.reduce_axis
rco,rci=s[BL].split(rc,factor=4)
#s[BL].reorder(rco,rci,whp, p4)
s[BL].reorder(rco,whp,rci,p4)
s[BL].vectorize(p4) # vectorize memory load
#s[BL].vectorize(rci) # vectorize memory load
#s[BL].unroll(p4)
s[BL].unroll(whp)
s[BL].unroll(rci)
# Attach computation to iteration variables
#s[AA].compute_at(s[BL], rco)
#s[WW].compute_at(s[BL], rco)
s[AL].compute_at(s[BL], rco)
s[WL].compute_at(s[BL], rco)
kp, hp, wp_p4 = s[AL].op.axis
#ty, ci = s[AA].split(ci, nparts=num_thread)
#tx, ni = s[AA].split(ni, nparts=num_thread)
wpo, wpi = s[AL].split(wp_p4, factor=4)
#s[AA].reorder(ty, tx, yi, xi, ci, ni)
#s[AA].bind(ty, thread_y)
#s[AA].bind(tx, thread_x)
s[AL].vectorize(wpi) # vectorize memory load
s[AL].unroll(wpo)
s[AL].unroll(hp)
# Schedule for W's shared memory load
kp, cp = s[WL].op.axis
#ty, ci = s[WL].split(ci, nparts=num_thread)
cpo, cpi = s[WL].split(cp, factor=4)
#tx, fi = s[WW].split(fi, nparts=num_thread)
#s[WW].reorder(ty, tx, yi, xi, ci, fi)
#s[WW].bind(ty, thread_y)
#s[WW].bind(tx, thread_x)
s[WL].vectorize(cpi) # vectorize memory load
s[WL].unroll(kp)
wpio,wpii = s[B].split(wp4, factor=4)
s[B].vectorize(wpii) # vectorize memory load
s[B].unroll(wpio) # vectorize memory load
s[B].unroll(hpii) # vectorize memory load
#print(tvm.lower(s, [A, W, B], simple_mode=True))
#exit(0)
###############################################################################
    # Generate the OpenCL kernel
    # --------------------------
    #
    # Finally we use TVM to generate and compile the OpenCL kernel and evaluate
    # the latency of the convolution.
#
#print(tvm.lower(s, [A, W, B], simple_mode=True))
#exit(0)
return s, [A,W, B]
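# Minimal sketch (illustrative only): the te.compute / te.reduce_axis pattern used
# above, stripped of the image-layout packing and expressed as a plain
# matrix-vector product with the stock te.sum reduction.
def _example_te_reduction(n=4):
    A = te.placeholder((n, n), name="A")
    x = te.placeholder((n,), name="x")
    k = te.reduce_axis((0, n), name="k")
    y = te.compute((n,), lambda i: te.sum(A[i, k] * x[k], axis=k), name="y")
    return te.create_schedule(y.op), [A, x, y]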
def test_rpc_module():
# graph
open_image=1
ddtype='float32'
if open_image == 1:
ddtype = "climgfloatr32"
#n_num=1024
#n = tvm.runtime.convert(1024)
#A = te.placeholder((n,n), name="A", dtype=ddtype)
#B = te.compute(A.shape, lambda i,j: A(i,j) + 1.0, name="B")
#B.dtype = "float32"
#a_np = np.random.uniform(size=(n_num,n_num)).astype("float32")
temp = utils.tempdir()
## Establish remote connection with target hardware
tracker = rpc.connect_tracker(tracker_host, tracker_port)
remote = tracker.request(key, priority=0, session_timeout=60)
## Compile the Graph for CPU target
#s = te.create_schedule(B.op)
#xo, xi = s[B].split(B.op.axis[0], factor=64)
#s[B].parallel(xi)
#s[B].pragma(xo, "parallel_launch_point")
#s[B].pragma(xi, "parallel_barrier_when_finish")
#f = tvm.build(s, [A, B], target, name="myadd_cpu")
#path_dso_cpu = temp.relpath("cpu_lib.so")
#f.export_library(path_dso_cpu, ndk.create_shared)
# Execute the portable graph on cpu target
#print("Run CPU test ...")
#ctx = remote.cpu(0)
#remote.upload(path_dso_cpu)
#f2 = remote.load_module("cpu_lib.so")
#a = tvm.nd.array(a_np, ctx, A.dtype)
#b = tvm.nd.array(np.zeros((n_num,n_num), dtype="float32"), ctx)
#time_f = f2.time_evaluator(f2.entry_name, ctx, number=10)
#cost = time_f(a, b).mean
#print("%g secs/op\n" % cost)
#np.testing.assert_equal(b.asnumpy(), a.asnumpy() + 1)
print("test opencl")
# Compile the Graph for OpenCL target
if test_opencl:
N, H, W, CO, CI, KH, KW, strides, padding = 1, 64, 64, 512, 256, 1, 1, (1, 1), (0, 0)
PACK4 = 4
W_P = H
H_P = H
C_P = CI//PACK4
K_P = CO//PACK4
with tvm.target.Target("opencl"):
s, arg_bufs = conv2d_no_batching(N, H, W, CO, CI, KH, KW, strides, padding)
lib = tvm.build(s, arg_bufs, target_host=target_host)
        f = lib
        if len(f.imported_modules) > 0:
            print(f.imported_modules[0].get_source())
        else:
            print("source not imported")
path_dso_cl = temp.relpath("dev_lib_cl.so")
filename="dev_lib_cl.so"
f.export_library(path_dso_cl, ndk.create_shared)
remote.upload(temp.relpath(filename))
a_np = np.arange(C_P*H_P*W_P*PACK4).reshape(C_P*PACK4,H_P*W_P)
w_np = np.arange(CI*CO).reshape(K_P*4,C_P*4)
a_tvm=a_np.T.reshape(C_P*H_P*W_P,4)
B1=a_tvm[0::C_P,:]
for i in range(C_P-1):
B1=np.vstack((B1,a_tvm[i+1::C_P,:]))
a_tvm_np=B1.reshape(C_P,H_P,W_P*PACK4)*1.0
w_tvm_np=w_np.T*1.0
# calculate the right answer
A=a_np.astype("float32")
W_NP=w_np.astype("float32")
C=W_NP.dot(A)
C=C.reshape(K_P*PACK4,H_P*W_P).T.reshape(K_P*H_P*W_P,4)
B1=C[0::K_P,:]
for i in range(K_P-1):
B1=np.vstack((B1,C[i+1::K_P,:]))
c_np=B1.reshape(K_P,H_P*W_P*PACK4)
#c_np = conv2d_nchw_python(a_np, w_np, strides, padding)
target = tvm.target.Target("opencl")
ctx = remote.context(str(target), 0)
rlib = remote.load_module(filename)
a_tvm = tvm.nd.array(a_tvm_np, ctx=ctx, dtype = arg_bufs[0].dtype)
w_tvm = tvm.nd.array(w_tvm_np, ctx=ctx, dtype = arg_bufs[1].dtype)
c_tvm = tvm.nd.empty((K_P, H_P,W_P*PACK4), ctx=ctx,dtype = arg_bufs[2].dtype)
time_f = rlib.time_evaluator(rlib.entry_name, ctx, number=3)
cost = time_f(a_tvm, w_tvm, c_tvm).mean
print("Time cost of this operator: %f,%f GFLOPs" % (cost,1* H* W* 512* 256*2/cost/1000000000))
c_tvm = c_tvm.asnumpy().reshape(K_P,H_P*W_P*PACK4)
tvm.testing.assert_allclose(c_np, c_tvm, rtol=1e-2)
print("answer check passed")
if __name__ == "__main__":
test_rpc_module()
|
def fun_callback(input):
print('fun_callback sum: ',input)
return
def fun_call(one, two, f_callback):
result = one + two
f_callback(result)
return
first = 10
second = 20
fun_call(first, second, fun_callback) |
#coding=utf-8
import numpy as np
import math
# Filter abusive comments on a message board
def loadDataSet():
    # Create the toy data set
postingList = [ ['my', 'dog', 'has', 'flea', 'problems', 'help', 'please'],
['maybe', 'not', 'take', 'him', 'to', 'dog', 'park', 'stupid'],
['my', 'dalmation', 'is', 'so', 'cute', 'I', 'love', 'him'],
['stop', 'posting', 'stupid', 'worthless', 'garbage'],
['mr', 'licks', 'ate', 'my', 'steak', 'how', 'to', 'stop', 'him'],
['quit', 'buying', 'worthless', 'dog', 'food', 'stupid']]
    # Class labels: 1 - contains abusive words, 0 - does not
classVec = [0,1,0,1,0,1]
return postingList, classVec
def createVocabList(dataset):
'''
    Build the list of all unique words that appear in dataset
'''
    # Use a set to remove duplicate words
vocabSet = set([])
for document in dataset:
        # | is the set union operator
vocabSet = vocabSet | set(document)
return list(vocabSet)
def setOfWords2Vec(vocabList, inputSet):
'''
    Set-of-words model:
    converts the input document into a 0/1 word vector of length len(vocabList)
'''
returnVec = [0]*len(vocabList)
for word in inputSet:
if word in vocabList:
returnVec[vocabList.index(word)] = 1
else:
print('the word: %s is not in the vocabulary' % word)
return returnVec
def bagOfWords2VecMN(vocabList, inputSet):
'''
    Bag-of-words model:
    also counts how many times each word occurs
'''
returnVec = [0]*len(vocabList)
for word in inputSet:
if word in vocabList:
returnVec[vocabList.index(word)] += 1
else:
            print('the word: {} is not in the vocabulary'.format(word))
    return returnVec
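# Minimal sketch (tiny vocabulary): the set-of-words vector only records presence,
# while the bag-of-words vector counts repetitions, e.g. ([1, 1, 0], [2, 1, 0]).
def _example_word_vectors():
    vocab = ['my', 'dog', 'stupid']
    doc = ['my', 'dog', 'my']
    return setOfWords2Vec(vocab, doc), bagOfWords2VecMN(vocab, doc)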
def trainNB0(trainMatrix, trainCategory):
'''
    Train a naive Bayes classifier for text:
    p(c|x) = p(x|c)*p(c)/p(x)
    p(x) does not need to be computed when comparing classes
'''
numTrainDocs = len(trainMatrix)
numWords = len(trainMatrix[0])
#p(c=1) p(c=0)=1-p(c=1)
pAbusive = sum(trainCategory) / float(numTrainDocs)
    # Laplace smoothing: initialize counts with ones to avoid zero probabilities
# p0Num = np.zeros(numWords)
# p1Num = np.zeros(numWords)
# p0Denom = 0
# p1Denom = 0
p0Num = np.ones(numWords)
p1Num = np.ones(numWords)
    p0Denom = 2.0  # 2 because each attribute can take two possible values
p1Denom = 2.0
for i in range(numTrainDocs):
#p(x|c)
if trainCategory[i] == 1:
p1Num += trainMatrix[i]
p1Denom += sum(trainMatrix[i])
else:
p0Num += trainMatrix[i]
p0Denom += sum(trainMatrix[i])
    # Take logarithms to prevent underflow
p1Vect = np.log(p1Num / p1Denom)
p0Vect = np.log(p0Num / p0Denom)
return p0Vect, p1Vect, pAbusive
def classifyNB(vec2Classify, p0Vec, p1Vec, pClass1):
'''
    Classify a document vector with the trained naive Bayes model
'''
p1 = sum(vec2Classify*p1Vec) + math.log(pClass1)
p0 = sum(vec2Classify*p0Vec) + math.log(1.0 - pClass1)
if p1>p0:
return 1
else:
return 0
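# Minimal sketch (made-up numbers): comparing the classes in log space, as
# classifyNB does, is equivalent to comparing the products of the raw probabilities.
def _example_log_space_comparison():
    p_x_given_c1, p_c1 = 0.02, 0.5
    p_x_given_c0, p_c0 = 0.01, 0.5
    score1 = math.log(p_x_given_c1) + math.log(p_c1)
    score0 = math.log(p_x_given_c0) + math.log(p_c0)
    return 1 if score1 > score0 else 0  # -> 1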
def testingNB():
listOPosts, listClasses = loadDataSet()
myVocabList = createVocabList(listOPosts)
trainMat = []
for postinDoc in listOPosts:
trainMat.append(setOfWords2Vec(myVocabList, postinDoc))
    # The training function expects np.ndarray inputs
p0V, p1V, pAb = trainNB0(np.array(trainMat), np.array(listClasses))
testEntry = ['love', 'my', 'dalmation']
thisDoc = np.array(setOfWords2Vec(myVocabList, testEntry))
print(testEntry, 'classified as ', classifyNB(thisDoc, p0V, p1V, pAb))
testEntry = ['stupid', 'garbage']
thisDoc = np.array(setOfWords2Vec(myVocabList, testEntry))
print(testEntry, 'classified as ', classifyNB(thisDoc, p0V, p1V, pAb))
# Classify spam e-mails with naive Bayes
def textParse(bigString):
import re
    # split on non-word characters, keep lower-cased tokens longer than two characters
    listOfTokens = re.split(r'\W+', bigString)
    return [tok.lower() for tok in listOfTokens if len(tok) > 2]
def spamTest():
docList = []; classList = []; fullText = []
for i in range(1,26):
wordList = textParse(open('email/spam/%d.txt' %i, 'r', encoding='gb18030').read())
docList.append(wordList)
fullText.extend(wordList)
classList.append(1)
        # reading these files as utf-8 raises an error, so gb18030 is used
wordList = textParse(open('email/ham/%d.txt' %i, 'r', encoding='gb18030').read())
docList.append(wordList)
fullText.extend(wordList)
classList.append(0)
vocabList = createVocabList(docList)
trainingSet = list(range(50)); testSet = []
for i in range(10):
#np.random.choice(trainingSet)
randIndex = int(np.random.uniform(0, len(trainingSet)))
testSet.append(trainingSet[randIndex])
del(trainingSet[randIndex])
trainMat = []; trainClasses = []
for docIndex in trainingSet:
trainMat.append(setOfWords2Vec(vocabList, docList[docIndex]))
trainClasses.append(classList[docIndex])
p0V, p1V, pSpam = trainNB0(np.array(trainMat), np.array(trainClasses))
errorCount = 0
for docIndex in testSet:
wordVector = setOfWords2Vec(vocabList, docList[docIndex])
if classifyNB(np.array(wordVector), p0V, p1V, pSpam) != classList[docIndex]:
errorCount += 1
# print("分类错误的测试集:", docList[docIndex])
print('the error rate is: ', float(errorCount)/ len(testSet))
# Using naive Bayes to discover regional tendencies from personal ads
if __name__ == '__main__':
# testingNB()
spamTest() |
import os, sys
root=os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(root)
import pytest
from langrank import prepare_train_file, train
def test_train():
langs = ["aze", "ben", "fin"]
datasets = [os.path.join(root, "sample-data", "ted-train.orig.{}".format(l)) for l in langs]
seg_datasets = [os.path.join(root, "sample-data", "ted-train.orig.spm8000.{}".format(l)) for l in langs]
rank = [[0, 1, 2], [1, 0, 2], [2, 1, 0]] # random
tmp_dir = "tmp"
prepare_train_file(datasets=datasets, segmented_datasets=seg_datasets,
langs=langs, rank=rank, tmp_dir=tmp_dir, task="MT")
output_model = "{}/model.txt".format(tmp_dir)
train(tmp_dir=tmp_dir, output_model=output_model)
assert os.path.isfile(output_model)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2018-07-16 07:34
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('profiles', '0012_auto_20180716_0733'),
]
operations = [
migrations.RemoveField(
model_name='botprofile',
name='icone',
),
migrations.AddField(
model_name='botprofile',
name='icon',
field=models.CharField(default='fas fa-robot', max_length=64),
),
]
|
# BSD 3-Clause License; see https://github.com/scikit-hep/uproot4/blob/main/LICENSE
import os
import numpy as np
import pytest
import skhep_testdata
import uproot
import uproot.writing
ROOT = pytest.importorskip("ROOT")
def test_copy(tmp_path):
original = os.path.join(tmp_path, "original.root")
newfile = os.path.join(tmp_path, "newfile.root")
f1 = ROOT.TFile(original, "recreate")
h1 = ROOT.TH1F("h1", "title", 8, -3.14, 2.71)
h1.SetBinContent(0, 0.0)
h1.SetBinContent(1, 1.1)
h1.SetBinContent(2, 2.2)
h1.SetBinContent(3, 3.3)
h1.SetBinContent(4, 4.4)
h1.SetBinContent(5, 5.5)
h1.SetBinContent(6, 6.6)
h1.SetBinContent(7, 7.7)
h1.SetBinContent(8, 8.8)
h1.SetBinContent(9, 9.9)
h1.Write()
f1.Close()
with uproot.open(original) as fin:
h2 = fin["h1"]
with uproot.recreate(newfile) as fout:
fout["h1"] = h2
f3 = ROOT.TFile(newfile)
h3 = f3.Get("h1")
assert h3.GetBinContent(0) == pytest.approx(0.0)
assert h3.GetBinContent(1) == pytest.approx(1.1)
assert h3.GetBinContent(2) == pytest.approx(2.2)
assert h3.GetBinContent(3) == pytest.approx(3.3)
assert h3.GetBinContent(4) == pytest.approx(4.4)
assert h3.GetBinContent(5) == pytest.approx(5.5)
assert h3.GetBinContent(6) == pytest.approx(6.6)
assert h3.GetBinContent(7) == pytest.approx(7.7)
assert h3.GetBinContent(8) == pytest.approx(8.8)
assert h3.GetBinContent(9) == pytest.approx(9.9)
f3.Close()
def test_from_old(tmp_path):
newfile = os.path.join(tmp_path, "newfile.root")
with uproot.open(skhep_testdata.data_path("uproot-histograms.root")) as fin:
one = fin["one"]
with uproot.recreate(newfile) as fout:
fout["one"] = one
f1 = ROOT.TFile(newfile)
h1 = f1.Get("one")
assert h1.GetBinContent(0) == 0
assert h1.GetBinContent(1) == 68
assert h1.GetBinContent(2) == 285
assert h1.GetBinContent(3) == 755
assert h1.GetBinContent(4) == 1580
assert h1.GetBinContent(5) == 2296
assert h1.GetBinContent(6) == 2286
assert h1.GetBinContent(7) == 1570
assert h1.GetBinContent(8) == 795
assert h1.GetBinContent(9) == 289
assert h1.GetBinContent(10) == 76
assert h1.GetBinContent(11) == 0
f1.Close()
def test_new_name(tmp_path):
newfile = os.path.join(tmp_path, "newfile.root")
with uproot.open(skhep_testdata.data_path("uproot-histograms.root")) as fin:
one = fin["one"]
with uproot.recreate(newfile) as fout:
fout["whatever"] = one
f1 = ROOT.TFile(newfile)
h1 = f1.Get("whatever")
assert h1.GetBinContent(0) == 0
assert h1.GetBinContent(1) == 68
assert h1.GetBinContent(2) == 285
assert h1.GetBinContent(3) == 755
assert h1.GetBinContent(4) == 1580
assert h1.GetBinContent(5) == 2296
assert h1.GetBinContent(6) == 2286
assert h1.GetBinContent(7) == 1570
assert h1.GetBinContent(8) == 795
assert h1.GetBinContent(9) == 289
assert h1.GetBinContent(10) == 76
assert h1.GetBinContent(11) == 0
f1.Close()
@pytest.mark.parametrize("cls", [ROOT.TH1C, ROOT.TH1D, ROOT.TH1F, ROOT.TH1I, ROOT.TH1S])
def test_all_TH1(tmp_path, cls):
original = os.path.join(tmp_path, "original.root")
newfile = os.path.join(tmp_path, "newfile.root")
f1 = ROOT.TFile(original, "recreate")
h1 = cls("h1", "title", 2, -3.14, 2.71)
h1.Fill(-4)
h1.Fill(-3.1)
h1.Fill(-3.1)
h1.Fill(2.7, 5)
h1.Fill(3, 4)
h1.Write()
f1.Close()
with uproot.open(original) as fin:
h2 = fin["h1"]
with uproot.recreate(newfile) as fout:
fout["out"] = h2
f3 = ROOT.TFile(newfile)
h3 = f3.Get("out")
assert h3.GetEntries() == 5
assert h3.GetSumOfWeights() == 7
assert h3.GetBinLowEdge(1) == pytest.approx(-3.14)
assert h3.GetBinWidth(1) == pytest.approx((2.71 - -3.14) / 2)
assert h3.GetBinContent(0) == pytest.approx(1)
assert h3.GetBinContent(1) == pytest.approx(2)
assert h3.GetBinContent(2) == pytest.approx(5)
assert h3.GetBinContent(3) == pytest.approx(4)
assert h3.GetBinError(0) == pytest.approx(1)
assert h3.GetBinError(1) == pytest.approx(1.4142135623730951)
assert h3.GetBinError(2) == pytest.approx(5)
assert h3.GetBinError(3) == pytest.approx(4)
f3.Close()
@pytest.mark.parametrize("cls", [ROOT.TH2C, ROOT.TH2D, ROOT.TH2F, ROOT.TH2I, ROOT.TH2S])
def test_all_TH2(tmp_path, cls):
original = os.path.join(tmp_path, "original.root")
newfile = os.path.join(tmp_path, "newfile.root")
f1 = ROOT.TFile(original, "recreate")
h1 = cls("h1", "title", 2, -3.14, 2.71, 3, -5, 10)
h1.Fill(-4, 9)
h1.Fill(-3.1, 9)
h1.Fill(-3.1, 9)
h1.Fill(2.7, -4, 5)
h1.Fill(3, 9, 4)
h1.Write()
f1.Close()
with uproot.open(original) as fin:
h2 = fin["h1"]
with uproot.recreate(newfile) as fout:
fout["out"] = h2
f3 = ROOT.TFile(newfile)
h3 = f3.Get("out")
assert h3.GetEntries() == 5
assert h3.GetSumOfWeights() == 7
assert h3.GetNbinsX() == 2
assert h3.GetNbinsY() == 3
assert h3.GetXaxis().GetBinLowEdge(1) == pytest.approx(-3.14)
assert h3.GetXaxis().GetBinUpEdge(2) == pytest.approx(2.71)
assert h3.GetYaxis().GetBinLowEdge(1) == pytest.approx(-5)
assert h3.GetYaxis().GetBinUpEdge(3) == pytest.approx(10)
assert [[h3.GetBinContent(i, j) for j in range(5)] for i in range(4)] == [
pytest.approx([0, 0, 0, 1, 0]),
pytest.approx([0, 0, 0, 2, 0]),
pytest.approx([0, 5, 0, 0, 0]),
pytest.approx([0, 0, 0, 4, 0]),
]
f3.Close()
@pytest.mark.parametrize("cls", [ROOT.TH3C, ROOT.TH3D, ROOT.TH3F, ROOT.TH3I, ROOT.TH3S])
def test_all_TH3(tmp_path, cls):
original = os.path.join(tmp_path, "original.root")
newfile = os.path.join(tmp_path, "newfile.root")
f1 = ROOT.TFile(original, "recreate")
h1 = cls("h1", "title", 2, -3.14, 2.71, 3, -5, 10, 1, 100, 200)
h1.Fill(-4, 9, 150)
h1.Fill(-3.1, 9, 150)
h1.Fill(-3.1, 9, 150)
h1.Fill(2.7, -4, 150, 5)
h1.Fill(3, 9, 150, 4)
h1.Write()
f1.Close()
with uproot.open(original) as fin:
h2 = fin["h1"]
with uproot.recreate(newfile) as fout:
fout["out"] = h2
f3 = ROOT.TFile(newfile)
h3 = f3.Get("out")
assert h3.GetEntries() == 5
assert h3.GetSumOfWeights() == 7
assert h3.GetNbinsX() == 2
assert h3.GetNbinsY() == 3
assert h3.GetNbinsZ() == 1
assert h3.GetXaxis().GetBinLowEdge(1) == pytest.approx(-3.14)
assert h3.GetXaxis().GetBinUpEdge(2) == pytest.approx(2.71)
assert h3.GetYaxis().GetBinLowEdge(1) == pytest.approx(-5)
assert h3.GetYaxis().GetBinUpEdge(3) == pytest.approx(10)
assert h3.GetZaxis().GetBinLowEdge(1) == pytest.approx(100)
assert h3.GetZaxis().GetBinUpEdge(1) == pytest.approx(200)
approx = pytest.approx
assert [
[[h3.GetBinContent(i, j, k) for k in range(3)] for j in range(5)]
for i in range(4)
] == [
[[0, 0, 0], approx([0, 0, 0]), [0, 0, 0], approx([0, 1, 0]), [0, 0, 0]],
[[0, 0, 0], approx([0, 0, 0]), [0, 0, 0], approx([0, 2, 0]), [0, 0, 0]],
[[0, 0, 0], approx([0, 5, 0]), [0, 0, 0], approx([0, 0, 0]), [0, 0, 0]],
[[0, 0, 0], approx([0, 0, 0]), [0, 0, 0], approx([0, 4, 0]), [0, 0, 0]],
]
f3.Close()
def test_TProfile(tmp_path):
original = os.path.join(tmp_path, "original.root")
newfile = os.path.join(tmp_path, "newfile.root")
f1 = ROOT.TFile(original, "recreate")
h1 = ROOT.TProfile("h1", "title", 2, -3.14, 2.71)
h1.Fill(-4, 10)
h1.Fill(-3.1, 10)
h1.Fill(-3.1, 20)
h1.Fill(2.7, 20)
h1.Fill(3, 20)
h1.Write()
f1.Close()
with uproot.open(original) as fin:
h2 = fin["h1"]
with uproot.recreate(newfile) as fout:
fout["out"] = h2
f3 = ROOT.TFile(newfile)
h3 = f3.Get("out")
assert h3.GetEntries() == 5
assert h3.GetSumOfWeights() == 35
assert h3.GetBinLowEdge(1) == pytest.approx(-3.14)
assert h3.GetBinWidth(1) == pytest.approx((2.71 - -3.14) / 2)
assert h3.GetBinContent(0) == pytest.approx(10)
assert h3.GetBinContent(1) == pytest.approx(15)
assert h3.GetBinContent(2) == pytest.approx(20)
assert h3.GetBinContent(3) == pytest.approx(20)
assert h3.GetBinError(0) == pytest.approx(0)
assert h3.GetBinError(1) == pytest.approx(np.sqrt(12.5))
assert h3.GetBinError(2) == pytest.approx(0)
assert h3.GetBinError(3) == pytest.approx(0)
f3.Close()
def test_TProfile2D(tmp_path):
original = os.path.join(tmp_path, "original.root")
newfile = os.path.join(tmp_path, "newfile.root")
f1 = ROOT.TFile(original, "recreate")
h1 = ROOT.TProfile2D("h1", "title", 2, -3.14, 2.71, 3, -5, 10)
h1.Fill(-4, 9, 10)
h1.Fill(-3.1, 9, 10)
h1.Fill(-3.1, 9, 20)
h1.Fill(2.7, -4, 20)
h1.Fill(3, 9, 20)
h1.Write()
f1.Close()
with uproot.open(original) as fin:
h2 = fin["h1"]
with uproot.recreate(newfile) as fout:
fout["out"] = h2
f3 = ROOT.TFile(newfile)
h3 = f3.Get("out")
assert h3.GetEntries() == 5
assert h3.GetSumOfWeights() == 35
assert h3.GetNbinsX() == 2
assert h3.GetNbinsY() == 3
assert h3.GetXaxis().GetBinLowEdge(1) == pytest.approx(-3.14)
assert h3.GetXaxis().GetBinUpEdge(2) == pytest.approx(2.71)
assert h3.GetYaxis().GetBinLowEdge(1) == pytest.approx(-5)
assert h3.GetYaxis().GetBinUpEdge(3) == pytest.approx(10)
assert [[h3.GetBinContent(i, j) for j in range(5)] for i in range(4)] == [
pytest.approx([0, 0, 0, 10, 0]),
pytest.approx([0, 0, 0, 15, 0]),
pytest.approx([0, 20, 0, 0, 0]),
pytest.approx([0, 0, 0, 20, 0]),
]
assert [[h3.GetBinError(i, j) for j in range(5)] for i in range(4)] == [
pytest.approx([0, 0, 0, 0, 0]),
pytest.approx([0, 0, 0, np.sqrt(12.5), 0]),
pytest.approx([0, 0, 0, 0, 0]),
pytest.approx([0, 0, 0, 0, 0]),
]
f3.Close()
def test_TProfile3D(tmp_path):
original = os.path.join(tmp_path, "original.root")
newfile = os.path.join(tmp_path, "newfile.root")
f1 = ROOT.TFile(original, "recreate")
h1 = ROOT.TProfile3D("h1", "title", 2, -3.14, 2.71, 3, -5, 10, 1, 100, 200)
h1.Fill(-4, 9, 150, 10)
h1.Fill(-3.1, 9, 150, 10)
h1.Fill(-3.1, 9, 150, 20)
h1.Fill(2.7, -4, 150, 20)
h1.Fill(3, 9, 150, 20)
h1.Write()
f1.Close()
with uproot.open(original) as fin:
h2 = fin["h1"]
with uproot.recreate(newfile) as fout:
fout["out"] = h2
f3 = ROOT.TFile(newfile)
h3 = f3.Get("out")
assert h3.GetEntries() == 5
assert h3.GetSumOfWeights() == 35
assert h3.GetNbinsX() == 2
assert h3.GetNbinsY() == 3
assert h3.GetNbinsZ() == 1
assert h3.GetXaxis().GetBinLowEdge(1) == pytest.approx(-3.14)
assert h3.GetXaxis().GetBinUpEdge(2) == pytest.approx(2.71)
assert h3.GetYaxis().GetBinLowEdge(1) == pytest.approx(-5)
assert h3.GetYaxis().GetBinUpEdge(3) == pytest.approx(10)
assert h3.GetZaxis().GetBinLowEdge(1) == pytest.approx(100)
assert h3.GetZaxis().GetBinUpEdge(1) == pytest.approx(200)
approx = pytest.approx
assert [
[[h3.GetBinContent(i, j, k) for k in range(3)] for j in range(5)]
for i in range(4)
] == [
[[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 10, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 15, 0], [0, 0, 0]],
[[0, 0, 0], [0, 20, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 20, 0], [0, 0, 0]],
]
assert [
[[h3.GetBinError(i, j, k) for k in range(3)] for j in range(5)]
for i in range(4)
] == [
[[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, approx(np.sqrt(12.5)), 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]],
]
f3.Close()
def test_ex_nihilo_TH1(tmp_path):
newfile = os.path.join(tmp_path, "newfile.root")
h1 = uproot.writing.identify.to_TH1x(
fName="h1",
fTitle="title",
data=np.array([1.0, 2.0, 5.0, 4.0], np.float64),
fEntries=5.0,
fTsumw=7.0,
fTsumw2=27.0,
fTsumwx=7.3,
fTsumwx2=55.67,
fSumw2=np.array([1.0, 2.0, 25.0, 16.0], np.float64),
fXaxis=uproot.writing.identify.to_TAxis(
fName="xaxis",
fTitle="",
fNbins=2,
fXmin=-3.14,
fXmax=2.71,
),
)
with uproot.recreate(newfile) as fout:
fout["out"] = h1
f3 = ROOT.TFile(newfile)
h3 = f3.Get("out")
assert h3.GetEntries() == 5
assert h3.GetSumOfWeights() == 7
assert h3.GetBinLowEdge(1) == pytest.approx(-3.14)
assert h3.GetBinWidth(1) == pytest.approx((2.71 - -3.14) / 2)
assert h3.GetBinContent(0) == pytest.approx(1)
assert h3.GetBinContent(1) == pytest.approx(2)
assert h3.GetBinContent(2) == pytest.approx(5)
assert h3.GetBinContent(3) == pytest.approx(4)
assert h3.GetBinError(0) == pytest.approx(1)
assert h3.GetBinError(1) == pytest.approx(1.4142135623730951)
assert h3.GetBinError(2) == pytest.approx(5)
assert h3.GetBinError(3) == pytest.approx(4)
f3.Close()
def test_ex_nihilo_TH2(tmp_path):
newfile = os.path.join(tmp_path, "newfile.root")
h1 = uproot.writing.identify.to_TH2x(
fName="h1",
fTitle="title",
data=np.array(
[0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 1, 2, 0, 4, 0, 0, 0, 0], np.float64
),
fEntries=5.0,
fTsumw=7.0,
fTsumw2=27.0,
fTsumwx=7.3,
fTsumwx2=55.67,
fTsumwy=-2.0,
fTsumwy2=242.0,
fTsumwxy=-109.8,
fSumw2=np.array(
[0, 0, 0, 0, 0, 0, 25, 0, 0, 0, 0, 0, 1, 2, 0, 16, 0, 0, 0, 0], np.float64
),
fXaxis=uproot.writing.identify.to_TAxis(
fName="xaxis",
fTitle="",
fNbins=2,
fXmin=-3.14,
fXmax=2.71,
),
fYaxis=uproot.writing.identify.to_TAxis(
fName="yaxis",
fTitle="",
fNbins=3,
fXmin=-5.0,
fXmax=10.0,
),
)
with uproot.recreate(newfile) as fout:
fout["out"] = h1
f3 = ROOT.TFile(newfile)
h3 = f3.Get("out")
assert h3.GetEntries() == 5
assert h3.GetSumOfWeights() == 7
assert h3.GetNbinsX() == 2
assert h3.GetNbinsY() == 3
assert h3.GetXaxis().GetBinLowEdge(1) == pytest.approx(-3.14)
assert h3.GetXaxis().GetBinUpEdge(2) == pytest.approx(2.71)
assert h3.GetYaxis().GetBinLowEdge(1) == pytest.approx(-5)
assert h3.GetYaxis().GetBinUpEdge(3) == pytest.approx(10)
assert [[h3.GetBinContent(i, j) for j in range(5)] for i in range(4)] == [
pytest.approx([0, 0, 0, 1, 0]),
pytest.approx([0, 0, 0, 2, 0]),
pytest.approx([0, 5, 0, 0, 0]),
pytest.approx([0, 0, 0, 4, 0]),
]
f3.Close()
def test_ex_nihilo_TH3(tmp_path):
newfile = os.path.join(tmp_path, "newfile.root")
h1 = uproot.writing.identify.to_TH3x(
fName="h1",
fTitle="title",
data=np.array(
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
+ [0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 1, 2, 0, 4, 0, 0, 0, 0]
+ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
np.float64,
),
fEntries=5.0,
fTsumw=7.0,
fTsumw2=27.0,
fTsumwx=7.3,
fTsumwx2=55.67,
fTsumwy=-2.0,
fTsumwy2=242.0,
fTsumwxy=-109.8,
fTsumwz=1050.0,
fTsumwz2=157500.0,
fTsumwxz=1095.0,
fTsumwyz=-300.0,
fSumw2=np.array(
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
+ [0, 0, 0, 0, 0, 0, 25, 0, 0, 0, 0, 0, 1, 2, 0, 16, 0, 0, 0, 0]
+ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
np.float64,
),
fXaxis=uproot.writing.identify.to_TAxis(
fName="xaxis",
fTitle="",
fNbins=2,
fXmin=-3.14,
fXmax=2.71,
),
fYaxis=uproot.writing.identify.to_TAxis(
fName="yaxis",
fTitle="",
fNbins=3,
fXmin=-5.0,
fXmax=10.0,
),
fZaxis=uproot.writing.identify.to_TAxis(
fName="zaxis",
fTitle="",
fNbins=1,
fXmin=100.0,
fXmax=200.0,
),
)
with uproot.recreate(newfile) as fout:
fout["out"] = h1
f3 = ROOT.TFile(newfile)
h3 = f3.Get("out")
assert h3.GetEntries() == 5
assert h3.GetSumOfWeights() == 7
assert h3.GetNbinsX() == 2
assert h3.GetNbinsY() == 3
assert h3.GetNbinsZ() == 1
assert h3.GetXaxis().GetBinLowEdge(1) == pytest.approx(-3.14)
assert h3.GetXaxis().GetBinUpEdge(2) == pytest.approx(2.71)
assert h3.GetYaxis().GetBinLowEdge(1) == pytest.approx(-5)
assert h3.GetYaxis().GetBinUpEdge(3) == pytest.approx(10)
assert h3.GetZaxis().GetBinLowEdge(1) == pytest.approx(100)
assert h3.GetZaxis().GetBinUpEdge(1) == pytest.approx(200)
approx = pytest.approx
assert [
[[h3.GetBinContent(i, j, k) for k in range(3)] for j in range(5)]
for i in range(4)
] == [
[[0, 0, 0], approx([0, 0, 0]), [0, 0, 0], approx([0, 1, 0]), [0, 0, 0]],
[[0, 0, 0], approx([0, 0, 0]), [0, 0, 0], approx([0, 2, 0]), [0, 0, 0]],
[[0, 0, 0], approx([0, 5, 0]), [0, 0, 0], approx([0, 0, 0]), [0, 0, 0]],
[[0, 0, 0], approx([0, 0, 0]), [0, 0, 0], approx([0, 4, 0]), [0, 0, 0]],
]
f3.Close()
def test_ex_nihilo_TProfile(tmp_path):
newfile = os.path.join(tmp_path, "newfile.root")
h1 = uproot.writing.identify.to_TProfile(
fName="h1",
fTitle="title",
data=np.array([10, 30, 20, 20], np.float64),
fEntries=5.0,
fTsumw=3.0,
fTsumw2=3.0,
fTsumwx=-3.5,
fTsumwx2=26.51,
fTsumwy=50.0,
fTsumwy2=900.0,
fSumw2=np.array([100, 500, 400, 400], np.float64),
fBinEntries=np.array([1, 2, 1, 1], np.float64),
fBinSumw2=np.array([], np.float64),
fXaxis=uproot.writing.identify.to_TAxis(
fName="xaxis",
fTitle="",
fNbins=2,
fXmin=-3.14,
fXmax=2.71,
),
)
with uproot.recreate(newfile) as fout:
fout["out"] = h1
f3 = ROOT.TFile(newfile)
h3 = f3.Get("out")
assert h3.GetEntries() == 5
assert h3.GetSumOfWeights() == 35
assert h3.GetBinLowEdge(1) == pytest.approx(-3.14)
assert h3.GetBinWidth(1) == pytest.approx((2.71 - -3.14) / 2)
assert h3.GetBinContent(0) == pytest.approx(10)
assert h3.GetBinContent(1) == pytest.approx(15)
assert h3.GetBinContent(2) == pytest.approx(20)
assert h3.GetBinContent(3) == pytest.approx(20)
assert h3.GetBinError(0) == pytest.approx(0)
assert h3.GetBinError(1) == pytest.approx(np.sqrt(12.5))
assert h3.GetBinError(2) == pytest.approx(0)
assert h3.GetBinError(3) == pytest.approx(0)
f3.Close()
def test_ex_nihilo_TProfile2D(tmp_path):
newfile = os.path.join(tmp_path, "newfile.root")
h1 = uproot.writing.identify.to_TProfile2D(
fName="h1",
fTitle="title",
data=np.array(
[0, 0, 0, 0, 0, 0, 20, 0, 0, 0, 0, 0, 10, 30, 0, 20, 0, 0, 0, 0], np.float64
),
fEntries=5.0,
fTsumw=3.0,
fTsumw2=3.0,
fTsumwx=-3.5,
fTsumwx2=26.51,
fTsumwy=14.0,
fTsumwy2=178.0,
fTsumwxy=-66.6,
fTsumwz=50.0,
fTsumwz2=900.0,
fSumw2=np.array(
[0, 0, 0, 0, 0, 0, 400, 0, 0, 0, 0, 0, 100, 500, 0, 400, 0, 0, 0, 0],
np.float64,
),
fBinEntries=np.array(
[0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 2, 0, 1, 0, 0, 0, 0], np.float64
),
fBinSumw2=np.array([], np.float64),
fXaxis=uproot.writing.identify.to_TAxis(
fName="xaxis",
fTitle="",
fNbins=2,
fXmin=-3.14,
fXmax=2.71,
),
fYaxis=uproot.writing.identify.to_TAxis(
fName="yaxis",
fTitle="",
fNbins=3,
fXmin=-5.0,
fXmax=10.0,
),
)
with uproot.recreate(newfile) as fout:
fout["out"] = h1
f3 = ROOT.TFile(newfile)
h3 = f3.Get("out")
assert h3.GetEntries() == 5
assert h3.GetSumOfWeights() == 35
assert h3.GetNbinsX() == 2
assert h3.GetNbinsY() == 3
assert h3.GetXaxis().GetBinLowEdge(1) == pytest.approx(-3.14)
assert h3.GetXaxis().GetBinUpEdge(2) == pytest.approx(2.71)
assert h3.GetYaxis().GetBinLowEdge(1) == pytest.approx(-5)
assert h3.GetYaxis().GetBinUpEdge(3) == pytest.approx(10)
assert [[h3.GetBinContent(i, j) for j in range(5)] for i in range(4)] == [
pytest.approx([0, 0, 0, 10, 0]),
pytest.approx([0, 0, 0, 15, 0]),
pytest.approx([0, 20, 0, 0, 0]),
pytest.approx([0, 0, 0, 20, 0]),
]
assert [[h3.GetBinError(i, j) for j in range(5)] for i in range(4)] == [
pytest.approx([0, 0, 0, 0, 0]),
pytest.approx([0, 0, 0, np.sqrt(12.5), 0]),
pytest.approx([0, 0, 0, 0, 0]),
pytest.approx([0, 0, 0, 0, 0]),
]
f3.Close()
def test_ex_nihilo_TProfile3D(tmp_path):
newfile = os.path.join(tmp_path, "newfile.root")
h1 = uproot.writing.identify.to_TProfile3D(
fName="h1",
fTitle="title",
data=np.array(
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
+ [0, 0, 0, 0, 0, 0, 20, 0, 0, 0, 0, 0, 10, 30, 0, 20, 0, 0, 0, 0]
+ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
np.float64,
),
fEntries=5.0,
fTsumw=3.0,
fTsumw2=3.0,
fTsumwx=-3.5,
fTsumwx2=26.51,
fTsumwy=14.0,
fTsumwy2=178.0,
fTsumwxy=-66.6,
fTsumwz=450.0,
fTsumwz2=67500.0,
fTsumwxz=-525.0,
fTsumwyz=2100.0,
fTsumwt=50.0,
fTsumwt2=900.0,
fSumw2=np.array(
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
+ [0, 0, 0, 0, 0, 0, 400, 0, 0, 0, 0, 0, 100, 500, 0, 400, 0, 0, 0, 0]
+ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
np.float64,
),
fBinEntries=np.array(
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
+ [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 2, 0, 1, 0, 0, 0, 0]
+ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
np.float64,
),
fBinSumw2=np.array([], np.float64),
fXaxis=uproot.writing.identify.to_TAxis(
fName="xaxis",
fTitle="",
fNbins=2,
fXmin=-3.14,
fXmax=2.71,
),
fYaxis=uproot.writing.identify.to_TAxis(
fName="yaxis",
fTitle="",
fNbins=3,
fXmin=-5.0,
fXmax=10.0,
),
fZaxis=uproot.writing.identify.to_TAxis(
fName="zaxis",
fTitle="",
fNbins=1,
fXmin=100.0,
fXmax=200.0,
),
)
with uproot.recreate(newfile) as fout:
fout["out"] = h1
f3 = ROOT.TFile(newfile)
h3 = f3.Get("out")
assert h3.GetEntries() == 5
assert h3.GetSumOfWeights() == 35
assert h3.GetNbinsX() == 2
assert h3.GetNbinsY() == 3
assert h3.GetNbinsZ() == 1
assert h3.GetXaxis().GetBinLowEdge(1) == pytest.approx(-3.14)
assert h3.GetXaxis().GetBinUpEdge(2) == pytest.approx(2.71)
assert h3.GetYaxis().GetBinLowEdge(1) == pytest.approx(-5)
assert h3.GetYaxis().GetBinUpEdge(3) == pytest.approx(10)
assert h3.GetZaxis().GetBinLowEdge(1) == pytest.approx(100)
assert h3.GetZaxis().GetBinUpEdge(1) == pytest.approx(200)
approx = pytest.approx
assert [
[[h3.GetBinContent(i, j, k) for k in range(3)] for j in range(5)]
for i in range(4)
] == [
[[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 10, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 15, 0], [0, 0, 0]],
[[0, 0, 0], [0, 20, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 20, 0], [0, 0, 0]],
]
assert [
[[h3.GetBinError(i, j, k) for k in range(3)] for j in range(5)]
for i in range(4)
] == [
[[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, approx(np.sqrt(12.5)), 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]],
]
f3.Close()
def test_delete(tmp_path):
newfile = os.path.join(tmp_path, "newfile.root")
h1 = uproot.writing.identify.to_TH1x(
fName="h1",
fTitle="title",
data=np.array([1.0, 2.0, 5.0, 4.0], np.float64),
fEntries=5.0,
fTsumw=7.0,
fTsumw2=27.0,
fTsumwx=7.3,
fTsumwx2=55.67,
fSumw2=np.array([1.0, 2.0, 25.0, 16.0], np.float64),
fXaxis=uproot.writing.identify.to_TAxis(
fName="xaxis",
fTitle="",
fNbins=2,
fXmin=-3.14,
fXmax=2.71,
),
)
with uproot.recreate(newfile) as fout:
fout["one"] = h1
fout["two"] = h1
with uproot.update(newfile) as fin:
del fin["one"]
f3 = ROOT.TFile(newfile)
h3 = f3.Get("two")
assert h3.GetEntries() == 5
assert h3.GetSumOfWeights() == 7
assert h3.GetBinLowEdge(1) == pytest.approx(-3.14)
assert h3.GetBinWidth(1) == pytest.approx((2.71 - -3.14) / 2)
assert h3.GetBinContent(0) == pytest.approx(1)
assert h3.GetBinContent(1) == pytest.approx(2)
assert h3.GetBinContent(2) == pytest.approx(5)
assert h3.GetBinContent(3) == pytest.approx(4)
assert h3.GetBinError(0) == pytest.approx(1)
assert h3.GetBinError(1) == pytest.approx(1.4142135623730951)
assert h3.GetBinError(2) == pytest.approx(5)
assert h3.GetBinError(3) == pytest.approx(4)
f3.Close()
|
from __future__ import absolute_import
from sentry import tsdb
from sentry.models import ServiceHook
from sentry.testutils import APITestCase
class ProjectServiceHookStatsTest(APITestCase):
def test_simple(self):
project = self.create_project()
hook = ServiceHook.objects.get_or_create(
project_id=project.id, actor_id=self.user.id, url="http://example.com"
)[0]
self.login_as(user=self.user)
path = u"/api/0/projects/{}/{}/hooks/{}/stats/".format(
project.organization.slug, project.slug, hook.guid
)
tsdb.incr(tsdb.models.servicehook_fired, hook.id, count=3)
response = self.client.get(path)
        assert response.status_code == 200, response.content
assert response.data[-1]["total"] == 3, response.data
for point in response.data[:-1]:
assert point["total"] == 0
assert len(response.data) == 24
|
import json
import time
from data import get_registered_user
from kafka import KafkaProducer
def json_serializer(data):
return json.dumps(data).encode('utf-8')
def get_partition(key, all, available):
return 0
producer = KafkaProducer(bootstrap_servers=['localhost:9092'],
value_serializer=json_serializer)
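# Sketch (assumption: kafka-python's KafkaProducer accepts a custom ``partitioner``
# callable with the (key_bytes, all_partitions, available_partitions) signature,
# which is what get_partition above mirrors):
# producer = KafkaProducer(bootstrap_servers=['localhost:9092'],
#                          value_serializer=json_serializer,
#                          partitioner=get_partition)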
if __name__ == '__main__':
    while True:
registered_user = get_registered_user()
print(registered_user)
producer.send('registered_user', registered_user)
time.sleep(4)
|
# -*- encoding: utf-8 -*-
# Module iahmin
from numpy import *
from iasecross import iasecross
def iahmin(f, h=1, Bc=iasecross()):
from iaaddm import iaaddm
from iasuprec import iasuprec
g = iaaddm(f,h)
    y = iasuprec(g, f, Bc)
return y
|
import unittest
from recipeformats import mmf
class TestParseRecipe(unittest.TestCase):
def assert_equal(self, actual, expected_title, expected_categories,
expected_yield, expected_servings, expected_ingredients,
expected_directions):
actual_ingredients = [repr(i) for i in actual.ingredients]
self.assertEqual(actual.title, expected_title)
self.assertEqual(actual.categories, expected_categories)
self.assertEqual(actual.yield_, expected_yield)
self.assertEqual(actual.servings, expected_servings)
self.assertEqual(actual_ingredients, expected_ingredients)
self.assertEqual(actual.directions, expected_directions)
# Variations on this recipe follow it.
def test_when_one_column(self):
lines = [
'---------- Recipe via Meal-Master (tm) v8.05',
' ',
' Title: Potato Casserole',
' Categories: Casserole, Potato',
' Yield: 8 Servings',
' ',
' 2 lb Frozen hash brown potatoes',
' 1 c Onions; diced',
' 1 cn Cream of chicken soup',
' 16 oz Sour cream',
' 1/2 c Melted margarine',
' 8 oz Grated sharp cheese',
' Salt and pepper to taste',
'',
' Thaw potatoes about 30 min., then mix all ingredients in a large bowl.',
' Place in a 9 X 13 baking dish. Bake at 350 for one hour. Serves 8',
' ',
' Recipe by: From recipe files of Martha',
' ',
'-----',
]
expected_title = 'Potato Casserole'
expected_categories = ['Casserole', 'Potato']
expected_yield = ''
expected_servings = 8
expected_ingredients = [
'{2} {lb} {Frozen hash brown potatoes}',
'{1} {c} {Onions; diced}',
'{1} {cn} {Cream of chicken soup}',
'{16} {oz} {Sour cream}',
'{1/2} {c} {Melted margarine}',
'{8} {oz} {Grated sharp cheese}',
'{} {} {Salt and pepper to taste}',
]
expected_directions = [
'Thaw potatoes about 30 min., then mix all ingredients in a large bowl. Place in a 9 X 13 baking dish. Bake at 350 for one hour. Serves 8',
'Recipe by: From recipe files of Martha',
]
actual = mmf.parse_recipe(lines)
self.assert_equal(actual, expected_title, expected_categories,
expected_yield, expected_servings,
expected_ingredients, expected_directions)
def test_when_no_ingredients_or_directions(self):
lines = [
'---------- Recipe via Meal-Master (tm) v8.05',
' ',
' Title: Potato Casserole',
' Categories: Casserole, Potato',
' Yield: 8 Servings',
' ',
'-----',
]
expected_title = 'Potato Casserole'
expected_categories = ['Casserole', 'Potato']
expected_yield = ''
expected_servings = 8
expected_ingredients = []
expected_directions = []
actual = mmf.parse_recipe(lines)
self.assert_equal(actual, expected_title, expected_categories,
expected_yield, expected_servings,
expected_ingredients, expected_directions)
def test_when_extra_empty_lines(self):
lines = [
' ',
'---------- Recipe via Meal-Master (tm) v8.05',
' ',
' Title: Potato Casserole',
' ',
' ',
' ',
' Categories: Casserole, Potato',
' ',
' ',
' Yield: 8 Servings',
' ',
' ',
' 2 lb Frozen hash brown potatoes',
' 1 c Onions; diced',
' 1 cn Cream of chicken soup',
' 16 oz Sour cream',
' ',
' 1/2 c Melted margarine',
' 8 oz Grated sharp cheese',
' Salt and pepper to taste',
'',
' Thaw potatoes about 30 min., then mix all ingredients in a large bowl.',
' Place in a 9 X 13 baking dish. Bake at 350 for one hour. Serves 8',
' ',
' ',
' ',
' Recipe by: From recipe files of Martha',
' ',
'-----',
' ',
]
expected_title = 'Potato Casserole'
expected_categories = ['Casserole', 'Potato']
expected_yield = ''
expected_servings = 8
expected_ingredients = [
'{2} {lb} {Frozen hash brown potatoes}',
'{1} {c} {Onions; diced}',
'{1} {cn} {Cream of chicken soup}',
'{16} {oz} {Sour cream}',
'{1/2} {c} {Melted margarine}',
'{8} {oz} {Grated sharp cheese}',
'{} {} {Salt and pepper to taste}',
]
expected_directions = [
'Thaw potatoes about 30 min., then mix all ingredients in a large bowl. Place in a 9 X 13 baking dish. Bake at 350 for one hour. Serves 8',
'Recipe by: From recipe files of Martha',
]
actual = mmf.parse_recipe(lines)
self.assert_equal(actual, expected_title, expected_categories,
expected_yield, expected_servings,
expected_ingredients, expected_directions)
def test_when_missing_typical_empty_lines(self):
lines = [
'---------- Recipe via Meal-Master (tm) v8.05',
' Title: Potato Casserole',
' Categories: Casserole, Potato',
' Yield: 8 Servings',
' 2 lb Frozen hash brown potatoes',
' 1 c Onions; diced',
' 1 cn Cream of chicken soup',
' 16 oz Sour cream',
' 1/2 c Melted margarine',
' 8 oz Grated sharp cheese',
' Salt and pepper to taste',
' Thaw potatoes about 30 min., then mix all ingredients in a large bowl.',
' Place in a 9 X 13 baking dish. Bake at 350 for one hour. Serves 8',
' ',
' Recipe by: From recipe files of Martha',
'-----',
]
expected_title = 'Potato Casserole'
expected_categories = ['Casserole', 'Potato']
expected_yield = ''
expected_servings = 8
expected_ingredients = [
'{2} {lb} {Frozen hash brown potatoes}',
'{1} {c} {Onions; diced}',
'{1} {cn} {Cream of chicken soup}',
'{16} {oz} {Sour cream}',
'{1/2} {c} {Melted margarine}',
'{8} {oz} {Grated sharp cheese}',
'{} {} {Salt and pepper to taste}',
]
expected_directions = [
'Thaw potatoes about 30 min., then mix all ingredients in a large bowl. Place in a 9 X 13 baking dish. Bake at 350 for one hour. Serves 8',
'Recipe by: From recipe files of Martha',
]
actual = mmf.parse_recipe(lines)
self.assert_equal(actual, expected_title, expected_categories,
expected_yield, expected_servings,
expected_ingredients, expected_directions)
def test_when_missing_header_and_footer(self):
lines = [
' Title: Potato Casserole',
' Categories: Casserole, Potato',
' Yield: 8 Servings',
' ',
' 2 lb Frozen hash brown potatoes',
' 1 c Onions; diced',
' 1 cn Cream of chicken soup',
' 16 oz Sour cream',
' 1/2 c Melted margarine',
' 8 oz Grated sharp cheese',
' Salt and pepper to taste',
'',
' Thaw potatoes about 30 min., then mix all ingredients in a large bowl.',
' Place in a 9 X 13 baking dish. Bake at 350 for one hour. Serves 8',
' ',
' Recipe by: From recipe files of Martha',
]
expected_title = 'Potato Casserole'
expected_categories = ['Casserole', 'Potato']
expected_yield = ''
expected_servings = 8
expected_ingredients = [
'{2} {lb} {Frozen hash brown potatoes}',
'{1} {c} {Onions; diced}',
'{1} {cn} {Cream of chicken soup}',
'{16} {oz} {Sour cream}',
'{1/2} {c} {Melted margarine}',
'{8} {oz} {Grated sharp cheese}',
'{} {} {Salt and pepper to taste}',
]
expected_directions = [
'Thaw potatoes about 30 min., then mix all ingredients in a large bowl. Place in a 9 X 13 baking dish. Bake at 350 for one hour. Serves 8',
'Recipe by: From recipe files of Martha',
]
actual = mmf.parse_recipe(lines)
self.assert_equal(actual, expected_title, expected_categories,
expected_yield, expected_servings,
expected_ingredients, expected_directions)
def test_when_missing_header_footer_and_title(self):
lines = [
' Categories: Casserole, Potato',
' Yield: 8 Servings',
' ',
' 2 lb Frozen hash brown potatoes',
' 1 c Onions; diced',
' 1 cn Cream of chicken soup',
' 16 oz Sour cream',
' 1/2 c Melted margarine',
' 8 oz Grated sharp cheese',
' Salt and pepper to taste',
'',
' Thaw potatoes about 30 min., then mix all ingredients in a large bowl.',
' Place in a 9 X 13 baking dish. Bake at 350 for one hour. Serves 8',
' ',
' Recipe by: From recipe files of Martha',
]
expected_title = ''
expected_categories = ['Casserole', 'Potato']
expected_yield = ''
expected_servings = 8
expected_ingredients = [
'{2} {lb} {Frozen hash brown potatoes}',
'{1} {c} {Onions; diced}',
'{1} {cn} {Cream of chicken soup}',
'{16} {oz} {Sour cream}',
'{1/2} {c} {Melted margarine}',
'{8} {oz} {Grated sharp cheese}',
'{} {} {Salt and pepper to taste}',
]
expected_directions = [
'Thaw potatoes about 30 min., then mix all ingredients in a large bowl. Place in a 9 X 13 baking dish. Bake at 350 for one hour. Serves 8',
'Recipe by: From recipe files of Martha',
]
actual = mmf.parse_recipe(lines)
self.assert_equal(actual, expected_title, expected_categories,
expected_yield, expected_servings,
expected_ingredients, expected_directions)
def test_when_missing_header_footer_and_categories(self):
lines = [
' Title: Potato Casserole',
' Yield: 8 Servings',
' ',
' 2 lb Frozen hash brown potatoes',
' 1 c Onions; diced',
' 1 cn Cream of chicken soup',
' 16 oz Sour cream',
' 1/2 c Melted margarine',
' 8 oz Grated sharp cheese',
' Salt and pepper to taste',
'',
' Thaw potatoes about 30 min., then mix all ingredients in a large bowl.',
' Place in a 9 X 13 baking dish. Bake at 350 for one hour. Serves 8',
' ',
' Recipe by: From recipe files of Martha',
]
expected_title = 'Potato Casserole'
expected_categories = []
expected_yield = ''
expected_servings = 8
expected_ingredients = [
'{2} {lb} {Frozen hash brown potatoes}',
'{1} {c} {Onions; diced}',
'{1} {cn} {Cream of chicken soup}',
'{16} {oz} {Sour cream}',
'{1/2} {c} {Melted margarine}',
'{8} {oz} {Grated sharp cheese}',
'{} {} {Salt and pepper to taste}',
]
expected_directions = [
'Thaw potatoes about 30 min., then mix all ingredients in a large bowl. Place in a 9 X 13 baking dish. Bake at 350 for one hour. Serves 8',
'Recipe by: From recipe files of Martha',
]
actual = mmf.parse_recipe(lines)
self.assert_equal(actual, expected_title, expected_categories,
expected_yield, expected_servings,
expected_ingredients, expected_directions)
def test_when_missing_header_footer_and_yield(self):
lines = [
' Title: Potato Casserole',
' Categories: Casserole, Potato',
' ',
' 2 lb Frozen hash brown potatoes',
' 1 c Onions; diced',
' 1 cn Cream of chicken soup',
' 16 oz Sour cream',
' 1/2 c Melted margarine',
' 8 oz Grated sharp cheese',
' Salt and pepper to taste',
'',
' Thaw potatoes about 30 min., then mix all ingredients in a large bowl.',
' Place in a 9 X 13 baking dish. Bake at 350 for one hour. Serves 8',
' ',
' Recipe by: From recipe files of Martha',
]
expected_title = 'Potato Casserole'
expected_categories = ['Casserole', 'Potato']
expected_yield = ''
expected_servings = 0
expected_ingredients = [
'{2} {lb} {Frozen hash brown potatoes}',
'{1} {c} {Onions; diced}',
'{1} {cn} {Cream of chicken soup}',
'{16} {oz} {Sour cream}',
'{1/2} {c} {Melted margarine}',
'{8} {oz} {Grated sharp cheese}',
'{} {} {Salt and pepper to taste}',
]
expected_directions = [
'Thaw potatoes about 30 min., then mix all ingredients in a large bowl. Place in a 9 X 13 baking dish. Bake at 350 for one hour. Serves 8',
'Recipe by: From recipe files of Martha',
]
actual = mmf.parse_recipe(lines)
self.assert_equal(actual, expected_title, expected_categories,
expected_yield, expected_servings,
expected_ingredients, expected_directions)
def test_when_only_ingredients_and_directions(self):
lines = [
' 2 lb Frozen hash brown potatoes',
' 1 c Onions; diced',
' 1 cn Cream of chicken soup',
' 16 oz Sour cream',
' 1/2 c Melted margarine',
' 8 oz Grated sharp cheese',
' Salt and pepper to taste',
'',
' Thaw potatoes about 30 min., then mix all ingredients in a large bowl.',
' Place in a 9 X 13 baking dish. Bake at 350 for one hour. Serves 8',
' ',
' Recipe by: From recipe files of Martha',
]
expected_title = ''
expected_categories = []
expected_yield = ''
expected_servings = 0
expected_ingredients = [
'{2} {lb} {Frozen hash brown potatoes}',
'{1} {c} {Onions; diced}',
'{1} {cn} {Cream of chicken soup}',
'{16} {oz} {Sour cream}',
'{1/2} {c} {Melted margarine}',
'{8} {oz} {Grated sharp cheese}',
'{} {} {Salt and pepper to taste}',
]
expected_directions = [
'Thaw potatoes about 30 min., then mix all ingredients in a large bowl. Place in a 9 X 13 baking dish. Bake at 350 for one hour. Serves 8',
'Recipe by: From recipe files of Martha',
]
actual = mmf.parse_recipe(lines)
self.assert_equal(actual, expected_title, expected_categories,
expected_yield, expected_servings,
expected_ingredients, expected_directions)
def test_when_no_ingredients(self):
lines = [
'---------- Recipe via Meal-Master (tm) v8.05',
' ',
' Title: Potato Casserole',
' Categories: Casserole, Potato',
' Yield: 8 Servings',
' ',
' Thaw potatoes about 30 min., then mix all ingredients in a large bowl.',
' Place in a 9 X 13 baking dish. Bake at 350 for one hour. Serves 8',
' ',
' Recipe by: From recipe files of Martha',
' ',
'-----',
]
expected_title = 'Potato Casserole'
expected_categories = ['Casserole', 'Potato']
expected_yield = ''
expected_servings = 8
expected_ingredients = []
expected_directions = [
'Thaw potatoes about 30 min., then mix all ingredients in a large bowl. Place in a 9 X 13 baking dish. Bake at 350 for one hour. Serves 8',
'Recipe by: From recipe files of Martha',
]
actual = mmf.parse_recipe(lines)
self.assert_equal(actual, expected_title, expected_categories,
expected_yield, expected_servings,
expected_ingredients, expected_directions)
def test_when_two_column(self):
lines = [
'MMMMM----- Recipe via Meal-Master (tm) v8.02',
' ',
' Title: Potato-Sorrel Soup',
' Categories: Soup/stew, Vegetarian',
' Yield: 6 servings',
' ',
' 4 tb Butter 1/2 ts Salt (to taste)',
' 7 c Water 1 1/2 lb Red potatoes; quartered',
' 3 md Leeks; the white parts only - lengthwise & thinly sliced',
' - chopped or cut Freshly ground pepper',
' - into 1/4-inch rounds Creme fraiche',
'MMMMM--------------------------HEADING-------------------------------',
' 6 c Loosely packed sorrel leaves 1 tb Chives',
' -the stems removed and - thinly sliced or snipped',
' - leaves roughly chopped ',
'MMMMM--------------------------HEADING-------------------------------',
' ',
' This is a rather rustic soup. For a more refined version, pass it',
' through a food mill before serving.',
' ',
'MMMMM'
]
expected_title = 'Potato-Sorrel Soup'
expected_categories = ['Soup/stew', 'Vegetarian']
expected_yield = ''
expected_servings = 6
expected_ingredients = [
'{4} {tb} {Butter}',
'{7} {c} {Water}',
'{3} {md} {Leeks; the white parts only chopped or cut into 1/4-inch rounds}',
'{1/2} {ts} {Salt (to taste)}',
'{1 1/2} {lb} {Red potatoes; quartered lengthwise & thinly sliced}',
'{} {} {Freshly ground pepper}',
'{} {} {Creme fraiche}',
'----- HEADING -----',
'{6} {c} {Loosely packed sorrel leaves the stems removed and leaves roughly chopped}',
'{1} {tb} {Chives thinly sliced or snipped}',
'----- HEADING -----',
]
expected_directions = [
'This is a rather rustic soup. For a more refined version, pass it through a food mill before serving.',
]
actual = mmf.parse_recipe(lines)
self.assert_equal(actual, expected_title, expected_categories,
expected_yield, expected_servings,
expected_ingredients, expected_directions)
def test_when_empty(self):
lines = []
expected_title = ''
expected_categories = []
expected_yield = ''
expected_servings = 0
expected_ingredients = []
expected_directions = []
actual = mmf.parse_recipe(lines)
self.assert_equal(actual, expected_title, expected_categories,
expected_yield, expected_servings,
expected_ingredients, expected_directions)
def test_when_only_header(self):
lines = [
'---------- Recipe via Meal-Master (tm) v8.05',
]
expected_title = ''
expected_categories = []
expected_yield = ''
expected_servings = 0
expected_ingredients = []
expected_directions = []
actual = mmf.parse_recipe(lines)
self.assert_equal(actual, expected_title, expected_categories,
expected_yield, expected_servings,
expected_ingredients, expected_directions)
def test_when_only_header_and_footer(self):
lines = [
'---------- Recipe via Meal-Master (tm) v8.05',
'-----',
'Extra text that should not be included',
]
expected_title = ''
expected_categories = []
expected_yield = ''
expected_servings = 0
expected_ingredients = []
expected_directions = []
actual = mmf.parse_recipe(lines)
self.assert_equal(actual, expected_title, expected_categories,
expected_yield, expected_servings,
expected_ingredients, expected_directions)
class TestIsMmfHeader(unittest.TestCase):
def test_when_empty(self):
actual = mmf._is_mmf_header('')
expected = False
self.assertEqual(actual, expected)
def test_when_normal(self):
actual = mmf._is_mmf_header('---------- Recipe via Meal-Master (tm) v8.05')
expected = True
self.assertEqual(actual, expected)
def test_when_MMMMM(self):
actual = mmf._is_mmf_header('MMMMM----- Recipe via Meal-Master (tm) v8.05')
expected = True
self.assertEqual(actual, expected)
def test_when_mmmmm(self):
actual = mmf._is_mmf_header('mmmmm----- Recipe via Meal-Master (tm) v8.05')
expected = False
self.assertEqual(actual, expected)
def test_when_extra_dash(self):
actual = mmf._is_mmf_header('----------- Recipe via Meal-Master (tm) v8.05')
expected = False
self.assertEqual(actual, expected)
def test_when_extra_M(self):
actual = mmf._is_mmf_header('MMMMMM----- Recipe via Meal-Master (tm) v8.05')
expected = False
self.assertEqual(actual, expected)
def test_when_missing_dash(self):
actual = mmf._is_mmf_header('--------- Recipe via Meal-Master (tm) v8.05')
expected = False
self.assertEqual(actual, expected)
def test_when_missing_M(self):
actual = mmf._is_mmf_header('MMMM----- Recipe via Meal-Master (tm) v8.05')
expected = False
self.assertEqual(actual, expected)
def test_when_leading_space(self):
actual = mmf._is_mmf_header(' ---------- Recipe via Meal-Master (tm) v8.05')
expected = False
self.assertEqual(actual, expected)
def test_when_trailing_space(self):
actual = mmf._is_mmf_header('---------- Recipe via Meal-Master (tm) v8.05 ')
expected = True
self.assertEqual(actual, expected)
def test_when_only_dashes(self):
actual = mmf._is_mmf_header('----------')
expected = False
self.assertEqual(actual, expected)
def test_when_only_dashes_and_space(self):
actual = mmf._is_mmf_header('---------- ')
expected = True
self.assertEqual(actual, expected)
def test_when_other_text(self):
actual = mmf._is_mmf_header('---------- Anything goes here')
expected = True
self.assertEqual(actual, expected)
def test_when_only_MMMMM_and_dashes(self):
actual = mmf._is_mmf_header('MMMMM-----')
expected = False
self.assertEqual(actual, expected)
def test_when_only_MMMMM_and_dashes_and_space(self):
actual = mmf._is_mmf_header('MMMMM----- ')
expected = True
self.assertEqual(actual, expected)
def test_when_MMMMM_other_text(self):
actual = mmf._is_mmf_header('MMMMM----- Anything goes here')
expected = True
self.assertEqual(actual, expected)
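import re
# Reference sketch only (not the mmf module's actual implementation): a header
# check consistent with the TestIsMmfHeader cases above -- the line must start
# with exactly ten dashes, or 'MMMMM' followed by exactly five dashes, and the
# marker must be followed by a space (so a bare marker with nothing after it is
# rejected).
def _is_mmf_header_sketch(line):
    return re.match(r'(MMMMM-----|----------) ', line) is not None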
class TestIsMmfFooter(unittest.TestCase):
    # only '-----' and 'MMMMM' should be considered valid
def test_when_normal(self):
actual = mmf._is_mmf_footer('-----')
expected = True
self.assertEqual(actual, expected)
def test_when_MMMMM(self):
actual = mmf._is_mmf_footer('MMMMM')
expected = True
self.assertEqual(actual, expected)
def test_when_empty(self):
actual = mmf._is_mmf_footer('')
expected = False
self.assertEqual(actual, expected)
def test_when_leading_space(self):
actual = mmf._is_mmf_footer(' -----')
expected = False
self.assertEqual(actual, expected)
def test_when_trailing_space(self):
actual = mmf._is_mmf_footer('----- ')
expected = False
self.assertEqual(actual, expected)
def test_when_extra_dash(self):
actual = mmf._is_mmf_footer('------')
expected = False
self.assertEqual(actual, expected)
def test_when_missing_dash(self):
actual = mmf._is_mmf_footer('----')
expected = False
self.assertEqual(actual, expected)
def test_when_trailing_text(self):
actual = mmf._is_mmf_footer('-----TEXT')
expected = False
self.assertEqual(actual, expected)
def test_when_MMMMM_leading_space(self):
actual = mmf._is_mmf_footer(' MMMMM')
expected = False
self.assertEqual(actual, expected)
def test_when_MMMMM_trailing_space(self):
actual = mmf._is_mmf_footer('MMMMM ')
expected = False
self.assertEqual(actual, expected)
def test_when_MMMMM_extra_M(self):
actual = mmf._is_mmf_footer('MMMMMM')
expected = False
self.assertEqual(actual, expected)
def test_when_MMMMM_missing_M(self):
actual = mmf._is_mmf_footer('MMMM')
expected = False
self.assertEqual(actual, expected)
def test_when_MMMMM_trailing_text(self):
actual = mmf._is_mmf_footer('MMMMMTEXT')
expected = False
self.assertEqual(actual, expected)
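# Reference sketch only (not the mmf module's actual implementation): per the
# TestIsMmfFooter cases above, only the exact strings '-----' and 'MMMMM'
# count as a footer -- no leading/trailing whitespace or extra characters.
def _is_mmf_footer_sketch(line):
    return line in ('-----', 'MMMMM')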
class TestTestMetadata(unittest.TestCase):
def test_when_empty(self):
actual = mmf._test_metadata('')
expected = False, '', ''
self.assertEqual(actual, expected)
def test_when_colon(self):
actual = mmf._test_metadata(':')
expected = False, '', ''
self.assertEqual(actual, expected)
def test_when_no_attribute_name(self):
actual = mmf._test_metadata(' : value')
expected = False, '', ''
self.assertEqual(actual, expected)
def test_when_text_without_colon(self):
actual = mmf._test_metadata(' Chill before serving. ')
expected = False, '', ''
self.assertEqual(actual, expected)
def test_when_no_value(self):
actual = mmf._test_metadata(' Categories: ')
expected = True, 'Categories', ''
self.assertEqual(actual, expected)
def test_when_normal(self):
actual = mmf._test_metadata(' Title: 21 Club Rice Pudding')
expected = True, 'Title', '21 Club Rice Pudding'
self.assertEqual(actual, expected)
def test_when_extra_spaces(self):
actual = mmf._test_metadata(' Recipe By : Aunt Salli ')
expected = True, 'Recipe By', 'Aunt Salli'
self.assertEqual(actual, expected)
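# Reference sketch only, consistent with the TestTestMetadata cases above: a
# line counts as metadata when it contains a colon with a non-empty attribute
# name on the left. The real parser presumably also restricts which attribute
# names are accepted, which these unit tests do not exercise.
def _test_metadata_sketch(line):
    if ':' not in line:
        return False, '', ''
    attribute, _, value = line.partition(':')
    attribute = attribute.strip()
    if not attribute:
        return False, '', ''
    return True, attribute, value.strip()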
class TestSplitCategories(unittest.TestCase):
def test_when_none(self):
actual = mmf._split_categories('None')
expected = []
self.assertEqual(actual, expected)
def test_when_none_mixed_caps(self):
actual = mmf._split_categories('noNE')
expected = []
self.assertEqual(actual, expected)
def test_when_one_category(self):
actual = mmf._split_categories('Dessert')
expected = ['Dessert']
self.assertEqual(actual, expected)
def test_when_multiple_categories(self):
actual = mmf._split_categories('Dessert,Italian,Easy')
expected = ['Dessert', 'Italian', 'Easy']
self.assertEqual(actual, expected)
def test_when_multiple_categories_with_space(self):
actual = mmf._split_categories('Dessert, Italian, Easy')
expected = ['Dessert', 'Italian', 'Easy']
self.assertEqual(actual, expected)
def test_when_multiple_categories_with_more_space(self):
actual = mmf._split_categories(' Dessert , Italian , Easy ')
expected = ['Dessert', 'Italian', 'Easy']
self.assertEqual(actual, expected)
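# Reference sketch only, consistent with the TestSplitCategories cases above:
# the literal 'None' (any capitalization) means no categories; otherwise the
# value is split on commas and each entry is stripped of surrounding spaces.
def _split_categories_sketch(value):
    if value.strip().lower() == 'none':
        return []
    return [c.strip() for c in value.split(',') if c.strip()]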
class TestGetYieldAndServings(unittest.TestCase):
def test_when_empty(self):
actual = mmf._get_yield_and_servings('')
expected = '', 0
self.assertEqual(actual, expected)
def test_when_number(self):
actual = mmf._get_yield_and_servings('10')
expected = '', 10
self.assertEqual(actual, expected)
def test_when_number_and_unit(self):
actual = mmf._get_yield_and_servings('24 cookies')
expected = '24 cookies', 0
self.assertEqual(actual, expected)
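# Reference sketch only, consistent with the TestGetYieldAndServings cases
# above: a purely numeric value is a servings count, anything else (including
# an empty string) is treated as a free-form yield with zero servings.
def _get_yield_and_servings_sketch(value):
    value = value.strip()
    if value.isdigit():
        return '', int(value)
    return value, 0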
class TestGetIngredientHeading(unittest.TestCase):
def test_when_empty(self):
actual = mmf._get_ingredient_heading('')
expected = ''
self.assertEqual(actual, expected)
def test_when_not_marked_up(self):
actual = mmf._get_ingredient_heading('This is some text.')
expected = ''
self.assertEqual(actual, expected)
def test_when_heading(self):
actual = mmf._get_ingredient_heading('---------------------------------SPAM---------------------------------')
expected = 'SPAM'
self.assertEqual(actual, expected)
def test_when_minimum(self):
actual = mmf._get_ingredient_heading('-----BAR-----')
expected = 'BAR'
self.assertEqual(actual, expected)
def test_when_MMMMM_heading(self):
actual = mmf._get_ingredient_heading('MMMMM---------------------------QUICK OATS--------------------------------')
expected = 'QUICK OATS'
self.assertEqual(actual, expected)
def test_when_MMMMM_minimum(self):
actual = mmf._get_ingredient_heading('MMMMM-----JARS-----')
expected = 'JARS'
self.assertEqual(actual, expected)
def test_when_spaces(self):
actual = mmf._get_ingredient_heading('------------------------- This is a heading. --------------------------------')
expected = 'This is a heading.'
self.assertEqual(actual, expected)
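import re
# Reference sketch only, consistent with the TestGetIngredientHeading cases
# above: a heading line is an optional 'MMMMM' prefix, at least five dashes,
# the heading text, and at least five closing dashes; the text is returned
# with surrounding spaces stripped, and any other line yields ''.
def _get_ingredient_heading_sketch(line):
    match = re.match(r'(?:MMMMM)?-{5,}(.*?)-{5,}$', line)
    return match.group(1).strip() if match else ''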
class TestGetIngredient(unittest.TestCase):
def test_when_empty(self):
actual = mmf._get_ingredient('')
self.assertEqual(actual.quantity, '')
self.assertEqual(actual.unit, '')
self.assertEqual(actual.text, '')
self.assertEqual(actual.is_heading, False)
def test_when_whitespace(self):
actual = mmf._get_ingredient(' ')
self.assertEqual(actual.quantity, '')
self.assertEqual(actual.unit, '')
self.assertEqual(actual.text, '')
self.assertEqual(actual.is_heading, False)
def test_1(self):
actual = mmf._get_ingredient(' 1 qt Milk')
self.assertEqual(actual.quantity, '1')
self.assertEqual(actual.unit, 'qt')
self.assertEqual(actual.text, 'Milk')
self.assertEqual(actual.is_heading, False)
def test_2(self):
actual = mmf._get_ingredient(' 1/2 qt Milk')
self.assertEqual(actual.quantity, '1/2')
self.assertEqual(actual.unit, 'qt')
self.assertEqual(actual.text, 'Milk')
self.assertEqual(actual.is_heading, False)
def test_3(self):
actual = mmf._get_ingredient(' 3 1/2 qt Milk')
self.assertEqual(actual.quantity, '3 1/2')
self.assertEqual(actual.unit, 'qt')
self.assertEqual(actual.text, 'Milk')
self.assertEqual(actual.is_heading, False)
def test_4(self):
actual = mmf._get_ingredient(' 1.5 qt Milk')
self.assertEqual(actual.quantity, '1.5')
self.assertEqual(actual.unit, 'qt')
self.assertEqual(actual.text, 'Milk')
self.assertEqual(actual.is_heading, False)
def test_5(self):
actual = mmf._get_ingredient(' .5 qt Milk')
self.assertEqual(actual.quantity, '.5')
self.assertEqual(actual.unit, 'qt')
self.assertEqual(actual.text, 'Milk')
self.assertEqual(actual.is_heading, False)
def test_6(self):
actual = mmf._get_ingredient(' 3/4 c Long-grained rice')
self.assertEqual(actual.quantity, '3/4')
self.assertEqual(actual.unit, 'c')
self.assertEqual(actual.text, 'Long-grained rice')
self.assertEqual(actual.is_heading, False)
def test_7(self):
actual = mmf._get_ingredient(' Raisins (optional)')
self.assertEqual(actual.quantity, '')
self.assertEqual(actual.unit, '')
self.assertEqual(actual.text, 'Raisins (optional)')
self.assertEqual(actual.is_heading, False)
def test_8(self):
actual = mmf._get_ingredient(' 1 Egg yolk')
self.assertEqual(actual.quantity, '1')
self.assertEqual(actual.unit, '')
self.assertEqual(actual.text, 'Egg yolk')
self.assertEqual(actual.is_heading, False)
class TestIsIngredient(unittest.TestCase):
def test_when_empty(self):
actual = mmf._is_ingredient('')
expected = False
self.assertEqual(actual, expected)
def test_when_direction(self):
actual = mmf._is_ingredient('In large bowl, blend oil and sugars on low until well mixed. Add')
expected = False
self.assertEqual(actual, expected)
def test_when_invalid_quantity(self):
actual = mmf._is_ingredient(' ab qt Milk')
expected = False
self.assertEqual(actual, expected)
def test_when_invalid_unit(self):
actual = mmf._is_ingredient(' 21 Apples')
expected = False
self.assertEqual(actual, expected)
def test_when_spacing_off(self):
actual = mmf._is_ingredient(' 1 qt Milk')
expected = False
self.assertEqual(actual, expected)
def test_when_1(self):
actual = mmf._is_ingredient(' 1 qt Milk')
expected = True
self.assertEqual(actual, expected)
def test_when_2(self):
actual = mmf._is_ingredient(' 1 1/2 c Whipped cream')
expected = True
self.assertEqual(actual, expected)
def test_when_3(self):
actual = mmf._is_ingredient(' 1 Vanilla bean')
expected = True
self.assertEqual(actual, expected)
def test_when_4(self):
actual = mmf._is_ingredient(' Raisins (optional)')
expected = True
self.assertEqual(actual, expected)
def test_when_5(self):
actual = mmf._is_ingredient(' 1.5 qt Milk')
expected = True
self.assertEqual(actual, expected)
def test_when_6(self):
actual = mmf._is_ingredient(' 1 c Oil 1 t Baking soda')
expected = True
self.assertEqual(actual, expected)
class TestGetIngredients(unittest.TestCase):
def test_when_none(self):
lines = []
expected = []
actual = [repr(i) for i in mmf._get_ingredients(lines)]
self.assertEqual(actual, expected)
def test_when_empty_line(self):
lines = [
(' ', False),
]
expected = []
actual = [repr(i) for i in mmf._get_ingredients(lines)]
self.assertEqual(actual, expected)
def test_when_empty_lines(self):
lines = [
(' ', False),
(' ', False),
(' ', False),
]
expected = []
actual = [repr(i) for i in mmf._get_ingredients(lines)]
self.assertEqual(actual, expected)
def test_when_one_column(self):
lines = [
(' 1 qt Milk', False),
(' 1 pt Heavy cream', False),
(' 1/2 ts Salt', False),
(' 1 Vanilla bean', False),
(' 3/4 c Long-grained rice', False),
(' 1 c Granulated sugar', False),
(' 1 Egg yolk', False),
(' 1 1/2 c Whipped cream', False),
(' Raisins (optional)', False),
]
expected = [
'{1} {qt} {Milk}',
'{1} {pt} {Heavy cream}',
'{1/2} {ts} {Salt}',
'{1} {} {Vanilla bean}',
'{3/4} {c} {Long-grained rice}',
'{1} {c} {Granulated sugar}',
'{1} {} {Egg yolk}',
'{1 1/2} {c} {Whipped cream}',
'{} {} {Raisins (optional)}',
]
actual = [repr(i) for i in mmf._get_ingredients(lines)]
self.assertEqual(actual, expected)
def test_when_one_column_with_extra_lines(self):
lines = [
(' ', False),
(' 1 qt Milk', False),
(' 1 pt Heavy cream', False),
(' 1/2 ts Salt', False),
(' 1 Vanilla bean', False),
(' 3/4 c Long-grained rice', False),
(' ', False),
(' 1 c Granulated sugar', False),
(' 1 Egg yolk', False),
(' 1 1/2 c Whipped cream', False),
(' Raisins (optional)', False),
(' ', False),
]
expected = [
'{1} {qt} {Milk}',
'{1} {pt} {Heavy cream}',
'{1/2} {ts} {Salt}',
'{1} {} {Vanilla bean}',
'{3/4} {c} {Long-grained rice}',
'{1} {c} {Granulated sugar}',
'{1} {} {Egg yolk}',
'{1 1/2} {c} {Whipped cream}',
'{} {} {Raisins (optional)}',
]
actual = [repr(i) for i in mmf._get_ingredients(lines)]
self.assertEqual(actual, expected)
def test_when_one_column_with_headings(self):
lines = [
('FOR THE PIE', True),
(' 1 1/2 c All-Purpose Flour', False),
(' 1/2 ts Salt', False),
(' 1/2 c Shortening', False),
(' 5 tb ICE Water', False),
(' 8 c Apples [peeled & sliced]', False),
(' 1/4 c Granulated Sugar', False),
(' 2 tb All-Purpose Flour', False),
(' 1/2 ts Nutmeg, Ground', False),
(' 2 tb Lemon Juice', False),
(' 1 ts Cinnamon, Ground', False),
('', False),
('FOR THE TOPPING', True),
(' 1/2 c Granulated Sugar', False),
(' 1/2 c All-Purpose Flour', False),
(' 1/3 c Butter', False),
(' 1 lg Paper Bag', False),
(' Vanilla Ice Cream', False),
]
expected = [
'----- FOR THE PIE -----',
'{1 1/2} {c} {All-Purpose Flour}',
'{1/2} {ts} {Salt}',
'{1/2} {c} {Shortening}',
'{5} {tb} {ICE Water}',
'{8} {c} {Apples [peeled & sliced]}',
'{1/4} {c} {Granulated Sugar}',
'{2} {tb} {All-Purpose Flour}',
'{1/2} {ts} {Nutmeg, Ground}',
'{2} {tb} {Lemon Juice}',
'{1} {ts} {Cinnamon, Ground}',
'----- FOR THE TOPPING -----',
'{1/2} {c} {Granulated Sugar}',
'{1/2} {c} {All-Purpose Flour}',
'{1/3} {c} {Butter}',
'{1} {lg} {Paper Bag}',
'{} {} {Vanilla Ice Cream}',
]
actual = [repr(i) for i in mmf._get_ingredients(lines)]
self.assertEqual(actual, expected)
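    # Note on the two-column cases below: per the expected lists, a two-column
    # ingredient block is read a whole column at a time -- every left-column
    # entry (with its continuation lines) is emitted first, then every
    # right-column entry -- and each heading starts a new such grouping.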
def test_when_two_columns(self):
lines = [
(' 1 1/2 lb Hamburger 1 ds Salt', False),
(' 1 c Onion; chopped 1/2 c Water', False),
(' 1 c Green pepper; chopped 1/8 t Hot pepper sauce', False),
(' 1 T Oil ', False),
]
expected = [
'{1 1/2} {lb} {Hamburger}',
'{1} {c} {Onion; chopped}',
'{1} {c} {Green pepper; chopped}',
'{1} {T} {Oil}',
'{1} {ds} {Salt}',
'{1/2} {c} {Water}',
'{1/8} {t} {Hot pepper sauce}',
]
actual = [repr(i) for i in mmf._get_ingredients(lines)]
self.assertEqual(actual, expected)
def test_when_two_columns_with_headings(self):
lines = [
('HEADING 1', True),
(' 1 1/2 lb Hamburger 1 ds Salt', False),
(' 1 c Onion; chopped 1/2 c Water', False),
('HEADING 2', True),
(' 1 c Green pepper; chopped 1/8 t Hot pepper sauce', False),
(' 1 T Oil ', False),
('HEADING 3', True),
(' 7 oz Jack/Mozz. cheese slices 1/2 c Parmesan cheese; grated', False),
]
expected = [
'----- HEADING 1 -----',
'{1 1/2} {lb} {Hamburger}',
'{1} {c} {Onion; chopped}',
'{1} {ds} {Salt}',
'{1/2} {c} {Water}',
'----- HEADING 2 -----',
'{1} {c} {Green pepper; chopped}',
'{1} {T} {Oil}',
'{1/8} {t} {Hot pepper sauce}',
'----- HEADING 3 -----',
'{7} {oz} {Jack/Mozz. cheese slices}',
'{1/2} {c} {Parmesan cheese; grated}',
]
actual = [repr(i) for i in mmf._get_ingredients(lines)]
self.assertEqual(actual, expected)
def test_when_one_column_with_line_continuations(self):
lines = [
(' 1 ts Salt', False),
(' Fresh ground', False),
(' -black pepper to', False),
(' -taste', False),
(' 1 cn (6-oz) tomato paste', False),
(' 1 cn (30-oz) red kidney beans', False),
(' -drained', False),
]
expected = [
'{1} {ts} {Salt}',
'{} {} {Fresh ground black pepper to taste}',
'{1} {cn} {(6-oz) tomato paste}',
'{1} {cn} {(30-oz) red kidney beans drained}',
]
actual = [repr(i) for i in mmf._get_ingredients(lines)]
self.assertEqual(actual, expected)
def test_when_two_columns_with_line_continuations(self):
lines = [
(' 1 lg Artichoke; -=OR=- - and thinly sliced', False),
(' 2 md -Artichokes 6 Leaves butter lettuce', False),
(' 1 c Water; acidulated with - sliced into 1/4" strips', False),
(' - the juice of -=OR=- a handful of', False),
(' 1 Lemon - Sorrel leaves, sliced', False),
(' 2 Garlic cloves 1 tb Chopped parsley', False),
(' 1 tb Virgin olive oil 2 Mint leaves; chopped', False),
(' 1 lg Leek; white part only -=OR=- Salt', False),
(' 2 md Leeks, white part only 5 1/2 c Water', False),
(' - washed and sliced 1 lb Fresh peas; shucked, -=OR=-', False),
(' 1 sm New potato; quartered 1 c -Frozen peas', False),
]
expected = [
'{1} {lg} {Artichoke; -=OR=-}',
'{2} {md} {-Artichokes}',
'{1} {c} {Water; acidulated with the juice of}',
'{1} {} {Lemon}',
'{2} {} {Garlic cloves}',
'{1} {tb} {Virgin olive oil}',
'{1} {lg} {Leek; white part only -=OR=-}',
'{2} {md} {Leeks, white part only washed and sliced}',
'{1} {sm} {New potato; quartered and thinly sliced}',
'{6} {} {Leaves butter lettuce sliced into 1/4" strips =OR=- a handful of Sorrel leaves, sliced}',
'{1} {tb} {Chopped parsley}',
'{2} {} {Mint leaves; chopped}',
'{} {} {Salt}',
'{5 1/2} {c} {Water}',
'{1} {lb} {Fresh peas; shucked, -=OR=-}',
'{1} {c} {-Frozen peas}',
]
actual = [repr(i) for i in mmf._get_ingredients(lines)]
self.assertEqual(actual, expected)
class TestParagraphizeDirections(unittest.TestCase):
def test_when_none(self):
lines = []
expected = []
actual = mmf._paragraphize_directions(lines)
self.assertEqual(actual, expected)
def test_when_empty(self):
lines = ['']
expected = []
actual = mmf._paragraphize_directions(lines)
self.assertEqual(actual, expected)
def test_when_single_line(self):
lines = [' Brown cut up pieces of meat.']
expected = ['Brown cut up pieces of meat.']
actual = mmf._paragraphize_directions(lines)
self.assertEqual(actual, expected)
def test_when_extra_lines(self):
lines = [' ', ' Brown cut up pieces of meat.', ' ']
expected = ['Brown cut up pieces of meat.']
actual = mmf._paragraphize_directions(lines)
self.assertEqual(actual, expected)
def test_when_more_extra_lines(self):
lines = [
' ',
' ',
' Brown cut up pieces of meat.',
' ',
' Brown cut up pieces of meat!',
' ',
' ',
' ',
]
expected = [
'Brown cut up pieces of meat.',
'Brown cut up pieces of meat!',
]
actual = mmf._paragraphize_directions(lines)
self.assertEqual(actual, expected)
def test_when_paragraph(self):
lines = [
' Brown cut up pieces of meat.Season with chili powder,salt and black',
' pepper.Add chopped vegetables and V - 8 vegetable juice. Add ketchup',
' and Worcestershire sauce to taste.',
]
expected = [
'Brown cut up pieces of meat.Season with chili powder,salt and black pepper.Add chopped vegetables and V - 8 vegetable juice. Add ketchup and Worcestershire sauce to taste.',
]
actual = mmf._paragraphize_directions(lines)
self.assertEqual(actual, expected)
def test_when_multiple_paragraphs(self):
lines = [
' The kind of chiles that you use determine the final flavor, you can',
' experiment with different kinds or mixing the different kinds of chiles.',
' But this is the basic recipe for prepare salsas with dry chiles.',
' ',
' Wash the chiles in water and discard the seeds and threads of chiles. Let',
' stand in water at least 2 or 3 hours or all the night, if you do not have',
' time let the chiles in warm water at least 30 min.',
' ',
' Then ground with the other ingredients.',
]
expected = [
'The kind of chiles that you use determine the final flavor, you can experiment with different kinds or mixing the different kinds of chiles. But this is the basic recipe for prepare salsas with dry chiles.',
'Wash the chiles in water and discard the seeds and threads of chiles. Let stand in water at least 2 or 3 hours or all the night, if you do not have time let the chiles in warm water at least 30 min.',
'Then ground with the other ingredients.',
]
actual = mmf._paragraphize_directions(lines)
self.assertEqual(actual, expected)
def test_when_multiple_paragraphs_separated_by_paragraph_marker(self):
lines = [
' The kind of chiles that you use determine the final flavor, you can',
' experiment with different kinds or mixing the different kinds of chiles.',
' But this is the basic recipe for prepare salsas with dry chiles.\x14',
' Wash the chiles in water and discard the seeds and threads of chiles. Let',
' stand in water at least 2 or 3 hours or all the night, if you do not have',
' time let the chiles in warm water at least 30 min.\x14',
' Then ground with the other ingredients.',
]
expected = [
'The kind of chiles that you use determine the final flavor, you can experiment with different kinds or mixing the different kinds of chiles. But this is the basic recipe for prepare salsas with dry chiles.',
'Wash the chiles in water and discard the seeds and threads of chiles. Let stand in water at least 2 or 3 hours or all the night, if you do not have time let the chiles in warm water at least 30 min.',
'Then ground with the other ingredients.',
]
actual = mmf._paragraphize_directions(lines)
self.assertEqual(actual, expected)
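# Reference sketch only (not the mmf module's actual implementation),
# consistent with the TestParagraphizeDirections cases above: direction lines
# are joined with single spaces into paragraphs, where a blank line or a
# trailing \x14 marker ends the current paragraph.
def _paragraphize_directions_sketch(lines):
    paragraphs, current = [], []
    for line in lines:
        line = line.strip()
        ends_paragraph = line.endswith('\x14')
        line = line.rstrip('\x14').strip()
        if line:
            current.append(line)
        if (not line or ends_paragraph) and current:
            paragraphs.append(' '.join(current))
            current = []
    if current:
        paragraphs.append(' '.join(current))
    return paragraphs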
if __name__ == '__main__':
unittest.main() |
import os
import pynoteslib as nl
def test_save_plaintext():
nl.get_config()
n = nl.note_from_plaintext("text for testing PT save")
n.title = "testing PT save"
n.save_plaintext()
assert os.path.exists(nl.get_note_fullpath("testing_PT_save"))
os.remove(nl.get_note_fullpath("testing_PT_save", "Notes"))
|
# activate theano on gpu
import os;
#os.environ['THEANO_FLAGS'] = "device=gpu";
#import theano;
#theano.config.floatX = 'float32';
import numpy as np;
import sys
import gzip;
from six.moves import cPickle;
from vae_conv import conv_variational_autoencoder;
from keras import backend as K;
from scipy.stats import norm;
# define parameters;
# no of trajectory files and frames in each file;
n_traj = 2;
f_traj = 10000;
# fraction of train, test and pred data separation;
sep_train = 0.8;
sep_test = 0.9;
sep_pred = 1;
# choice to flatten data: "0" for NO & "1" for YES;
choice = 0;
# row and column dimension for each frame;
row = 21;
col =21;
# padding: use this in case of dimension mismatch for the encoders;
# pad_row and pad_col are the number of rows or columns to be added;
pad_row = 1;
pad_col = 1;
# define parameters for variational autoencoder - convolutional;
channels = 1;
batch_size = 1000;
conv_layers = 3;
feature_maps = [128,128,128,128];
filter_shapes = [(3,3),(3,3),(3,3),(3,3)];
strides = [(1,1),(2,2),(1,1),(1,1)];
dense_layers = 1;
dense_neurons = [128];
dense_dropouts = [0];
latent_dim = 3;
epochs = 1;
nb_start = 0;
nb_end = 50;
#Unique
########################################
# loading section;
nb_select = 10;
load_step = 10;
load_start = nb_select;
load_end = nb_end+1;
# number of digits for decoding;
n_dec = 10;
# which image to pick to decode;
pick = 400;
# figure with 10x10 digits for generator images;
n_d = 10;
n1 = 0;
########################################
# end define parameters;
# opening file;
# Unique
##########################################
# load data for labelling;
label = np.loadtxt("./../native-contact/data/cont-mat.dat");
##########################################
# open pickled file;
#with gzip.open('./aligned_fs-peptide_coor.pkl.gz', 'rb') as f3:
# (X) = cPickle.load(f3)
#x_raw = X;
#print "dataset dimension:", np.shape(x_raw);
# open dat file;
path_data_array = "./../native-contact/data/cont-mat.array";
# read the large .dat-type file line by line and store it in an array
nf = n_traj*f_traj;
q = row*col;
j_count = 0;
k_count = 0;
samples = (nf);
row_num = (nf)*row;
column_num = (col);
array_f_int = np.zeros(shape=(row_num,column_num));
with open(path_data_array) as infile:
for line in infile:
array_f_string = line.split();
array_f_array = np.array(list(array_f_string), dtype='|S4');
array_f_float = array_f_array.astype(np.float);
array_f_int[j_count] = array_f_float;
if j_count == k_count:
print 'frames read:', (j_count/row);
k_count = k_count + 10000*row;
j_count = j_count + 1;
if j_count == (row_num):
break;
print "initial matrix array dimension:", np.shape(array_f_int);
array_f = np.reshape(array_f_int, (samples, row, col));
print "final matrix array dimension:", np.shape(array_f);
x_raw = array_f[0:];
print "dataset dimension:", np.shape(x_raw);
##########################################################################################################
##########################################################################################################
##########################################################################################################
# process of input data;
# padding;
row_dim_array = row + pad_row;
col_dim_array = col + pad_col;
# reshape data according to the choice of flatteing;
if choice == 0:
new_shape = (len(x_raw),row_dim_array,col_dim_array)
if choice == 1:
new_shape = (len(x_raw),row_dim_array*col_dim_array)
add_zero = np.zeros(new_shape,dtype = x_raw.dtype);
if choice == 0:
add_zero[0:x_raw.shape[0],0:x_raw.shape[1],0:x_raw.shape[2]] = x_raw
if choice == 1:
add_zero[0:x_raw.shape[0],0:x_raw.shape[1]] = x_raw
x_raw = add_zero;
# determine size for training, testing & prediction data;
sep_1 = int(x_raw.shape[0]*sep_train);
sep_2 = int(x_raw.shape[0]*sep_test);
sep_3 = int(x_raw.shape[0]*sep_pred);
x_train_raw = x_raw[:sep_1];
x_test_raw = x_raw[sep_1:sep_2];
x_pred_raw = x_raw[sep_2:sep_3];
print "shape to load:", "train:", np.shape(x_train_raw), "test:", np.shape(x_test_raw), "prediction:", np.shape(x_pred_raw);
# start variational autoencoder - convolutional;
# create directories;
path_1 = "./fig"
path_2 = "./imgs"
path_3 = "./hist"
path_4 = "./model"
if not os.path.exists(path_1):
os.mkdir(path_1, 0755);
if not os.path.exists(path_2):
os.mkdir(path_2, 0755);
if not os.path.exists(path_3):
os.mkdir(path_3, 0755);
if not os.path.exists(path_4):
os.mkdir(path_4, 0755);
print "compledted directories creation or if already exist - then checked";
# load data;
print "loading data";
# normalizing input image matrix;
X_train = x_train_raw.astype('float32') / np.amax(x_train_raw);
X_test = x_test_raw.astype('float32') / np.amax(x_test_raw);
X_pred = x_pred_raw.astype('float32') / np.amax(x_pred_raw);
print "shape of data loaded:", "train:", np.shape(X_train), "test:", np.shape(X_test);
# reshape to 4d tensors;
image_size = X_train.shape[-2:];
if K.image_dim_ordering() == 'th' or K.image_dim_ordering() == 'channels_first':
tensor_shape = (1,image_size[0],image_size[1])
else:
tensor_shape = (image_size[0],image_size[1],1)
X_train = X_train.reshape((X_train.shape[0],) + tensor_shape);
X_test = X_test.reshape((X_test.shape[0],) + tensor_shape);
print "reshaped data:", "train:", np.shape(X_train), "test:", np.shape(X_test);
# build autoencoder;
print "building variational autoencoder";
# set up parameter;
feature_maps = feature_maps[0:conv_layers];
filter_shapes = filter_shapes[0:conv_layers];
strides = strides[0:conv_layers];
autoencoder = conv_variational_autoencoder(image_size,channels,conv_layers,feature_maps,
filter_shapes,strides,dense_layers,dense_neurons,dense_dropouts,latent_dim);
#UNIQUE
#####################################################################
# load data to analyze;
conv_full_train = X_train[0:];
conv_full_test = X_test[0:];
conv_full_pred = X_pred[0:];
label = label[:len(x_raw)];
y_train_0 = label[:sep_1,0];
y_train_2 = label[:sep_1,2];
y_test_0 = label[sep_1:sep_2,0];
y_test_2 = label[sep_1:sep_2,2];
y_pred_0 = label[sep_2:sep_3,0];
y_pred_2 = label[sep_2:sep_3,2];
# pixel size of decoded figure;
row_dim = row_dim_array-pad_row;
col_dim = col_dim_array-pad_col;
# for generator images (for latent space = nD);
z_axis = np.arange(latent_dim-2);
# print "plot starts";
for load in range(load_start, load_end, load_step):
print "**********************************************loading", load;
# loading model;
autoencoder.load("./model/model_%i" %load);
####################################################################
print "decode image for train data";
# decode images;
decoded_imgs_full = autoencoder.decode(conv_full_train);
    # save decoded array to file;
np.savetxt('./imgs/decoded_train_%i.out' %load, np.reshape(decoded_imgs_full[:, 0:row_dim, 0:col_dim, :],
(len(decoded_imgs_full), (row_dim*col_dim))), fmt='%f');
# plot decoded images;
import matplotlib.pyplot as plt;
plt.switch_backend('agg');
plt.figure(figsize=(20, 4));
for i in range (n_dec):
# display original;
ax = plt.subplot(2, n_dec, i + 1);
plt.imshow(conv_full_train[i+pick, 0:row_dim, 0:col_dim, :].reshape(row_dim, col_dim));
np.savetxt('./imgs/original_imgs_train_%i_%i.out' %(i,load),
(conv_full_train[i+pick, 0:row_dim, 0:col_dim, :].reshape(row_dim, col_dim)));
plt.colorbar(orientation='vertical');
ax.get_xaxis().set_visible(False);
ax.get_yaxis().set_visible(False);
# display reconstruction;
ax = plt.subplot(2, n_dec, i + 1 + n_dec);
plt.imshow(decoded_imgs_full[i+pick, 0:row_dim, 0:col_dim, :].reshape(row_dim, col_dim));
np.savetxt('./imgs/decoded_imgs_train_%i_%i.out' %(i,load),
(decoded_imgs_full[i+pick, 0:row_dim, 0:col_dim, :].reshape(row_dim, col_dim)));
plt.colorbar(orientation='vertical');
ax.get_xaxis().set_visible(False);
ax.get_yaxis().set_visible(False);
plt.savefig('./fig/decoded_train_%i.png' %load, dpi=600);
plt.clf();
####################################################################
print "decode image for test data";
# decode images;
decoded_imgs_full = autoencoder.decode(conv_full_test);
    # save decoded array to file;
np.savetxt('./imgs/decoded_test_%i.out' %load, np.reshape(decoded_imgs_full[:, 0:row_dim, 0:col_dim, :],
(len(decoded_imgs_full), (row_dim*col_dim))), fmt='%f');
# plot decoded images;
import matplotlib.pyplot as plt;
plt.figure(figsize=(20, 4));
for i in range (n_dec):
# display original;
ax = plt.subplot(2, n_dec, i + 1);
plt.imshow(conv_full_train[i+pick, 0:row_dim, 0:col_dim, :].reshape(row_dim, col_dim));
np.savetxt('./imgs/original_imgs_test_%i_%i.out' %(i,load),
(conv_full_train[i+pick, 0:row_dim, 0:col_dim, :].reshape(row_dim, col_dim)));
plt.colorbar(orientation='vertical');
ax.get_xaxis().set_visible(False);
ax.get_yaxis().set_visible(False);
# display reconstruction;
ax = plt.subplot(2, n_dec, i + 1 + n_dec);
plt.imshow(decoded_imgs_full[i+pick, 0:row_dim, 0:col_dim, :].reshape(row_dim, col_dim));
np.savetxt('./imgs/decoded_imgs_test_%i_%i.out' %(i,load),
(decoded_imgs_full[i+pick, 0:row_dim, 0:col_dim, :].reshape(row_dim, col_dim)));
plt.colorbar(orientation='vertical');
ax.get_xaxis().set_visible(False);
ax.get_yaxis().set_visible(False);
plt.savefig('./fig/decoded_test_%i.png' %load, dpi=600);
plt.clf();
####################################################################
print "encode image for train data";
# encode images;
# project inputs on the latent space;
x_pred_encoded = autoencoder.return_embeddings(conv_full_train);
# save encoded array to file ;
np.savetxt('./imgs/encoded_train_%i.out' %load, x_pred_encoded, fmt='%f');
# plot 1:
Dmax = y_train_2;
[n,s] = np.histogram(Dmax, 11);
d = np.digitize(Dmax, s);
#[n,s] = np.histogram(-np.log10(Dmax), 11);
#d = np.digitize(-np.log10(Dmax), s);
from matplotlib import cm;
import matplotlib as mpl;
cmi = plt.get_cmap('jet');
cNorm = mpl.colors.Normalize(vmin=min(Dmax), vmax=max(Dmax));
#cNorm = mpl.colors.Normalize(vmin=140, vmax=240);
scalarMap = mpl.cm.ScalarMappable(norm=cNorm, cmap=cmi);
import numpy as np;
from mpl_toolkits.mplot3d import Axes3D;
import matplotlib.pyplot as plt;
fig = plt.figure();
ax = fig.add_subplot(111, projection='3d');
# scatter3D requires a 1D array for x, y, and z;
# ravel() converts the 100x100 array into a 1x10000 array;
p = ax.scatter3D(np.ravel(x_pred_encoded[:, 0]),
np.ravel(x_pred_encoded[:, 1]),
np.ravel(x_pred_encoded[:, 2]),
marker='o', c=scalarMap.to_rgba(Dmax));
ax.set_xlim3d(np.amin(np.ravel(x_pred_encoded[:, 0])), np.amax(np.ravel(x_pred_encoded[:, 0])));
ax.set_ylim3d(np.amin(np.ravel(x_pred_encoded[:, 1])), np.amax(np.ravel(x_pred_encoded[:, 1])));
ax.set_zlim3d(np.amin(np.ravel(x_pred_encoded[:, 2])), np.amax(np.ravel(x_pred_encoded[:, 2])));
ax.set_xlabel('VAE 0');
ax.set_ylabel('VAE 1');
ax.set_zlabel('VAE 2');
scalarMap.set_array(Dmax);
fig.colorbar(scalarMap);
plt.savefig('./fig/encoded_train_%i.png' %load, dpi=600);
plt.clf();
####################################################################
print "encode image for test data";
# encode images;
# project inputs on the latent space;
x_pred_encoded = autoencoder.return_embeddings(conv_full_test);
# save encoded array to file ;
np.savetxt('./imgs/encoded_test_%i.out' %load, x_pred_encoded, fmt='%f');
# plot 1:
Dmax = y_test_2;
[n,s] = np.histogram(Dmax, 11);
d = np.digitize(Dmax, s);
#[n,s] = np.histogram(-np.log10(Dmax), 11);
#d = np.digitize(-np.log10(Dmax), s);
from matplotlib import cm;
import matplotlib as mpl;
cmi = plt.get_cmap('jet');
cNorm = mpl.colors.Normalize(vmin=min(Dmax), vmax=max(Dmax));
#cNorm = mpl.colors.Normalize(vmin=140, vmax=240);
scalarMap = mpl.cm.ScalarMappable(norm=cNorm, cmap=cmi);
import numpy as np;
from mpl_toolkits.mplot3d import Axes3D;
import matplotlib.pyplot as plt;
fig = plt.figure();
ax = fig.add_subplot(111, projection='3d');
# scatter3D requires a 1D array for x, y, and z;
# ravel() converts the 100x100 array into a 1x10000 array;
p = ax.scatter3D(np.ravel(x_pred_encoded[:, 0]),
np.ravel(x_pred_encoded[:, 1]),
np.ravel(x_pred_encoded[:, 2]),
marker='o', c=scalarMap.to_rgba(Dmax));
ax.set_xlim3d(np.amin(np.ravel(x_pred_encoded[:, 0])), np.amax(np.ravel(x_pred_encoded[:, 0])));
ax.set_ylim3d(np.amin(np.ravel(x_pred_encoded[:, 1])), np.amax(np.ravel(x_pred_encoded[:, 1])));
ax.set_zlim3d(np.amin(np.ravel(x_pred_encoded[:, 2])), np.amax(np.ravel(x_pred_encoded[:, 2])));
ax.set_xlabel('VAE 0');
ax.set_ylabel('VAE 1');
ax.set_zlabel('VAE 2');
scalarMap.set_array(Dmax);
fig.colorbar(scalarMap);
plt.savefig('./fig/encoded_test_%i.png' %load, dpi=600);
plt.clf();
####################################################################
print "generate image";
# building generator;
# build a digit generator that can sample from the learned distribution;
# display a 2D manifold of the digits;
# linearly spaced coordinates on the unit square were transformed through the inverse CDF (ppf) of the Gaussian
# to produce values of the latent variables z, since the prior of the latent space is Gaussian;
figure = np.zeros((row_dim * n_d, col_dim * n_d));
grid_x = norm.ppf(np.linspace(0.05, 0.95, n_d));
grid_y = norm.ppf(np.linspace(0.05, 0.95, n_d));
for i, yi in enumerate(grid_x):
for j, xi in enumerate(grid_y):
n1 = n1 + 1;
z_sample = np.append([xi, yi], [z_axis]);
z_sample = np.tile(z_sample, batch_size).reshape(batch_size, latent_dim);
x_decoded = autoencoder.generate(z_sample);
digit = x_decoded[0, 0:row_dim, 0:col_dim, :].reshape(row_dim, col_dim);
# saving generated array to file;
# np.savetxt('./generated/digit_%i.out' %n1, digit, fmt='%f');
figure[i * row_dim: (i + 1) * row_dim,
j * col_dim: (j + 1) * col_dim] = digit;
plt.figure(figsize=(10, 10));
plt.imshow(figure);
plt.savefig('./fig/generated_%i.png' %load, dpi=600);
plt.clf();
###############################################################################
# END UNIQUE
|
from django.test import TestCase
from api.aqi_converter import AqiConverter
class ApiConverter(TestCase):
def test_pm1_micrograms_to_aqi(self):
converter = AqiConverter()
self.assertEqual(converter.pm1_micrograms_to_aqi(5), 4)
|
import pyembroidery as emb
def h_straight(pattern, x, y, dx, nx):
    # Stitch a horizontal run from (x, y) to (x + nx, y) in steps of dx.
    pattern.add_stitch_absolute(emb.JUMP, x, y)
    last = x
    for xi in range(x, x + nx, dx):
        pattern.add_stitch_absolute(emb.STITCH, xi, y)
        last = xi
    if last != x + nx:
        pattern.add_stitch_absolute(emb.STITCH, x + nx, y)
    pattern.add_command(emb.STOP)
def v_straight(pattern, x, y, dy, ny):
    # Stitch a vertical run from (x, y) to (x, y + ny) in steps of dy.
    pattern.add_stitch_absolute(emb.JUMP, x, y)
    last = y
    for yi in range(y, y + ny, dy):
        pattern.add_stitch_absolute(emb.STITCH, x, yi)
        last = yi
    if last != y + ny:
        pattern.add_stitch_absolute(emb.STITCH, x, y + ny)
    pattern.add_command(emb.STOP)
def square(pattern, x, y, size, step):
    # Outline a square with its top-left corner at (x, y). The original code
    # called an undefined square(); this helper reconstructs that intent from
    # the two straight-line helpers above.
    h_straight(pattern, x, y, step, size)
    h_straight(pattern, x, y + size, step, size)
    v_straight(pattern, x, y, step, size)
    v_straight(pattern, x + size, y, step, size)
def squares(pattern, step=10, size=200):
    # Draw a series of nested square outlines. 'del' is a reserved word in
    # Python, so the original parameter is renamed to 'step'; 'size' is an
    # assumed side length for the outermost square.
    for inset in range(10, 50, step):
        square(pattern, inset, inset, size - 2 * inset, step)
    pattern.add_command(emb.END)
    return pattern
pattern = squares(emb.EmbPattern())
emb.write(pattern, "squares.pes")
|
# -*- coding: utf-8 -*-
# @Time : 5/30/2018 2:26 PM
# @Author : sunyonghai
# @File : six2one2.py
# @Software: ZJ_AI
import argparse
import itertools
import json
import logging
import os
import random
from xml.dom.minidom import parseString
import cv2
import numpy as np
from PIL import Image
from lxml.etree import Element, SubElement, tostring
import config
import io_utils
try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
def _read_class_mapping(path):
with open(path, 'r') as f:
# names_to_labels
data = json.load(f)
return data
def names_to_labels():
result = _read_class_mapping(config.LABEL_MAPPING_PATH)
return result
voc_classes = names_to_labels()
classes = voc_classes
labels = {}
for key, value in classes.items():
labels[value] = key
def num_classes():
return len(classes)
def name_to_label(name):
return int(classes[name])
def label_to_name(label):
return labels[label]
def resize_image(img, grid_width=100, grid_height=100):
    if img is None or grid_width <= 0 or grid_height <= 0:
        return
    img = np.asarray(img)
    (h, w, _) = img.shape  # numpy image arrays are (height, width, channels)
    scale = random.random()
    scale = 0.5 if scale < 0.5 else scale
try:
while (w > grid_width or h > grid_height):
img = cv2.resize(img, (int(w*scale), int(h*scale)))
(h, w, _) = img.shape
except Exception as ex:
logger.error('{}'.format(ex))
return img
def read_image_rgb(path):
    try:
        image = Image.open(path)
    except Exception as ex:
        # re-raise so a failed open is reported instead of hitting an
        # unbound 'image' below
        logger.error('{}: {}'.format(path, ex))
        raise
    return image.copy()
def load_bg(path):
return read_image_rgb(path)
def load_image(image_index):
path = labels_images_paths[image_index][1]
return read_image_rgb(path)
def load_image_group(group):
return [load_image(image_index) for image_index in group]
def labels_image_paths(labels_images_dir):
labels_image_paths ={}
try:
for f in os.listdir(labels_images_dir):
sub_folder = os.path.join(labels_images_dir, f)
for label_img in os.listdir(sub_folder):
key = '{}:{}'.format(f, label_img)
value = os.path.join(labels_images_dir, sub_folder, label_img)
labels_image_paths[key] = value
    except Exception as ex:
        logger.error('labels_image_paths convert error:\n{}'.format(ex))
return sorted(labels_image_paths.items(), key=lambda item: item[0])
def background(bg_dir):
bg_paths=[]
try:
for bg_file in os.listdir(bg_dir):
bg_path = os.path.join(bg_dir, bg_file)
bg_paths.append(bg_path)
except Exception as ex:
logger.info('{}'.format(ex))
return bg_paths
def group_images(length):
    order = list(range(length))
random.shuffle(order)
# divide into groups, one group = one batch
groups = [[order[x % len(order)] for x in range(i, i + batch_size)] for i in range(0, len(order), batch_size)]
return groups
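# For example, with batch_size = 4 and six label images, one possible shuffle
# [3, 0, 5, 1, 2, 4] yields groups [[3, 0, 5, 1], [2, 4, 3, 0]]: the modulo
# wraps indices around so every group has exactly batch_size entries.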
def preprocess_image(image):
return image
def random_transform(image):
return image
def preprocess_group_entry(image):
# preprocess the image
image = preprocess_image(image)
# randomly transform image and annotations
image = random_transform(image)
return image
def preprocess_group(image_group):
for index, image in enumerate(image_group):
# preprocess a single group entry
image = preprocess_group_entry(image)
# copy processed data back to group
image_group[index] = image
return image_group
def calu_box(image, xmin, ymin):
box = np.zeros((1, 5),dtype=np.int)
xmax = xmin + image.size[0]
ymax = ymin + image.size[1]
box[0, 0] = int(xmin)
box[0, 1] = int(ymin)
box[0, 2] = int(xmax)
box[0, 3] = int(ymax)
return box
def fusion(bg_img, image_group, group):
    if bg_img is None:
return None, None
fusion_img = bg_img.copy()
boxes = np.zeros((0, 5), dtype=np.int)
grid_width, grid_height = fusion_img.size[0]//(batch_size//2), fusion_img.size[1]//2
row, col = 0, 0
for idx, image in enumerate(image_group):
img = resize_image(image, grid_width * ratio, grid_height* ratio)
xmin = grid_width * col + grid_width * (1 - ratio)
ymin = grid_height * row + grid_height * (1 - ratio)
img = Image.fromarray(img)
box = calu_box(img, xmin, ymin)
class_name = labels_images_paths[group[idx]][0]
label = name_to_label(class_name.split(':')[0])
box[0, 4] = label
temp_box = [box[0,0],box[0,1]]
fusion_img.paste(img, temp_box)
boxes = np.append(boxes, box, axis=0)
col+=1
if(col == batch_size//2):
row+=1
col=0
return fusion_img, boxes
def next_group():
global group_index
if group_index == 0:
# shuffle groups at start of epoch
random.shuffle(groups)
group = groups[group_index]
group_index = (group_index + 1) % len(groups)
return group
def next_bg():
curr_bg = next(bg_paths_cycle)
return curr_bg
def next_fusion_name():
global name
str_name = "fusion" + "_" + '2018-05-31' + "_" + str(name)
name+=1
return str_name
def print_name(i):
print(next_fusion_name())
# save data(image and annotations)
def save_image(name, image):
try:
path =os.path.join(output,'JPEGImages', name+".jpg")
image.save(path, 'jpeg')
except Exception as ex:
logger.error('{}\n{}'.format(ex, path))
def save_annotations(name, size, annotations):
dom = create_xml(name, size, annotations)
write_xml(name, dom)
def check_border(bbox, width, height):
if len(bbox) <4:
return
if bbox[0] <= 0.0:
bbox[0]= 1
if bbox[1] <= 0.0:
bbox[1] = 1
if bbox[2] >= width:
bbox[2] = width - 1
if bbox[3] >= height:
bbox[3] = height - 1
def create_xml(name, size, annotations):
node_root = Element('annotation')
node_folder = SubElement(node_root, 'folder')
node_folder.text = 'JPEGImages'
node_filename = SubElement(node_root, 'filename')
filename = name + ".jpg"
node_filename.text = filename
node_path = SubElement(node_root, 'path')
node_path.text = ''
node_size = SubElement(node_root, 'size')
node_width = SubElement(node_size, 'width')
node_width.text = str(size[0])
node_height = SubElement(node_size, 'height')
node_height.text = str(size[1])
node_depth = SubElement(node_size, 'depth')
node_depth.text = str(3)
node_segmented = SubElement(node_root, 'segmented')
node_segmented.text = '0'
for box in annotations:
check_border(box, size[0],size[1])
node_object = SubElement(node_root, 'object')
node_name = SubElement(node_object, 'name')
caption = labels[box[4]]
node_name.text = caption
node_pose = SubElement(node_object, 'pose')
node_pose.text = 'Unspecified'
node_truncated = SubElement(node_object, 'truncated')
node_truncated.text = '0'
node_difficult = SubElement(node_object, 'difficult')
node_difficult.text = '0'
node_bndbox = SubElement(node_object, 'bndbox')
node_xmin = SubElement(node_bndbox, 'xmin')
node_xmin.text = str(int(box[0]))
node_ymin = SubElement(node_bndbox, 'ymin')
node_ymin.text = str(int(box[1]))
node_xmax = SubElement(node_bndbox, 'xmax')
node_xmax.text = str(int(box[2]))
node_ymax = SubElement(node_bndbox, 'ymax')
node_ymax.text = str(int(box[3]))
xml = tostring(node_root, pretty_print=True)
dom = parseString(xml)
return dom
def write_xml(name, dom):
filename = name + '.xml'
xml_path = os.path.join(output, 'Annotations', filename)
with open(xml_path, 'w+') as f:
dom.writexml(f, addindent='', newl='', encoding='utf-8')
def process():
# load bg
bg_image = load_bg(next_bg())
# load group(six label image)
group = next_group()
image_group = load_image_group(group)
# start preprocess
image_group = preprocess_group(image_group)
# fusion
fusion_img, boxes = fusion(bg_image,image_group, group)
if fusion_img is not None and boxes is not None:
# save image
name = next_fusion_name()
save_image(name, fusion_img)
# save annotations
save_annotations(name, fusion_img.size, boxes)
# finish
logger.info('{} save successfully'.format(name))
else:
logger.info('{}'.format('bag bg'))
def single_process():
global steps, Epochs
for _ in range(Epochs):
for _ in range(steps):
process()
parser = argparse.ArgumentParser(description='Get the data info')
parser.add_argument('-b', '--bg',help='directory of data path', default= '/home/syh/train_data/fusion/background_1333-800')
parser.add_argument('-c', '--crop_commdity',help='txt of path', default= '/home/syh/train_data/data/crop_commdity')
parser.add_argument('-o', '--output',help='output diretory', default='/home/syh/train_data/fusion/fusion_train_data')
# # Test data
# parser.add_argument('-b', '--bg',help='directory of data path', default= '/home/syh/train_data/fusion_test/background')
# parser.add_argument('-c', '--crop_commdity',help='txt of path', default= '/home/syh/train_data/fusion_test/crop_commdity')
# parser.add_argument('-o', '--output',help='output diretory', default='/home/syh/train_data/fusion_test/fusion_train_data')
args = parser.parse_args()
ratio = 0.9
bg_index = 0
group_index = 0
batch_size = 4
name = 80000
counter = 0
labels_images_dir = args.crop_commdity
bg_dir = args.bg
bg_paths = background(bg_dir)
bg_paths_cycle = itertools.cycle(bg_paths)
labels_images_paths = labels_image_paths(labels_images_dir)
groups = group_images(len(labels_images_paths))
output= args.output
Epochs = 1
steps = len(bg_paths)
def main():
JPEGImages_dir = os.path.join(output, 'JPEGImages')
Annotations_dir = os.path.join(output, 'Annotations')
if os.path.exists(JPEGImages_dir):
# io_utils.remove_all(JPEGImages_dir)
pass
else:
io_utils.mkdir(JPEGImages_dir)
if os.path.exists(Annotations_dir):
# io_utils.remove_all(Annotations_dir)
pass
else:
io_utils.mkdir(Annotations_dir)
single_process()
if __name__ == '__main__':
main() |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-12-12 02:52
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('dashboard', '0002_auto_20171209_1806'),
]
operations = [
migrations.RemoveField(
model_name='instruction',
name='step_num',
),
migrations.AlterField(
model_name='instruction',
name='instruction',
field=models.CharField(max_length=5000),
),
migrations.AlterUniqueTogether(
name='calendar',
unique_together=set([('date', 'user')]),
),
]
|
import pytest
from tests.functional.pages.hello import HelloPage
from tests.functional.utils import screenshot_on_failure
from tests.functional.utils import validate_redirect
url = "http://localhost:8000/hello/"
@pytest.mark.functional
@screenshot_on_failure
def test(browser, request):
page = HelloPage(browser, url)
assert page.greeting.text == "Hello, Anon!"
assert page.address.text == "Your location is XZ."
page.name_input.clear()
page.address_input.clear()
page.name_input.send_keys("Mike")
page.submit_button.click()
validate_redirect(page, url)
assert page.greeting.text == "Hello, Mike!"
assert page.address.text == "Your location is XZ."
# assert page.name_input.get_attribute("value") == "Mike"
page.name_input.clear()
page.address_input.clear()
page.address_input.send_keys("localhost")
page.submit_button.click()
validate_redirect(page, url)
assert page.greeting.text == "Hello, Anon!"
assert page.address.text == "Your location is localhost."
# assert page.address_input.get_attribute("value") == "localhost"
page.name_input.clear()
page.address_input.clear()
page.name_input.send_keys("Mike")
page.address_input.send_keys("localhost")
page.submit_button.click()
validate_redirect(page, url)
assert page.greeting.text == "Hello, Mike!"
assert page.address.text == "Your location is localhost."
# assert page.name_input.get_attribute("value") == "Mike"
# assert page.address_input.get_attribute("value") == "localhost"
|
from functools import partial
from pyramid.view import view_defaults
from pyramid.view import view_config
from sqlalchemy.orm import subqueryload
from sqlalchemy.sql.functions import concat
from c2cgeoform.schema import GeoFormSchemaNode
from c2cgeoform.views.abstract_views import ListField
from deform.widget import FormWidget
from c2cgeoportal_commons.models.main import Theme, Interface, Role, Functionality
from c2cgeoportal_admin.schemas.treegroup import children_schema_node
from c2cgeoportal_admin.schemas.functionalities import functionalities_schema_node
from c2cgeoportal_admin.schemas.metadata import metadatas_schema_node
from c2cgeoportal_admin.schemas.interfaces import interfaces_schema_node
from c2cgeoportal_admin.schemas.roles import roles_schema_node
from c2cgeoportal_admin.views.treeitems import TreeItemViews
_list_field = partial(ListField, Theme)
base_schema = GeoFormSchemaNode(Theme, widget=FormWidget(fields_template='theme_fields'))
base_schema.add(children_schema_node(only_groups=True))
base_schema.add(functionalities_schema_node.clone())
base_schema.add(roles_schema_node('restricted_roles'))
base_schema.add(interfaces_schema_node.clone())
base_schema.add(metadatas_schema_node.clone())
base_schema.add_unique_validator(Theme.name, Theme.id)
@view_defaults(match_param='table=themes')
class ThemeViews(TreeItemViews):
_list_fields = TreeItemViews._list_fields + [
_list_field('ordering'),
_list_field('public'),
_list_field('icon'),
_list_field(
'functionalities',
renderer=lambda themes: ', '.join(
['{}={}'.format(f.name, f.value)
for f in sorted(themes.functionalities, key=lambda f: f.name)]),
filter_column=concat(Functionality.name, '=', Functionality.value)
),
_list_field(
'restricted_roles',
renderer=lambda themes: ', '.join([r.name or '' for r in themes.restricted_roles]),
filter_column=Role.name
),
_list_field(
'interfaces',
renderer=lambda themes: ', '.join(
[i.name or '' for i in sorted(themes.interfaces, key=lambda i: i.name)]),
filter_column=Interface.name
)] + TreeItemViews._extra_list_fields_no_parents
_id_field = 'id'
_model = Theme
_base_schema = base_schema
def _base_query(self, query=None):
return super()._base_query(
self._request.dbsession.query(Theme).distinct().
outerjoin('interfaces').
outerjoin('restricted_roles').
outerjoin('functionalities').
options(subqueryload('functionalities')).
options(subqueryload('restricted_roles')).
options(subqueryload('interfaces')))
@view_config(route_name='c2cgeoform_index',
renderer='../templates/index.jinja2')
def index(self):
return super().index()
@view_config(route_name='c2cgeoform_grid',
renderer='fast_json')
def grid(self):
return super().grid()
@view_config(route_name='c2cgeoform_item',
request_method='GET',
renderer='../templates/edit.jinja2')
def view(self):
return super().edit()
@view_config(route_name='c2cgeoform_item',
request_method='POST',
renderer='../templates/edit.jinja2')
def save(self):
return super().save()
@view_config(route_name='c2cgeoform_item',
request_method='DELETE',
renderer='fast_json')
def delete(self):
return super().delete()
@view_config(route_name='c2cgeoform_item_duplicate',
request_method='GET',
renderer='../templates/edit.jinja2')
def duplicate(self):
return super().duplicate()
|
#!/usr/bin/python3
from distutils.core import setup
import setuptools # noqa
from ulozto_downloader import __version__
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name='ulozto-downloader',
version=__version__,
license='MIT',
description='Uloz.to quick multiple sessions downloader.',
long_description=long_description,
long_description_content_type="text/markdown",
author='Jiří Setnička',
author_email='[email protected]',
url='https://github.com/setnicka/ulozto-downloader',
install_requires=[
'requests',
'Pillow',
'ansicolors',
'numpy',
'torpy',
# Currently it is forbidden to upload packages to PyPI which depends on URL requirements... :(
# 'tflite_runtime @ https://github.com/google-coral/pycoral/releases/download/release-frogfish/tflite_runtime-2.5.0-cp36-cp36m-linux_x86_64.whl ; python_version == "3.6" and platform_system == "Linux" and platform_machine == "x86_64"',
# 'tflite_runtime @ https://github.com/google-coral/pycoral/releases/download/release-frogfish/tflite_runtime-2.5.0-cp37-cp37m-linux_x86_64.whl ; python_version == "3.7" and platform_system == "Linux" and platform_machine == "x86_64"',
# 'tflite_runtime @ https://github.com/google-coral/pycoral/releases/download/release-frogfish/tflite_runtime-2.5.0-cp38-cp38-linux_x86_64.whl ; python_version == "3.8" and platform_system == "Linux" and platform_machine == "x86_64"',
# 'tflite_runtime @ https://github.com/google-coral/pycoral/releases/download/release-frogfish/tflite_runtime-2.5.0-cp36-cp36m-win_amd64.whl ; python_version == "3.6" and platform_system == "Windows"',
# 'tflite_runtime @ https://github.com/google-coral/pycoral/releases/download/release-frogfish/tflite_runtime-2.5.0-cp37-cp37m-win_amd64.whl ; python_version == "3.7" and platform_system == "Windows"',
# 'tflite_runtime @ https://github.com/google-coral/pycoral/releases/download/release-frogfish/tflite_runtime-2.5.0-cp38-cp38-win_amd64.whl ; python_version == "3.8" and platform_system == "Windows"'
],
python_requires='>=3.6',
packages=setuptools.find_packages(),
classifiers=[
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
],
scripts=['ulozto-downloader']
)
|
import torch
from tensorboardX import SummaryWriter
import numpy as np
import os
from datetime import datetime
from multiprocessing import cpu_count
import gc
import random
import sys
os.chdir("style_transfer")
sys.path.append("./../models")
sys.path.append("./../general_modules")
from st_argument_parser_helper import parse_arguments
from st_train_helpers import init_dataloader, init_model, pprint_and_log
from my_losses import DecoderLoss
from my_train_helpers import init_optimizer, ssave
def train(args):
args.mode = "train"
# number of cpu and gpu devices
n_gpu = torch.cuda.device_count()
n_cpu = cpu_count()
print(f"Number of cuda devices: {n_gpu} | Number of CPU cores: {n_cpu}")
# specify main device and all devices (if gpu available)
device_list = [torch.device(f"cuda:{i}") for i in range(n_gpu)]
main_device = device_list[0] if n_gpu > 0 else torch.device("cpu")
print(f"Main device: {main_device}")
print(f"Parallel devices = {device_list}")
if args.deterministic:
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
np.random.seed(args.seed)
random.seed(args.seed)
torch.backends.cudnn.enabled = False
torch.backends.cudnn.deterministic = True
# initialize cuDNN backend
if args.cudnn_backend and n_gpu > 0:
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.enabled = True
torch.backends.cudnn.deterministic = False
    # paths for saving gradient flow and checkpoints
if args.saving:
checkpoint_path = os.path.join(args.checkpoint_path, args.run_name)
else:
checkpoint_path = None
# load checkpoint and fetch its arguments
if args.load_checkpoint:
        # most recent checkpoint if not specified (sorted, since os.listdir order is arbitrary)
        specific_checkpoint = sorted(os.listdir(checkpoint_path))[-1] if args.run_subname == "" else args.run_subname
specific_checkpoint_path = os.path.join(checkpoint_path, specific_checkpoint)
checkpoint = torch.load(specific_checkpoint_path, map_location = main_device)
args = checkpoint["args"]
print(f"Loaded arguments from {specific_checkpoint_path}")
else:
checkpoint = None
# initialize dataloader module
dataloader = init_dataloader(args)
# initialize masque model, optionally load checkpoint and wrap in DataParallel
model = init_model(args, dataloader, main_device, device_list, checkpoint)
    # initialize custom optimizer, optionally load state from checkpoint
optimizer, current_epoch, current_train_step, global_train_step = init_optimizer(args, model, dataloader, checkpoint)
results = {"loss": [],
"lambdas": {"vocab": [], "question": [], "qa_answer": [], "passage": []}}
# initialize summary writer
writer = SummaryWriter(os.path.join("runs", args.run_name)) if args.saving else None
    # initialize the loss function
loss_fn = DecoderLoss(dataloader.dataset.pad_idx, dataloader.dataset.unk_idx)
if n_gpu > 1:
loss_fn = torch.nn.DataParallel(loss_fn, device_ids = device_list, output_device = main_device)
loss_fn = loss_fn.to(main_device)
    # create folders for saving gradient flow and checkpoints if needed
if not bool(checkpoint) and args.saving:
os.mkdir(checkpoint_path)
gc.collect()
for epoch in range(current_epoch, args.max_epochs):
for train_step, batch in enumerate(dataloader, start = current_train_step):
global_train_step += 1
try:
take_train_step(batch, model, optimizer, loss_fn, main_device, results)
except RuntimeError as e:
# to catch OOM errors
print("[{}]".format(datetime.now().time().replace(microsecond = 0)), global_train_step, e)
del batch
gc.collect()
for device_id in range(n_gpu):
with torch.cuda.device(f"cuda:{device_id}"):
torch.cuda.empty_cache()
# empty cache after the first (optimizing) iteration
if args.cudnn_backend and global_train_step == 1:
gc.collect()
for device_id in range(n_gpu):
with torch.cuda.device(f"cuda:{device_id}"):
torch.cuda.empty_cache()
# print and log to the summary writer
if (not global_train_step % args.print_and_log_every) and global_train_step:
pprint_and_log(writer, results, global_train_step, optimizer.get_learning_rate())
results = {"loss": [],
"lambdas": {"vocab": [], "question": [], "qa_answer": [], "passage": []}}
# save checkpoint
if (not global_train_step % args.save_every) and global_train_step:
ssave(model, optimizer, args, epoch, current_train_step, global_train_step,
checkpoint_path, "ST_model")
current_train_step = 0
gc.collect()
print("[{}] Finished epoch {}".format(datetime.now().time().replace(microsecond = 0), epoch))
if bool(writer):
ssave(model, optimizer, args, epoch + 1, current_train_step, global_train_step,
checkpoint_path, "ST_model")
if writer is not None:
writer.close()
def take_train_step(batch, model, optimizer, loss_fn, device, results):
# Representations of the sequences in the fixed vocabulary (indices)
passage_fixed_vectors = batch[0].to(device) # 2d long tensor [batch_size x seq_len_passage]
    query_fixed_vectors = batch[1].to(device) # 2d long tensor [batch_size x seq_len_question]
qa_answer_fixed_vectors = batch[2].to(device) # 2d long tensor [batch_size x seq_len_qa]
nlg_answer_src_vectors = batch[3].to(device) # (2d long tensor) [batch_size x seq_len_nlg - 1]
# Representation of the NLG answer in the extended vocabulary (shifted, ends with eos token)
nlg_answer_trg_vectors = batch[4].to(device) # (2d long tensor) [batch_size x seq_len_nlg - 1]
    # Representation of the concatenation of passage, question and qa_answer in the extended vocabulary
source_ext_vectors = batch[5].to(device) # (2d long tensor) [batch_size x seq_len_passage + seq_len_question + seq_len_answer]
d_ext_vocab = source_ext_vectors.max().item() + 1
del batch
# forward pass
dec_scores, lambdas = model(passage_fixed_vectors, query_fixed_vectors, qa_answer_fixed_vectors,
source_ext_vectors, d_ext_vocab, nlg_answer_src_vectors)
# calculate loss per example
loss = loss_fn(dec_scores, nlg_answer_trg_vectors)
# add the average loss to the computational graph
loss.mean().backward()
# clip gradients
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
# apply gradients
optimizer.step()
optimizer.zero_grad()
# store losses and lambdas per example
with torch.no_grad():
lambda_vocab, lambda_question, lambda_qa_answer, lambda_passage = torch.split(lambdas.mean(dim = 1), 1, dim = -1)
results["loss"].extend(loss.tolist())
results["lambdas"]["vocab"].extend(lambda_vocab.tolist())
results["lambdas"]["question"].extend(lambda_question.tolist())
results["lambdas"]["qa_answer"].extend(lambda_qa_answer.tolist())
results["lambdas"]["passage"].extend(lambda_passage.tolist())
if __name__ == '__main__':
args = parse_arguments()
train(args)
|
dilation_shapes_test = [
(1, 3, 448, 448, 64, 7, 7, 2, 3, 2, 1),
(1, 64, 112, 112, 192, 3, 3, 1, 1, 2, 1),
(1, 192, 56, 56, 128, 1, 1, 1, 0, 2, 1),
(1, 128, 56, 56, 256, 3, 3, 1, 1, 2, 1),
]
dilation_shapes = [
# yolo
(1, 256, 56, 56, 256, 1, 1, 1, 0, 2, 1), # conv5 4
(1, 256, 56, 56, 512, 3, 3, 1, 1, 2, 1), # conv6 5
(1, 512, 28, 28, 256, 1, 1, 1, 0, 2, 1), # conv7 6
(1, 256, 28, 28, 512, 3, 3, 1, 1, 2, 1), # conv8 7
(1, 512, 28, 28, 512, 1, 1, 1, 0, 2, 1), # conv15 8
(1, 512, 28, 28, 1024, 3, 3, 1, 1, 2, 1), # conv16 9
(1, 1024, 14, 14, 512, 1, 1, 1, 0, 2, 1), # conv17 10
(1, 512, 14, 14, 1024, 3, 3, 1, 1, 2, 1), # conv18 11
(1, 1024, 14, 14, 1024, 3, 3, 1, 1, 2, 1), # conv21 12
(1, 1024, 14, 14, 1024, 3, 3, 2, 1, 2, 1), # conv22 13
(1, 1024, 7, 7, 1024, 3, 3, 1, 1, 2, 1), # conv23 14
] |
# -*- coding: utf-8 -*-
"""
Created on May 21, 2014
@author: HubbeKing, Tyranic-Moron
"""
from CommandInterface import CommandInterface
from IRCMessage import IRCMessage
from IRCResponse import IRCResponse, ResponseType
import GlobalVars
class Alias(CommandInterface):
triggers = ['alias']
help = 'alias <alias> <command> <params> - aliases <alias> to the specified command and parameters\n' \
           'you can specify where parameters given to the alias should be inserted with $1, $2, ..., $n ($n+ inserts parameter n and everything after it). ' \
'The whole parameter string is $0. $sender and $channel can also be used.'
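    # Hypothetical usage sketch (assumes 'say' is a loaded command and the sender is an admin):
    #   alias greet say hello $1, welcome to $channel
    #   greet Mike            -> the bot runs: say hello Mike, welcome to #somechannel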
aliases = {}
def onLoad(self):
pass
        # Here you can load aliases from your chosen storage. Just make sure to save them in newAlias as well.
def newAlias(self, alias, command):
self.aliases[alias] = command
def aliasedMessage(self, message):
if message.Command in self.aliases.keys():
alias = self.aliases[message.Command]
newMsg = message.MessageString.replace(message.Command, " ".join(alias), 1)
if "$sender" in newMsg:
newMsg = newMsg.replace("$sender", message.User.Name)
if "$channel" in newMsg:
newMsg = newMsg.replace("$channel", message.Channel)
newMsg = newMsg.replace(message.Parameters, "")
if "$0" in newMsg:
newMsg = newMsg.replace("$0", " ".join(message.ParameterList))
if len(message.ParameterList) >= 1:
for i, param in enumerate(message.ParameterList):
if newMsg.find("${}+".format(i+1)) != -1:
newMsg = newMsg.replace("${}+".format(i+1), " ".join(message.ParameterList[i:]))
else:
newMsg = newMsg.replace("${}".format(i+1), param)
return IRCMessage(message.Type, message.User.String, message.Channel, newMsg)
def shouldExecute(self, message):
return True
def execute(self, message):
"""
@type message: IRCMessage
"""
if message.Command in self.triggers:
if message.User.Name not in GlobalVars.admins:
return IRCResponse(ResponseType.Say, "Only my admins may create new aliases!", message.ReplyTo)
if len(message.ParameterList) <= 1:
return IRCResponse(ResponseType.Say, "Alias what?", message.ReplyTo)
triggerFound = False
for (name, command) in self.bot.moduleHandler.commands.items():
if message.ParameterList[0] in command.triggers:
return IRCResponse(ResponseType.Say,
"'{}' is already a command!".format(message.ParameterList[0]),
message.ReplyTo)
if message.ParameterList[1] in command.triggers:
triggerFound = True
if not triggerFound:
return IRCResponse(ResponseType.Say,
"'{}' is not a valid command!".format(message.ParameterList[1]),
message.ReplyTo)
if message.ParameterList[0] in self.bot.moduleHandler.commandAliases:
return IRCResponse(ResponseType.Say,
"'{}' is already an alias!".format(message.ParameterList[0]),
message.ReplyTo)
newAlias = []
for word in message.ParameterList[1:]:
newAlias.append(word.lower())
self.newAlias(message.ParameterList[0], newAlias)
return IRCResponse(ResponseType.Say,
"Created a new alias '{}' for '{}'.".format(message.ParameterList[0],
" ".join(message.ParameterList[1:])),
message.ReplyTo)
elif message.Command in self.aliases.keys():
self.bot.moduleHandler.handleMessage(self.aliasedMessage(message))
|
import unittest
from BattleShips.tests import BattleShipsTest
from BattleShips.controller import GameController
class TestController(BattleShipsTest):
def setUp(self):
pass
def test_game_controller(self):
game_controller = GameController()
self.assertOutput(game_controller.game_play,
"""012345678910
A..........
B..........
C..........
D..........
E..........
F..........
G..........
H..........
I..........
J..........""")
if __name__ == '__main__':
unittest.main()
|
#!/usr/bin/python3
import signal
import asyncio
import argparse
import json
import sys
import logging
from logging.handlers import RotatingFileHandler
import hookman.http as http
"""
HookMan main() module.
HookMan module that contains main(), along with argument parsing and instantiation of the HTTP objects.
It also creates the event loop and kicks everything off.
"""
__version__ = "0.1.0"
class HMMain():
"""
Class to encapsulate all main() functionality.
"""
def __init__(self):
"""
Constructor.
"""
self.logger = None
self.http_object = None
self.stopping = False
def init_signals(self):
"""
Setup signal handling.
"""
signal.signal(signal.SIGINT, self.handle_sig)
signal.signal(signal.SIGTERM, self.handle_sig)
def handle_sig(self, signum, frame):
"""
Function to handle signals.
        SIGINT and SIGTERM both result in HookMan shutting down
:param signum: signal number being processed
:param frame: frame - unused
"""
if signum == signal.SIGINT:
self.logger.info("Keyboard interrupt")
self.stop()
if signum == signal.SIGTERM:
            self.logger.info("SIGTERM Received")
self.stop()
def stop(self):
"""
Called by the signal handler to shut HookMan down.
:return: None
"""
        self.logger.info("HookMan is shutting down")
self.http_object.stop()
self.stopping = True
    # noinspection PyBroadException
def run(self, config):
"""
Start HookMan up after initial argument parsing.
        Create the event loop and the HTTP object.
        :param config: config dictionary
"""
try:
loop = asyncio.get_event_loop()
self.logger.info("Initializing HTTP")
self.http_object = http.HTTP(__version__, loop, self.logger, config["http"], config["mappings"], self.test, self.config_file, self.reload)
self.logger.info("Start Main Loop")
pending = asyncio.Task.all_tasks()
loop.run_until_complete(asyncio.gather(*pending))
#
# Now we are shutting down - perform any necessary cleanup
#
self.logger.info("HookMan is stopped.")
except:
self.logger.warning('-' * 60)
self.logger.warning("Unexpected error during run()")
self.logger.warning('-' * 60, exc_info=True)
self.logger.warning('-' * 60)
self.logger.debug("End Loop")
        self.logger.info("HookMan exited")
# noinspection PyBroadException
def main(self):
"""
Initial HookMan entry point.
Parse command line arguments, load configuration, set up logging.
"""
self.init_signals()
# Get command line args
parser = argparse.ArgumentParser()
parser.add_argument("config", help="full or relative path to config file", type=str)
parser.add_argument("-t", "--test", help="Test mode - print forwarding request and don't call", action='store_true')
parser.add_argument("-r", "--reload", help="Reload config for every request - for testing purposes", action='store_true')
args = parser.parse_args()
self.config_file = args.config
self.test = args.test
self.reload = args.reload
try:
#
# Read config file
#
with open(self.config_file) as json_file:
config = json.load(json_file)
except Exception as e:
print("ERROR", "Error loading configuration file: {}".format(e))
sys.exit(1)
# Setup logging
self.logger = logging.getLogger("log1")
if "log" in config:
log_config = config["log"]
else:
log_config = None
if log_config is not None and "level" in log_config:
level = log_config["level"]
else:
level = "INFO"
self.logger.setLevel(level)
self.logger.propagate = False
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
fh = None
if log_config is not None and "logfile" in log_config:
if log_config["logfile"] != "STDOUT":
if "log_size" in log_config:
log_size = int(log_config["log_size"])
else:
log_size = 10000000
if "log_generations" in log_config:
log_generations = int(log_config["log_generations"])
else:
log_generations = 3
fh = RotatingFileHandler(log_config["logfile"], maxBytes=log_size, backupCount=log_generations)
else:
# Default for StreamHandler() is sys.stderr
fh = logging.StreamHandler(stream=sys.stdout)
fh.setLevel(level)
fh.setFormatter(formatter)
self.logger.addHandler(fh)
# Startup message
self.logger.info("HookMan Version %s starting", __version__)
self.logger.info("Configuration read from: %s", self.config_file)
if "http" not in config:
self.logger.error("Missing 'http' section in %s - exiting", self.config_file)
sys.exit(1)
if "mappings" not in config:
self.logger.error("Missing 'mappings' section in %s - exiting", self.config_file)
sys.exit(1)
self.run(config)
def main():
"""
Called when run from the command line.
"""
hookman = HMMain()
hookman.main()
if __name__ == "__main__":
main() |
#!/usr/bin/env python
import unittest, os, sys, subprocess as sp, shlex, numpy as np
import mcvine, mcvine.resources
from mcni.neutron_storage.idf_usenumpy import count
class TestCase(unittest.TestCase):
def test_count(self):
"mcvine neutronstorage count"
cmd = "mcvine neutronstorage count neutrons"
if os.system(cmd):
raise RuntimeError("%s failed" % cmd)
return
def test_totalintensity(self):
"mcvine neutronstorage totalintensity"
cmd = "mcvine neutronstorage totalintensity neutrons"
o = sp.check_output(shlex.split(cmd))
assert np.isclose(float(o), 6.81612525211e+12)
return
def test_extract(self):
"mcvine neutronstorage extract"
cmd = "mcvine neutronstorage extract neutrons neutrons.extracted --start 0 --end 10"
if os.system(cmd):
raise RuntimeError("%s failed" % cmd)
assert count('neutrons.extracted')==10
return
    def test_print(self):
        "mcvine neutronstorage print"
cmd = "mcvine neutronstorage print neutrons --start 0 --end 5"
if os.system(cmd):
raise RuntimeError("%s failed" % cmd)
return
def test_merge(self):
"mcvine neutronstorage merge"
if os.path.exists('merged_neutrons.1'): os.remove('merged_neutrons.1')
cmd = "mcvine neutronstorage merge --files neutrons,neutrons --out merged_neutrons.1"
if os.system(cmd):
raise RuntimeError("%s failed" % cmd)
assert count('merged_neutrons.1')==2*count('neutrons')
if os.path.exists('merged_neutrons.2'): os.remove('merged_neutrons.2')
cmd = "mcvine neutronstorage extract neutrons neutrons.extracted-for-merge-test --start 0 --end 10"
if os.system(cmd):
raise RuntimeError("%s failed" % cmd)
cmd = "mcvine neutronstorage merge --files neutrons,neutrons.*-merge-test --out merged_neutrons.2"
if os.system(cmd):
raise RuntimeError("%s failed" % cmd)
assert count('merged_neutrons.2')==count('neutrons')+10
return
if __name__ == '__main__': unittest.main()
|
import os
import platform
import sys
import time
from datetime import datetime
from pytz import timezone
"""
Simple proof of concept python application
"""
def looper():
"""
Simple proof of concept function.
Loops indefinitely, prints to stdout and writes to file.
:return:
"""
while True:
string = (f"Hello var1:{os.getenv('VAR1')} var2:{os.getenv('VAR2')} var3:{os.getenv('VAR3')}! "
f"the time now is {datetime.now(tz=timezone('Europe/Athens'))} "
f"and I am going to sleep for 1 second.")
print(string)
with open("/data/out.txt", "a") as f:
f.write(string + '\n')
f.flush()
time.sleep(1)
if __name__ == "__main__":
print(f"python: {platform.python_version()} "
f"now: {datetime.now(tz=timezone('Europe/Athens')).strftime('%Y-%m-%dT%H:%M:%S')}")
sys.exit(looper())
|
from typing import Any, Dict, List, Optional
from glom import glom
from restapi import decorators
from restapi.rest.definition import EndpointResource, Response
from restapi.services.authentication import User
from restapi.utilities.globals import mem
from restapi.utilities.logs import log
class NewSwaggerSpecifications(EndpointResource):
"""
    Specifications output through Swagger (OpenAPI) standards
"""
labels = ["specifications"]
@decorators.auth.optional(allow_access_token_parameter=True)
@decorators.endpoint(
path="/specs",
summary="Endpoints specifications based on OpenAPI format",
responses={200: "Endpoints JSON based on OpenAPI Specifications"},
)
def get(self, user: Optional[User]) -> Response:
specs = mem.docs.spec.to_dict()
if user:
# Set security requirements for endpoint
for key, data in specs.items():
# Find endpoint mapping flagged as private
if key == "paths":
for uri, endpoint in data.items():
u = uri.replace("{", "<").replace("}", ">")
for method, definition in endpoint.items():
auth_required = glom(
mem.authenticated_endpoints,
f"{u}.{method}",
default=False,
)
if auth_required:
definition["security"] = [{"Bearer": []}]
return self.response(specs)
log.info("Unauthenticated request, filtering out private endpoints")
        # Remove sensitive data
filtered_specs: Dict[str, Dict[str, Dict[str, Any]]] = {}
# schemaName => True|False (private|public)
privatedefs: Dict[str, bool] = {}
# schemaName => [list of definitions including this]
parentdefs: Dict[str, List[Any]] = {}
for key, data in specs.items():
# Find endpoint mapping flagged as private
if key == "paths":
for uri, endpoint in data.items():
u = uri.replace("{", "<").replace("}", ">")
for method, definition in endpoint.items():
is_private = glom(
mem.private_endpoints,
f"{u}.{method}",
default=False,
)
defs = definition.get("parameters", [])[:]
for p in defs:
if "schema" not in p:
continue
if "$ref" not in p["schema"]:
continue
ref = p["schema"]["$ref"]
def_name = ref.replace("#/definitions/", "")
privatedefs.setdefault(def_name, True)
# Will be True if all occurrences are private
privatedefs[def_name] = privatedefs[def_name] and is_private
if is_private:
log.debug("Skipping {} {}", method, uri)
continue
auth_required = glom(
mem.authenticated_endpoints,
f"{u}.{method}",
default=False,
)
if auth_required:
definition["security"] = [{"Bearer": []}]
filtered_specs.setdefault(key, {})
filtered_specs[key].setdefault(uri, {})
filtered_specs[key][uri].setdefault(method, definition)
# definitions
elif key == "definitions":
# Saving definition inclusion, will be used later to determine
# if a definition is private or not
# If a definition is referenced by an endpoint, the definition
# visibility matches the endpoint visibility
# If a definition is referenced by other definitions, its visibility
# will be calculated as AND(parent definitions)
# Verification postponed
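                # Example (illustrative schema names): if SchemaA is referenced only by
                # private endpoints it stays private; if SchemaB is nested in SchemaA
                # (private) and SchemaC (public), AND(private, public) makes it public.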
for schema, definition in data.items():
# parentdefs
for d in definition.get("properties", {}).values():
# Generated by Nested without allow_none
if "$ref" in d:
ref = d["$ref"]
def_name = ref.replace("#/definitions/", "")
parentdefs.setdefault(def_name, [])
parentdefs[def_name].append(schema)
# Generated by Nested with allow_none=True
if "allOf" in d:
for nested in d["allOf"]:
ref = nested["$ref"]
def_name = ref.replace("#/definitions/", "")
parentdefs.setdefault(def_name, [])
parentdefs[def_name].append(schema)
else:
filtered_specs.setdefault(key, data)
if "definitions" in specs:
filtered_specs.setdefault("definitions", {})
for schema, definition in specs["definitions"].items():
if self.is_definition_private(schema, privatedefs, parentdefs):
log.debug("Skipping private definition {}", schema)
continue
filtered_specs["definitions"].setdefault(schema, definition)
return self.response(filtered_specs)
def is_definition_private(
self,
schema_name: str,
privatedefs: Dict[str, bool],
parentdefs: Dict[str, Any],
recursion: int = 0,
) -> bool:
# can be True|False|None
from_private_endpoint = privatedefs.get(schema_name, None)
# Can be None|empty list|list
parents = parentdefs.get(schema_name, None)
# This definition is not used by any endpoint or other definitions
# Probably it is used with a marshal_with to serialize a response...
# Response Schemas are not reported in the spec by FlaskApiSpec
# so for now let's consider it as private and filter out
if from_private_endpoint is None and parents is None:
return True
# This definition is not used by other definitions => the visibility
# is only given by endpoints visibility if any
if not parents and from_private_endpoint is not None:
return from_private_endpoint
        # At this point parents is expected to be a non-empty list,
        # otherwise something went wrong
# This if should always fail
if not parents: # pragma: no cover
log.warning(
"Invalid {} definition, unable to determine the visibility {} {}",
schema_name,
from_private_endpoint,
parents,
)
# Let's consider it as private and filter it out
return True
# Are we in a loop due to a cyclic dependency? Let's stop it
if recursion > 10: # pragma: no cover
# Let's consider it as private and filter it out
return True
is_private = True
for parent in parents:
priv = self.is_definition_private(
parent,
privatedefs,
parentdefs,
recursion + 1, # prevent infinite recursion
)
# The definition is private if only included in private definitions
# If used in at least one public definition, let's consider it as public
is_private = is_private and priv
return is_private
|
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 07 16:31:26 2013
@author: Will Rhodes
"""
import numpy as np
import math
# creates the boolean map from a list of skyObjects (data) with a specified number of bins (resolution)
class BooleanMap:
resolution = 360
dSize = 360./resolution
countMap = np.zeros([resolution,resolution])
    # constructor
def __init__(self,data,bins=360):
self.resolution = bins
self.dSize = 360./self.resolution
self.countMap = np.zeros([self.resolution,self.resolution])
for point in data:
if hasattr(point,'RA'): #if it has RA assume it has DEC
raPos = int(math.floor(point.RA / self.dSize))
decPos = int(math.floor(point.DEC / self.dSize))
#row x col or dec x ra
self.countMap[raPos,decPos] = self.countMap[raPos,decPos] +1
else:
print("can't parse data into the map")
def inSurvey(self,ra,dec):
raPos = int(math.floor(ra / self.dSize))
decPos = int(math.floor(dec / self.dSize))
cell = self.countMap[raPos,decPos]
if cell == 0:
return False
else:
return True
def toString(self):
return np.array_str(self.countMap)
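# Minimal usage sketch (hypothetical sky objects exposing RA/DEC attributes in degrees):
#   bmap = BooleanMap(sky_objects, bins=360)
#   bmap.inSurvey(150.3, 2.1)   # True if any object fell into that RA/DEC cell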
|
from models.order import Order
from models.product import Product
from tests import testing_app
from tests.helpers import *
@with_test_db((Product,))
def test_index_displays_product_names():
create_test_product("Toothbrush", "2.00")
create_test_product("Toothpaste", "1.00")
res = testing_app.get("/")
assert res.status_code == 200
res_data = res.get_data().decode("utf-8")
assert "Toothbrush" in res_data
assert "Toothpaste" in res_data
@with_test_db((Product,))
def test_view_product_shows_product_name_and_price():
create_test_product("Toothbrush", "2.00")
res = testing_app.get("/Toothbrush")
assert res.status_code == 200
res_data = res.get_data().decode("utf-8")
assert "Toothbrush" in res_data
assert "£2.00" in res_data
@with_test_db((Product,))
def test_view_product_shows_404_if_not_found():
create_test_product("Toothbrush", "2.00")
res = testing_app.get("/Bananas")
assert res.status_code == 404
@with_test_db((Product,))
def test_add_product_to_cart():
p = create_test_product("Floss", "1.50")
p2 = create_test_product("Toothbrush", "2.99")
with testing_app as app_with_session:
app_with_session.get("/")
from flask import session
assert "cart" in session
assert session["cart"] == []
res = app_with_session.post("/add-product-to-cart", data={"product_id": p.id})
assert session["cart"] == [p.id]
assert res.get_json()["cart_items"] == 1
res = app_with_session.post("/add-product-to-cart", data={"product_id": p2.id})
assert session["cart"] == [p.id, p2.id]
assert res.get_json()["cart_items"] == 2
@with_test_db((Order, Product))
def test_checkout_get():
p = create_test_product("Floss", "1.50")
p2 = create_test_product("Toothbrush", "2.99")
with testing_app.session_transaction() as s:
s.clear()
s["cart"] = [p.id, p.id, p2.id]
res = testing_app.get("/checkout")
res_data = res.get_data()
assert b"Floss" in res_data
assert b"Toothbrush" in res_data
assert b"3.00" in res_data
assert b"2.99" in res_data
@with_test_db((Order, Product))
def test_checkout_post(mocker):
mock_send_confirmation_email_delay(mocker)
p = create_test_product("Floss", "1.50")
p2 = create_test_product("Toothbrush", "2.99")
assert Order.select().count() == 0
with testing_app.session_transaction() as s:
s.clear()
s["cart"] = [p.id, p.id, p2.id]
res = testing_app.post(
"/checkout", data={"email": "[email protected]"}, follow_redirects=True
)
res_data = res.get_data()
assert b"Floss" in res_data
assert b"Toothbrush" in res_data
assert b"3.00" in res_data
assert b"2.99" in res_data
assert b"sent to [email protected]" in res_data
assert Order.select().count() == 1
o = Order.get()
assert o.email == "[email protected]"
assert o.products == {
"Floss": {"total": 3.0, "quantity": 2},
"Toothbrush": {"total": 2.99, "quantity": 1},
}
from tasks.send_email import send_confirmation_email
send_confirmation_email.delay.assert_called_once()
assert send_confirmation_email.delay.call_args[0][0] == "[email protected]"
|
#!/usr/local/bin/python2.7
from sys import exit
from os import environ, system
environ['KERAS_BACKEND'] = 'tensorflow'
environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
environ["CUDA_VISIBLE_DEVICES"] = ""
import numpy as np
import extra_vars
from subtlenet import config, utils
from subtlenet.backend import obj
from subtlenet.generators.gen import make_coll
basedir = environ['BASEDIR']
figsdir = environ['FIGSDIR']
n_batches = 500
partition = 'test'
p = utils.Plotter()
r = utils.Roccer(y_range=range(-4,1))
OUTPUT = figsdir + '/'
system('mkdir -p %s'%OUTPUT)
components = [
'singletons',
'shallow',
# 'baseline_trunc4_limit50_clf_best',
# 'decorrelated_trunc4_limit50_clf_best',
# 'mse_decorrelated_trunc4_limit50_clf_best',
# 'emd_decorrelated_trunc4_limit50_clf_best',
# 'baseline_4_50',
# 'baseline_Adam_4_10',
'baseline_Adam_4_50',
'baseline_Adam_4_100',
# 'baseline_Adam_7_10',
# 'baseline_Adam_7_50',
# 'baseline_Adam_7_100',
# 'baseline_Nadam',
# 'baseline_RMSprop',
# 'emd',
# 'emd_clf_best',
# 'mean_squared_error',
# 'mean_squared_error_clf_best',
# 'categorical_crossentropy',
# 'categorical_crossentropy_clf_best',
# 'trunc4_limit50_clf_best',
# 'trunc4_limit50',
]
colls = {
't' : make_coll(basedir + '/PARTITION/Top_*_CATEGORY.npy',categories=components),
'q' : make_coll(basedir + '/PARTITION/QCD_*_CATEGORY.npy',categories=components),
}
# run DNN
def predict(data,model):
return data[model]
def access(data, v):
return data['singletons'][:,config.gen_singletons[v]]
def div(data, num, den):
return access(data, num) / np.clip(access(data, den), 0.0001, 999)
def makebins(lo, hi, w):
return np.linspace(lo, hi, int((hi-lo)/w))
f_vars = {
'nprongs' : (lambda x : access(x, 'nprongs'), makebins(0,10,0.1), r'$N_\mathrm{prongs}$'),
'tau32' : (lambda x : div(x, 'tau3', 'tau2'), makebins(0,1.2,0.01), r'$\tau_{32}$'),
'tau32sd' : (lambda x : div(x, 'tau3sd', 'tau2sd'), makebins(0,1.2,0.01), r'$\tau_{32}^\mathrm{sd}$'),
'partonm' : (lambda x : access(x, 'partonm'), makebins(0,400,5), 'Parton mass [GeV]'),
'msd' : (lambda x : access(x, 'msd'), makebins(0.,400.,20.), r'$m_\mathrm{SD}$ [GeV]'),
'pt' : (lambda x : access(x, 'pt'), makebins(250.,1000.,50.), r'$p_\mathrm{T}$ [GeV]'),
'shallow' : (lambda x : x['shallow'], makebins(0,1.2,0.01), r'Shallow (no $p_{T}$) classifier'),
'shallow_roc' : (lambda x : x['shallow'], makebins(0,1.2,0.0001), r'Shallow (no $p_{T}$) classifier'),
# 'baseline_Adam_4_10' : (lambda x : x['baseline_Adam_4_10'], makebins(0,1,0.01), '(4,10)'),
# 'baseline_Adam_4_10_roc' : (lambda x : x['baseline_Adam_4_10'], makebins(0,1,0.0001), '(4,10)'),
'baseline_Adam_4_50' : (lambda x : x['baseline_Adam_4_50'], makebins(0,1,0.01), '(4,50)'),
'baseline_Adam_4_50_roc' : (lambda x : x['baseline_Adam_4_50'], makebins(0,1,0.0001), '(4,50)'),
'baseline_Adam_4_100' : (lambda x : x['baseline_Adam_4_100'], makebins(0,1,0.01), '(4,100)'),
'baseline_Adam_4_100_roc' : (lambda x : x['baseline_Adam_4_100'], makebins(0,1,0.0001), '(4,100)'),
# 'baseline_Adam_7_10' : (lambda x : x['baseline_Adam_7_10'], makebins(0,1,0.01), '(7,10)'),
# 'baseline_Adam_7_10_roc' : (lambda x : x['baseline_Adam_7_10'], makebins(0,1,0.0001), '(7,10)'),
# 'baseline_Adam_7_50' : (lambda x : x['baseline_Adam_7_50'], makebins(0,1,0.01), '(7,50)'),
# 'baseline_Adam_7_50_roc' : (lambda x : x['baseline_Adam_7_50'], makebins(0,1,0.0001), '(7,50)'),
# 'baseline_Adam_7_100' : (lambda x : x['baseline_Adam_7_100'], makebins(0,1,0.01), '(7,100)'),
# 'baseline_Adam_7_100_roc' : (lambda x : x['baseline_Adam_7_100'], makebins(0,1,0.0001), '(7,100)'),
# 'trunc4_limit50_roc' : (lambda x : x['trunc4_limit50'], makebins(0,1,0.0001), 'Decorr (4,10)'),
# 'emd' : (lambda x : x['emd'], makebins(0,1,0.01), 'Decorr (4,10)'),
# 'emd_clf_best' : (lambda x : x['emd_clf_best'], makebins(0,1,0.01), 'Decorr (4,10)'),
# 'emd_roc' : (lambda x : x['emd'], makebins(0,1,0.0001), 'Decorr (4,10)'),
# 'emd_clf_best_roc' : (lambda x : x['emd_clf_best'], makebins(0,1,0.0001), 'Decorr (4,10)'),
# 'mean_squared_error' : (lambda x : x['mean_squared_error'], makebins(0,1,0.01), 'Decorr (4,10)'),
# 'mean_squared_error_clf_best' : (lambda x : x['mean_squared_error_clf_best'], makebins(0,1,0.01), 'Decorr (4,10)'),
# 'mean_squared_error_roc' : (lambda x : x['mean_squared_error'], makebins(0,1,0.0001), 'Decorr (4,10)'),
# 'mean_squared_error_clf_best_roc' : (lambda x : x['mean_squared_error_clf_best'], makebins(0,1,0.0001), 'Decorr (4,10)'),
# 'categorical_crossentropy' : (lambda x : x['categorical_crossentropy'], makebins(0,1,0.01), 'Decorr (4,10)'),
# 'categorical_crossentropy_clf_best' : (lambda x : x['categorical_crossentropy_clf_best'], makebins(0,1,0.01), 'Decorr (4,10)'),
# 'categorical_crossentropy_roc' : (lambda x : x['categorical_crossentropy'], makebins(0,1,0.0001), 'Decorr (4,10)'),
# 'categorical_crossentropy_clf_best_roc' : (lambda x : x['categorical_crossentropy_clf_best'], makebins(0,1,0.0001), 'Decorr (4,10)'),
}
roc_vars = {
'tau32':(r'$\tau_{32}$',0,':'),
'tau32sd':(r'$\tau_{32}^\mathrm{SD}$',2,':'),
'shallow_roc':('Shallow',3,':'),
'baseline_Nadam_roc':('Baseline Nadam',12),
'baseline_RMSprop_roc':('Baseline RMSprop',11),
'trunc4_limit50_clf_best_roc':('Baseline 2',4,'--'),
'trunc4_limit50_roc':('Baseline 3',4,':'),
'emd_roc':('EMD',7),
'emd_clf_best_roc':('EMD best',7,'--'),
'mean_squared_error_roc':('MSE',6),
'mean_squared_error_clf_best_roc':('MSE best',6,'--'),
'categorical_crossentropy_roc':('CCE',5),
'categorical_crossentropy_clf_best_roc':('CCE best',5,'--'),
'baseline_Adam_4_10_roc':('C-LSTM (4,10)',9),
'baseline_Adam_4_50_roc':('C-LSTM (4,50)',10),
'baseline_Adam_4_100_roc':('C-LSTM (4,100)',11),
'baseline_Adam_7_10_roc':('C-LSTM (7,10)',12),
'baseline_Adam_7_50_roc':('C-LSTM (7,50)',13),
'baseline_Adam_7_100_roc':('C-LSTM (7,100)',14),
}
order = [
'tau32',
'tau32sd',
'shallow_roc',
# 'baseline_RMSprop_roc',
# 'baseline_Nadam_roc',
# 'trunc4_limit50_clf_best_roc',
# 'trunc4_limit50_roc',
# 'emd_roc',
# 'emd_clf_best_roc',
# 'mean_squared_error_roc',
# 'mean_squared_error_clf_best_roc',
# 'categorical_crossentropy_roc',
# 'categorical_crossentropy_clf_best_roc',
# 'baseline_Adam_4_10_roc',
'baseline_Adam_4_50_roc',
'baseline_Adam_4_100_roc',
# 'baseline_Adam_7_10_roc',
# 'baseline_Adam_7_50_roc',
# 'baseline_Adam_7_100_roc',
]
# unmasked first
hists = {}
for k,v in colls.iteritems():
hists[k] = v.draw(components=components,
f_vars=f_vars,
n_batches=n_batches, partition=partition)
for k in hists['t']:
ht = hists['t'][k]
hq = hists['q'][k]
for h in [ht, hq]:
h.scale()
if 'roc' in k:
continue
p.clear()
p.add_hist(ht, '3-prong top', 'r')
p.add_hist(hq, '1-prong QCD', 'k')
p.plot(output=OUTPUT+k, xlabel=f_vars[k][2])
r.clear()
r.add_vars(hists['t'],
hists['q'],
roc_vars,
order
)
r.plot(**{'output':OUTPUT+'roc'})
bkg_hists = {k:v for k,v in hists['q'].iteritems()}
# mask the top mass
def f_mask(data):
mass = data['singletons'][:,config.gen_singletons['msd']]
return (mass > 150) & (mass < 200)
hists = {}
for k,v in colls.iteritems():
hists[k] = v.draw(components=components,
f_vars=f_vars,
n_batches=n_batches, partition=partition,
f_mask=f_mask)
for k in hists['t']:
ht = hists['t'][k]
hq = hists['q'][k]
for h in [ht, hq]:
h.scale()
if 'roc' in k:
continue
p.clear()
p.add_hist(ht, '3-prong top', 'r')
p.add_hist(hq, '1-prong QCD', 'k')
p.plot(output=OUTPUT+'mass_'+k, xlabel=f_vars[k][2])
r.clear()
r.add_vars(hists['t'],
hists['q'],
roc_vars,
order
)
r.plot(**{'output':OUTPUT+'mass_roc'})
# get the cuts
thresholds = [0, 0.5, 0.75, 0.9, 0.99, 0.995]
def sculpting(name, f_pred):
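    # For each threshold, find the classifier cut at which the cumulative background
    # fraction reaches it, then plot the kinematic distributions of events above that cut.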
try:
h = bkg_hists[name+'_roc']
except KeyError:
h = bkg_hists[name]
tmp_hists = {t:{} for t in thresholds}
f_vars2d = {
'msd' : (lambda x : (x['singletons'][:,config.gen_singletons['msd']], f_pred(x)),
makebins(40,400,20.),
makebins(0,1,0.0001)),
'pt' : (lambda x : (x['singletons'][:,config.gen_singletons['pt']], f_pred(x)),
makebins(400,1000,50.),
makebins(0,1,0.0001)),
'partonm' : (lambda x : (x['singletons'][:,config.gen_singletons['partonm']], f_pred(x)),
makebins(0,400,20.),
makebins(0,1,0.0001)),
}
h2d = colls['q'].draw(components=components,
f_vars={}, f_vars2d=f_vars2d,
n_batches=n_batches, partition=partition)
for t in thresholds:
cut = 0
for ib in xrange(h.bins.shape[0]):
frac = h.integral(lo=0, hi=ib) / h.integral()
if frac >= t:
cut = h.bins[ib]
break
print 'For classifier=%s, threshold=%.3f reached at cut=%.3f'%(name, t, cut )
for k,h2 in h2d.iteritems():
tmp_hists[t][k] = h2.project_onto_x(min_cut=cut)
colors = utils.default_colors
for k in tmp_hists[thresholds[0]]:
p.clear()
p.ymin = 0.1
p.ymax = 1e5
for i,t in enumerate(thresholds):
p.add_hist(tmp_hists[t][k], r'$\epsilon_\mathrm{bkg}=%.3f$'%(1-t), colors[i])
p.plot(output=OUTPUT+'prog_'+name+'_'+k, xlabel=f_vars[k][2], logy=True)
p.clear()
for i,t in enumerate(thresholds):
tmp_hists[t][k].scale()
p.add_hist(tmp_hists[t][k], r'$\epsilon_\mathrm{bkg}=%.3f$'%(1-t), colors[i])
p.plot(output=OUTPUT+'prognorm_'+name+'_'+k, xlabel=f_vars[k][2], logy=False)
# sculpting('emd', f_pred = f_vars['emd'][0])
# sculpting('emd_clf_best', f_pred = f_vars['emd_clf_best'][0])
# sculpting('mean_squared_error', f_pred = f_vars['mean_squared_error'][0])
# sculpting('mean_squared_error_clf_best', f_pred = f_vars['mean_squared_error_clf_best'][0])
# sculpting('categorical_crossentropy', f_pred = f_vars['categorical_crossentropy'][0])
# sculpting('categorical_crossentropy_clf_best', f_pred = f_vars['categorical_crossentropy_clf_best'][0])
# sculpting('tau32sd', f_pred = f_vars['tau32sd'][0])
# sculpting('baseline_Adam_7_100', f_pred = f_vars['baseline_Adam_7_100'][0])
# sculpting('shallow', f_pred = f_vars['shallow'][0])
#
|
#!/usr/bin/env python3
import os
from collections import namedtuple
from itertools import chain
import json
# declare normalized data structure
LocalBusiness = namedtuple('LocalBusiness', ['department', 'address', 'geo', 'name', 'url', 'telephone', 'faxNumber', 'email', 'id'])
Department = namedtuple('Department', ['name', 'telephone', 'openingHours', 'openingHoursSpecification'])
GeoCoordinates = namedtuple('GeoCoordinates', ['latitude', 'longitude'])
Address = namedtuple('Address', ['addressCountry', 'addressLocality', 'addressRegion', 'postalCode', 'streetAddress'])
OpeningHoursSpecification = namedtuple('OpeningHoursSpecification', ['opens', 'closes', 'dayOfWeek'])
def ToDictionary(obj):
    if isinstance(obj, tuple):
        # namedtuples have no __dict__ on current Python 3, so use _asdict() instead of vars()
        return {k: ToDictionary(v) for k, v in obj._asdict().items()}
    if isinstance(obj, list):
        return list(map(ToDictionary, obj))
    else:
        return obj
# declare input and output file names
directory_name=os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..', 'data'))
input_name=os.path.join(directory_name, 'nissan.json')
output_name=os.path.join(directory_name, 'normalized-nissan.json')
# normalize the data
def ToOpeningHours(obj):
startingHour = obj['startingHour']
closingHour = obj['closingHour']
if startingHour == closingHour:
return None
startingHour = '{0}:{1}'.format(startingHour[:2], startingHour[2:])
closingHour = '{0}:{1}'.format(closingHour[:2], closingHour[2:])
days = obj['days']
if '-' in days:
day_from, day_upto = days.split('-')
days = '{0}-{1}'.format(day_from[0:2], day_upto[0:2])
else:
days = days[0:2]
return '{0} {1}-{2}'.format(days, startingHour, closingHour)
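# Example (illustrative input): {'days': 'Mon-Fri', 'startingHour': '0900', 'closingHour': '1700'}
# is rendered as the schema.org-style string 'Mo-Fr 09:00-17:00'.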
def ToOpeningHoursSpecification(obj):
# hours
startingHour = obj['startingHour']
closingHour = obj['closingHour']
if startingHour == closingHour:
return []
startingHour = '{0}:{1}'.format(startingHour[:2], startingHour[2:])
closingHour = '{0}:{1}'.format(closingHour[:2], closingHour[2:])
# days
n2i = {'Mon': 0, 'Tue': 1, 'Wed': 2, 'Thu': 3, 'Fri': 4, 'Sat': 5, 'Sun': 6}
i2n = {0: 'Mon', 1: 'Tue', 2: 'Wed', 3: 'Thu', 4: 'Fri', 5: 'Sat', 6: 'Sun'}
n2n = {'Mon': 'Monday', 'Tue': 'Tuesday', 'Wed': 'Wednesday', 'Thu': 'Thursday', 'Fri': 'Friday', 'Sat': 'Saturday', 'Sun': 'Sunday'}
days = obj['days']
loop = []
if '-' in days:
vals = days.split('-')
        for i in range(n2i[vals[0]], n2i[vals[1]] + 1):  # include the closing day of the range
loop.append(n2n[i2n[i]])
else:
loop.append(n2n[days])
# create opening hours
results = []
for day in loop:
results.append(OpeningHoursSpecification(startingHour, closingHour, day))
return results
businesses = []
with open(input_name, 'r') as fd:
dealers = json.load(fd)
for dealer in dealers:
address = Address(
streetAddress = dealer['addressLine1'].title(),
addressRegion = dealer['state'],
postalCode = dealer['zipCode'],
addressLocality = dealer['city'].title(),
addressCountry = 'US'
)
geo = GeoCoordinates(
latitude = dealer['latitude'],
longitude = dealer['longitude']
)
departments = []
if 'serviceHours' in dealer and len(dealer['serviceHours']) > 0:
serviceHours = dealer['serviceHours'].values()
departments.append(Department(
name = 'Service',
telephone = dealer['servicePhone'],
openingHours = [x for x in map(ToOpeningHours, serviceHours) if x is not None],
openingHoursSpecification = [x for x in chain(*map(ToOpeningHoursSpecification, serviceHours))]
))
if 'salesHours' in dealer and len(dealer['salesHours']) > 0:
salesHours = dealer['salesHours'].values()
departments.append(Department(
name = 'Sales',
telephone = dealer['salesPhone'],
openingHours = [x for x in map(ToOpeningHours, salesHours) if x is not None],
openingHoursSpecification = [x for x in chain(*map(ToOpeningHoursSpecification, salesHours))]
))
email = dealer['emailAddress']
if len(email) == 0:
email = None
url = dealer['url']
if len(url) == 0:
url = None
faxNumber = dealer['fax']
if len(faxNumber) == 0:
faxNumber = None
business = LocalBusiness(
id = dealer['dealerId'],
telephone = dealer['phoneNumber'],
faxNumber = faxNumber,
email = email,
name = dealer['name'].title(),
url = url,
department = departments,
address = address,
geo = geo
)
businesses.append(business)
with open(output_name, 'w') as fd:
json.dump(list(map(ToDictionary, businesses)), fd, sort_keys=True, indent=2)
|
import unittest
from EXOSIMS.util.RejectionSampler import RejectionSampler as RS
from EXOSIMS.util.InverseTransformSampler import InverseTransformSampler as ITS
import numpy as np
import scipy.stats
import os
class TestSamplers(unittest.TestCase):
"""Test rejection sampler and inverse transform sampler since both have
same set up
"""
def setUp(self):
self.dev_null = open(os.devnull, 'w')
self.mods = [RS,ITS]
def tearDown(self):
pass
def test_simpSample(self):
"""Test samplers using KS-statistic for two continuous distributions
and ensure that generated values correctly correlate with each one
"""
#uniform dist
ulim = [0,1]
ufun = lambda x: 1.0/np.diff(ulim)
n = int(1e5)
#normal/Gaussian dist
nlim = [-10,10]
nfun = lambda x: np.exp(-x**2./2.0)/np.sqrt(2.0*np.pi)
for mod in self.mods:
print('Testing uniform and normal distributions for sampler: %s'%mod.__name__)
#test uniform distribution
usampler = mod(ufun,ulim[0],ulim[1])
usample = usampler(n)
self.assertGreaterEqual(usample.min(), ulim[0],'Uniform sampler does not obey lower limit for %s.'%mod.__name__)
self.assertLessEqual(usample.max(), ulim[1],'Uniform sampler does not obey upper limit for %s.'%mod.__name__)
#test normal/Gaussian distribution
nsampler = mod(nfun,nlim[0],nlim[1])
nsample = nsampler(n)
self.assertGreaterEqual(nsample.min(), nlim[0],'Normal sampler does not obey lower limit for %s.'%mod.__name__)
            self.assertLessEqual(nsample.max(), nlim[1],'Normal sampler does not obey upper limit for %s.'%mod.__name__)
# test that uniform sample is not normal and normal is not uniform
# this test is probabilistic and may fail
nu = scipy.stats.kstest(nsample,'uniform')[1]
if nu > 0.01:
# test fails, so try resampling to get it to pass
nsample = nsampler(n)
nu = scipy.stats.kstest(nsample,'uniform')[1]
self.assertLessEqual(nu,0.01,'Normal sample looks too uniform for %s.'%mod.__name__)
# this test is also probabilistic and may fail
un = scipy.stats.kstest(usample,'norm')[1]
if un > 0.01:
# test fails, so try resampling to get it to pass
usample = usampler(n)
un = scipy.stats.kstest(usample,'norm')[1]
self.assertLessEqual(un,0.01,'Uniform sample looks too normal for %s.'%mod.__name__)
# this test is probabilistic and may fail
pu = scipy.stats.kstest(usample,'uniform')[1]
if pu < 0.01:
# test fails, so try resampling to get it to pass
usample = usampler(n)
pu = scipy.stats.kstest(usample,'uniform')[1]
self.assertGreaterEqual(pu,0.01,'Uniform sample does not look uniform for %s.'%mod.__name__)
# this test is also probabilistic and may fail
pn = scipy.stats.kstest(nsample,'norm')[1]
if pn < 0.01:
# test fails, try resampling to get it to pass
nsample = nsampler(n)
pn = scipy.stats.kstest(nsample,'norm')[1]
self.assertGreaterEqual(pn,0.01,'Normal sample does not look normal for %s.'%mod.__name__)
def test_simpSample_trivial(self):
""" Test simple rejection sampler with trivial inputs
Test method: set up sampling with equal upper and lower bounds
"""
ulim = [0,1]
ufun = lambda x: 1.0/np.diff(ulim)
        ufun2 = lambda x: np.ndarray.tolist(ufun(x))  # to trigger conversion to ndarray
n = 10000
for mod in self.mods:
print('Testing trivial input for sampler: %s'%mod.__name__)
sampler = mod(ufun,0.5,0.5)
sample = sampler(n)
sampler2 = mod(ufun2,0.5,0.5)
sample2 = sampler2(n)
self.assertEqual(len(sample),n,'Sampler %s does not return all same value'%mod.__name__)
self.assertTrue(np.all(sample == 0.5),'Sampler %s does not return all values at 0.5'%mod.__name__)
self.assertEqual(len(sample2),n,'Sampler %s does not return all same value'%mod.__name__)
self.assertTrue(np.all(sample2 == 0.5),'Sampler %s does not return all values at 0.5'%mod.__name__)
if __name__ == '__main__':
unittest.main() |
import cv2
import pickle
from io import BytesIO
import time
import requests
from PIL import Image
import numpy as np
from importlib import import_module
import os
from flask import Flask, render_template, Response
from flask import request
import imutils
import json
from flask import Blueprint, request, jsonify, session
yolo = Blueprint('yolo', 'yolo' ,url_prefix='/yolo')
def get_output_layers(net):
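    # getUnconnectedOutLayers() returns 1-based indices (wrapped in arrays on older OpenCV
    # versions, hence i[0] - 1); map them to the corresponding output layer names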
layer_names = net.getLayerNames()
output_layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]
return output_layers
def draw_prediction(img, class_id, confidence, x, y, x_plus_w, y_plus_h):
label = str(classes[class_id])
color = COLORS[class_id]
cv2.rectangle(img, (x,y), (x_plus_w,y_plus_h), color, 2)
cv2.putText(img, label, (x-10,y-10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
classes = None
with open("yolov3.txt", 'r') as f:
classes = [line.strip() for line in f.readlines()]
COLORS = np.random.uniform(0, 300, size=(len(classes), 3))
def Yolo(image, net):
try:
#print(image)
#print(image.shape)
Width = image.shape[1]
Height = image.shape[0]
scale = 0.00392
blob = cv2.dnn.blobFromImage(image, scale, (416,416), (0,0,0), True, crop=False)
net.setInput(blob)
outs = net.forward(get_output_layers(net))
class_ids = []
confidences = []
boxes = []
conf_threshold = 0.5
nms_threshold = 0.4
for out in outs:
for detection in out:
scores = detection[5:]
class_id = np.argmax(scores)
confidence = scores[class_id]
if confidence > 0.5:
center_x = int(detection[0] * Width)
center_y = int(detection[1] * Height)
w = int(detection[2] * Width)
h = int(detection[3] * Height)
x = center_x - w / 2
y = center_y - h / 2
class_ids.append(class_id)
confidences.append(float(confidence))
boxes.append([x, y, w, h])
indices = cv2.dnn.NMSBoxes(boxes, confidences, conf_threshold, nms_threshold)
for i in indices:
i = i[0]
box = boxes[i]
x = box[0]
y = box[1]
w = box[2]
h = box[3]
draw_prediction(image, class_ids[i], confidences[i], round(x), round(y), round(x+w), round(y+h))
except Exception as e:
print('Failed dnn: '+ str(e))
return image
def gen(height,width, downsample, camera):
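    # Pull frames from the remote camera service, run YOLO detection on each one,
    # and yield them as an MJPEG (multipart/x-mixed-replace) stream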
net = cv2.dnn.readNet("yolov3.weights", "yolov3.cfg")
while True:
url = f'http://{camera}:5000/image.jpg?height={height}&width={width}'
r = requests.get(url) # replace with your ip address
curr_img = Image.open(BytesIO(r.content))
frame = cv2.cvtColor(np.array(curr_img), cv2.COLOR_RGB2BGR)
dwidth = float(width) * (1 - float(downsample))
dheight = float(height) * (1 - float(downsample))
frame = imutils.resize(frame, width=int(dwidth), height=int(dheight))
frame = Yolo(frame, net)
frame = cv2.imencode('.jpg', frame)[1].tobytes()
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
@yolo.route('/image.jpg')
def image():
height = request.args.get('height')
width = request.args.get('width')
downsample = request.args.get('downsample')
    camera = request.args.get('camera')
    """Returns an MJPEG stream of YOLO-annotated frames from the requested camera"""
return Response(gen(height,width, downsample, camera), mimetype='multipart/x-mixed-replace; boundary=frame')
|
import jsonpickle
from intelhex import IntelHex
from pathlib import Path
from struct import unpack
from calibration_results import CalibrationResults
from cluck2sesame_device import Cluck2SesameDevice
from cluck2sesame_nvm_settings import Cluck2SesameNvmSettings
class CalibrationResultsLoader:
def __init__(self, results_path):
if results_path is None:
raise TypeError('results_path')
if not isinstance(results_path, Path):
results_path = Path(results_path)
if not results_path.is_dir():
raise ValueError('results_path', 'Calibration path needs to be a directory')
self._results_path = results_path
def load(self):
calibration_points = []
for point_file in self._results_path.glob('point.*.json'):
calibration_points.append(jsonpickle.decode(point_file.read_text()))
nvm_settings_hex = IntelHex(str(Path(self._results_path, 'nvm-settings.pre.hex')))
nvm_settings = Cluck2SesameNvmSettings(
Cluck2SesameDevice.NVM_SETTINGS_ADDRESS,
CalibrationResultsLoader._bytes_to_words(nvm_settings_hex.tobinarray()))
return CalibrationResults(nvm_settings, calibration_points)
@staticmethod
def _bytes_to_words(bytes):
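        # '<' = little-endian, 'H' = unsigned 16-bit word (two input bytes per word)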
return [word for word in unpack('<' + 'H' * (len(bytes) // 2), bytes)]
|
from som.vmobjects.abstract_object import AbstractObject
class String(AbstractObject):
_immutable_fields_ = ["_string"]
def __init__(self, value):
AbstractObject.__init__(self)
self._string = value
def get_embedded_string(self):
return self._string
def __str__(self):
return "\"" + self._string + "\""
def get_class(self, universe):
return universe.stringClass
|
#!/usr/bin/python
# @lint-avoid-python-3-compatibility-imports
#
# runqslower Trace long process scheduling delays.
# For Linux, uses BCC, eBPF.
#
# This script traces high scheduling delays between tasks being
# ready to run and them running on CPU after that.
#
# USAGE: runqslower [-p PID] [min_us]
#
# REQUIRES: Linux 4.9+ (BPF_PROG_TYPE_PERF_EVENT support).
#
# This measures the time a task spends waiting on a run queue for a turn
# on-CPU, and shows this time as individual events. This time should be small,
# but a task may need to wait its turn due to CPU load.
#
# This measures two types of run queue latency:
# 1. The time from a task being enqueued on a run queue to its context switch
# and execution. This traces ttwu_do_wakeup(), wake_up_new_task() ->
# finish_task_switch() with either raw tracepoints (if supported) or kprobes
# and instruments the run queue latency after a voluntary context switch.
# 2. The time from when a task was involuntary context switched and still
# in the runnable state, to when it next executed. This is instrumented
# from finish_task_switch() alone.
#
# Copyright 2016 Cloudflare, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
#
# 02-May-2018 Ivan Babrou Created this.
from __future__ import print_function
from bcc import BPF
import argparse
from time import strftime
import ctypes as ct
# arguments
examples = """examples:
./runqslower # trace run queue latency higher than 10000 us (default)
./runqslower 1000 # trace run queue latency higher than 1000 us
./runqslower -p 123 # trace pid 123 only
"""
parser = argparse.ArgumentParser(
description="Trace high run queue latency",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=examples)
parser.add_argument("-p", "--pid", type=int, metavar="PID", dest="pid",
help="trace this PID only")
parser.add_argument("min_us", nargs="?", default='10000',
help="minimum run queue latecy to trace, in ms (default 10000)")
parser.add_argument("--ebpf", action="store_true",
help=argparse.SUPPRESS)
args = parser.parse_args()
min_us = int(args.min_us)
debug = 0
# define BPF program
bpf_text = """
#include <uapi/linux/ptrace.h>
#include <linux/sched.h>
#include <linux/nsproxy.h>
#include <linux/pid_namespace.h>
BPF_HASH(start, u32);
struct rq;
struct data_t {
u32 pid;
char task[TASK_COMM_LEN];
u64 delta_us;
};
BPF_PERF_OUTPUT(events);
// record enqueue timestamp
static int trace_enqueue(u32 tgid, u32 pid)
{
if (FILTER_PID || pid == 0)
return 0;
u64 ts = bpf_ktime_get_ns();
start.update(&pid, &ts);
return 0;
}
"""
bpf_text_kprobe = """
int trace_wake_up_new_task(struct pt_regs *ctx, struct task_struct *p)
{
return trace_enqueue(p->tgid, p->pid);
}
int trace_ttwu_do_wakeup(struct pt_regs *ctx, struct rq *rq, struct task_struct *p,
int wake_flags)
{
return trace_enqueue(p->tgid, p->pid);
}
// calculate latency
int trace_run(struct pt_regs *ctx, struct task_struct *prev)
{
u32 pid, tgid;
// ivcsw: treat like an enqueue event and store timestamp
if (prev->state == TASK_RUNNING) {
tgid = prev->tgid;
pid = prev->pid;
if (!(FILTER_PID || pid == 0)) {
u64 ts = bpf_ktime_get_ns();
start.update(&pid, &ts);
}
}
tgid = bpf_get_current_pid_tgid() >> 32;
pid = bpf_get_current_pid_tgid();
u64 *tsp, delta_us;
// fetch timestamp and calculate delta
tsp = start.lookup(&pid);
if (tsp == 0) {
return 0; // missed enqueue
}
delta_us = (bpf_ktime_get_ns() - *tsp) / 1000;
if (FILTER_US)
return 0;
struct data_t data = {};
data.pid = pid;
data.delta_us = delta_us;
bpf_get_current_comm(&data.task, sizeof(data.task));
// output
events.perf_submit(ctx, &data, sizeof(data));
start.delete(&pid);
return 0;
}
"""
bpf_text_raw_tp = """
RAW_TRACEPOINT_PROBE(sched_wakeup)
{
// TP_PROTO(struct task_struct *p)
struct task_struct *p = (struct task_struct *)ctx->args[0];
return trace_enqueue(p->tgid, p->pid);
}
RAW_TRACEPOINT_PROBE(sched_wakeup_new)
{
// TP_PROTO(struct task_struct *p)
struct task_struct *p = (struct task_struct *)ctx->args[0];
u32 tgid, pid;
bpf_probe_read(&tgid, sizeof(tgid), &p->tgid);
bpf_probe_read(&pid, sizeof(pid), &p->pid);
return trace_enqueue(tgid, pid);
}
RAW_TRACEPOINT_PROBE(sched_switch)
{
// TP_PROTO(bool preempt, struct task_struct *prev, struct task_struct *next)
struct task_struct *prev = (struct task_struct *)ctx->args[1];
struct task_struct *next= (struct task_struct *)ctx->args[2];
u32 pid, tgid;
long state;
// ivcsw: treat like an enqueue event and store timestamp
bpf_probe_read(&state, sizeof(long), &prev->state);
if (state == TASK_RUNNING) {
bpf_probe_read(&tgid, sizeof(prev->tgid), &prev->tgid);
bpf_probe_read(&pid, sizeof(prev->pid), &prev->pid);
if (!(FILTER_PID || pid == 0)) {
u64 ts = bpf_ktime_get_ns();
start.update(&pid, &ts);
}
}
bpf_probe_read(&tgid, sizeof(next->tgid), &next->tgid);
bpf_probe_read(&pid, sizeof(next->pid), &next->pid);
u64 *tsp, delta_us;
// fetch timestamp and calculate delta
tsp = start.lookup(&pid);
if (tsp == 0) {
return 0; // missed enqueue
}
delta_us = (bpf_ktime_get_ns() - *tsp) / 1000;
if (FILTER_US)
return 0;
struct data_t data = {};
data.pid = pid;
data.delta_us = delta_us;
bpf_get_current_comm(&data.task, sizeof(data.task));
// output
events.perf_submit(ctx, &data, sizeof(data));
start.delete(&pid);
return 0;
}
"""
is_support_raw_tp = BPF.support_raw_tracepoint()
if is_support_raw_tp:
bpf_text += bpf_text_raw_tp
else:
bpf_text += bpf_text_kprobe
# code substitutions
if min_us == 0:
bpf_text = bpf_text.replace('FILTER_US', '0')
else:
bpf_text = bpf_text.replace('FILTER_US', 'delta_us <= %s' % str(min_us))
if args.pid:
    bpf_text = bpf_text.replace('FILTER_PID', 'pid != %s' % args.pid)
else:
bpf_text = bpf_text.replace('FILTER_PID', '0')
if debug or args.ebpf:
print(bpf_text)
if args.ebpf:
exit()
# kernel->user event data: struct data_t
DNAME_INLINE_LEN = 32 # linux/dcache.h
TASK_COMM_LEN = 16 # linux/sched.h
class Data(ct.Structure):
_fields_ = [
("pid", ct.c_uint),
("task", ct.c_char * TASK_COMM_LEN),
("delta_us", ct.c_ulonglong),
]
# process event
def print_event(cpu, data, size):
event = ct.cast(data, ct.POINTER(Data)).contents
print("%-8s %-16s %-6s %14s" % (strftime("%H:%M:%S"), event.task, event.pid, event.delta_us))
# load BPF program
b = BPF(text=bpf_text)
if not is_support_raw_tp:
b.attach_kprobe(event="ttwu_do_wakeup", fn_name="trace_ttwu_do_wakeup")
b.attach_kprobe(event="wake_up_new_task", fn_name="trace_wake_up_new_task")
b.attach_kprobe(event="finish_task_switch", fn_name="trace_run")
print("Tracing run queue latency higher than %d us" % min_us)
print("%-8s %-16s %-6s %14s" % ("TIME", "COMM", "PID", "LAT(us)"))
# read events
b["events"].open_perf_buffer(print_event, page_cnt=64)
while 1:
b.perf_buffer_poll()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ## ###############################################
#
# music.py
# Final project: Multimedia Center (an entertainment hub for playing movies, videos, music, and photographs).
#
# Authors:
# Martínez Bárcenas Edgar Daniel
# García Reyes Saúl Michel
# Rodríguez Hernández Alejandro
# License: MIT
#
# ## ###############################################
import sys
from os.path import expanduser
from PyQt5.QtWidgets import *
from PyQt5.QtMultimedia import *
from PyQt5.QtCore import *
class MusicWindow(QMainWindow):
def __init__(self):
super().__init__()
self.setWindowTitle("Media Player")
self.currentFile = '/badhabits.mp3'
self.currentPlaylist = QMediaPlaylist()
self.player = QMediaPlayer()
self.userAction = -1 #0- stopped, 1- playing 2-paused
self.player.mediaStatusChanged.connect(self.qmp_mediaStatusChanged)
self.player.stateChanged.connect(self.qmp_stateChanged)
self.player.positionChanged.connect(self.qmp_positionChanged)
self.player.volumeChanged.connect(self.qmp_volumeChanged)
self.player.setVolume(100)
#Add Status bar
self.statusBar().showMessage('No Media :: %d'%self.player.volume())
self.homeScreen()
def homeScreen(self):
#Set title of the MainWindow
self.setWindowTitle('Music Player')
#Create Menubar
self.createMenubar()
#Create Toolbar
self.createToolbar()
#Add info screen
#infoscreen = self.createInfoScreen()
#Add Control Bar
controlBar = self.addControls()
#need to add both infoscreen and control bar to the central widget.
centralWidget = QWidget()
centralWidget.setLayout(controlBar)
self.setCentralWidget(centralWidget)
#Set Dimensions of the MainWindow
self.resize(200,100)
#show everything.
self.show()
def createMenubar(self):
menubar = self.menuBar()
filemenu = menubar.addMenu('File')
filemenu.addAction(self.fileOpen())
filemenu.addAction(self.songInfo())
filemenu.addAction(self.folderOpen())
filemenu.addAction(self.exitAction())
def createToolbar(self):
pass
def addControls(self):
controlArea = QVBoxLayout() #centralWidget
seekSliderLayout = QHBoxLayout()
controls = QHBoxLayout()
playlistCtrlLayout = QHBoxLayout()
#creating buttons
playBtn = QPushButton('Play') #play button
pauseBtn = QPushButton('Pause') #pause button
stopBtn = QPushButton('Stop') #stop button
volumeDescBtn = QPushButton('V (-)')#Decrease Volume
volumeIncBtn = QPushButton('V (+)') #Increase Volume
#creating playlist controls
prevBtn = QPushButton('Prev Song')
nextBtn = QPushButton('Next Song')
#creating seek slider
seekSlider = QSlider()
seekSlider.setMinimum(0)
seekSlider.setMaximum(100)
seekSlider.setOrientation(Qt.Horizontal)
seekSlider.setTracking(False)
seekSlider.sliderMoved.connect(self.seekPosition)
#seekSlider.valueChanged.connect(self.seekPosition)
seekSliderLabel1 = QLabel('0.00')
seekSliderLabel2 = QLabel('0.00')
seekSliderLayout.addWidget(seekSliderLabel1)
seekSliderLayout.addWidget(seekSlider)
seekSliderLayout.addWidget(seekSliderLabel2)
#Add handler for each button. Not using the default slots.
playBtn.clicked.connect(self.playHandler)
pauseBtn.clicked.connect(self.pauseHandler)
stopBtn.clicked.connect(self.stopHandler)
volumeDescBtn.clicked.connect(self.decreaseVolume)
volumeIncBtn.clicked.connect(self.increaseVolume)
#Adding to the horizontal layout
controls.addWidget(volumeDescBtn)
controls.addWidget(playBtn)
controls.addWidget(pauseBtn)
controls.addWidget(stopBtn)
controls.addWidget(volumeIncBtn)
#playlist control button handlers
prevBtn.clicked.connect(self.prevItemPlaylist)
nextBtn.clicked.connect(self.nextItemPlaylist)
playlistCtrlLayout.addWidget(prevBtn)
playlistCtrlLayout.addWidget(nextBtn)
#Adding to the vertical layout
controlArea.addLayout(seekSliderLayout)
controlArea.addLayout(controls)
controlArea.addLayout(playlistCtrlLayout)
return controlArea
def playHandler(self):
self.userAction = 1
self.statusBar().showMessage('Playing at Volume %d'%self.player.volume())
if self.player.state() == QMediaPlayer.StoppedState :
if self.player.mediaStatus() == QMediaPlayer.NoMedia:
#self.player.setMedia(QMediaContent(QUrl.fromLocalFile(self.currentFile)))
print(self.currentPlaylist.mediaCount())
if self.currentPlaylist.mediaCount() == 0:
self.openFile()
if self.currentPlaylist.mediaCount() != 0:
self.player.setPlaylist(self.currentPlaylist)
elif self.player.mediaStatus() == QMediaPlayer.LoadedMedia:
self.player.play()
elif self.player.mediaStatus() == QMediaPlayer.BufferedMedia:
self.player.play()
elif self.player.state() == QMediaPlayer.PlayingState:
pass
elif self.player.state() == QMediaPlayer.PausedState:
self.player.play()
def pauseHandler(self):
self.userAction = 2
self.statusBar().showMessage('Paused %s at position %s at Volume %d'%\
(self.player.metaData(QMediaMetaData.Title),\
self.centralWidget().layout().itemAt(0).layout().itemAt(0).widget().text(),\
self.player.volume()))
self.player.pause()
def stopHandler(self):
self.userAction = 0
self.statusBar().showMessage('Stopped at Volume %d'%(self.player.volume()))
if self.player.state() == QMediaPlayer.PlayingState:
self.stopState = True
self.player.stop()
elif self.player.state() == QMediaPlayer.PausedState:
self.player.stop()
elif self.player.state() == QMediaPlayer.StoppedState:
pass
def qmp_mediaStatusChanged(self):
if self.player.mediaStatus() == QMediaPlayer.LoadedMedia and self.userAction == 1:
durationT = self.player.duration()
self.centralWidget().layout().itemAt(0).layout().itemAt(1).widget().setRange(0,durationT)
self.centralWidget().layout().itemAt(0).layout().itemAt(2).widget().setText('%d:%02d'%(int(durationT/60000),int((durationT/1000)%60)))
self.player.play()
def qmp_stateChanged(self):
if self.player.state() == QMediaPlayer.StoppedState:
self.player.stop()
def qmp_positionChanged(self, position,senderType=False):
sliderLayout = self.centralWidget().layout().itemAt(0).layout()
if senderType == False:
sliderLayout.itemAt(1).widget().setValue(position)
#update the text label
sliderLayout.itemAt(0).widget().setText('%d:%02d'%(int(position/60000),int((position/1000)%60)))
def seekPosition(self, position):
sender = self.sender()
if isinstance(sender,QSlider):
if self.player.isSeekable():
self.player.setPosition(position)
def qmp_volumeChanged(self):
msg = self.statusBar().currentMessage()
msg = msg[:-2] + str(self.player.volume())
self.statusBar().showMessage(msg)
def increaseVolume(self):
vol = self.player.volume()
vol = min(vol+5,100)
self.player.setVolume(vol)
def decreaseVolume(self):
vol = self.player.volume()
vol = max(vol-5,0)
self.player.setVolume(vol)
def fileOpen(self):
fileAc = QAction('Open File',self)
fileAc.setShortcut('Ctrl+O')
fileAc.setStatusTip('Open File')
fileAc.triggered.connect(self.openFile)
return fileAc
def openFile(self):
try:
fileChoosen = QFileDialog.getOpenFileUrl(self,'Open Music File', expanduser('~'),'Audio (*.mp3 *.ogg *.wav)','*.mp3 *.ogg *.wav')
            if fileChoosen and not fileChoosen[0].isEmpty():  # the dialog returns an empty QUrl when cancelled
self.currentPlaylist.addMedia(QMediaContent(fileChoosen[0]))
except TypeError:
pass
def folderOpen(self):
folderAc = QAction('Open Folder',self)
folderAc.setShortcut('Ctrl+D')
folderAc.setStatusTip('Open Folder (Will add all the files in the folder) ')
folderAc.triggered.connect(self.addFiles)
return folderAc
def addFiles(self):
folderChoosen = QFileDialog.getExistingDirectory(self,'Open Music Folder', expanduser('~'))
        if folderChoosen:  # getExistingDirectory returns an empty string when the dialog is cancelled
it = QDirIterator(folderChoosen)
it.next()
while it.hasNext():
if it.fileInfo().isDir() == False and it.filePath() != '.':
fInfo = it.fileInfo()
print(it.filePath(),fInfo.suffix())
if fInfo.suffix() in ('mp3','ogg','wav'):
print('added file ',fInfo.fileName())
self.currentPlaylist.addMedia(QMediaContent(QUrl.fromLocalFile(it.filePath())))
it.next()
def songInfo(self):
infoAc = QAction('Info',self)
infoAc.setShortcut('Ctrl+I')
infoAc.setStatusTip('Displays Current Song Information')
infoAc.triggered.connect(self.displaySongInfo)
return infoAc
def displaySongInfo(self):
metaDataKeyList = self.player.availableMetaData()
fullText = '<table class="tftable" border="0">'
for key in metaDataKeyList:
value = self.player.metaData(key)
fullText = fullText + '<tr><td>' + key + '</td><td>' + str(value) + '</td></tr>'
fullText = fullText + '</table>'
infoBox = QMessageBox(self)
infoBox.setWindowTitle('Detailed Song Information')
infoBox.setTextFormat(Qt.RichText)
infoBox.setText(fullText)
infoBox.addButton('OK',QMessageBox.AcceptRole)
infoBox.show()
def prevItemPlaylist(self):
self.player.playlist().previous()
def nextItemPlaylist(self):
self.player.playlist().next()
def exitAction(self):
exitAc = QAction('&Exit',self)
exitAc.setShortcut('Ctrl+Q')
exitAc.setStatusTip('Exit App')
        exitAc.triggered.connect(self.close)  # close() lets Qt run the normal closeEvent path
return exitAc
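# --- Illustrative launcher (not part of the original file) ---
# A minimal, hedged sketch of how MusicWindow might be started standalone;
# QApplication comes from the wildcard PyQt5.QtWidgets import above.
if __name__ == '__main__':
    app = QApplication(sys.argv)
    window = MusicWindow()  # the constructor already calls self.show()
    sys.exit(app.exec_())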
|
"""
.. module:: west_coast_random
:platform: Windows
:synopsis: Example code making a scenario in west_coast_usa and having a
car drive around randomly.
.. moduleauthor:: Marc Müller <[email protected]>
"""
import mmap
import random
import sys
from time import sleep
import numpy as np
import os
from matplotlib import pyplot as plt
from matplotlib.pyplot import imshow
from beamngpy import BeamNGpy, Scenario, Vehicle, setup_logging
from beamngpy.sensors import Camera, GForces, Electrics, Damage, Timer
from PIL import Image
import PIL
import cv2
import scipy.misc
# globals
training_dir = 'training_images_utah5'
default_model = 'etk800' #'pickup'
default_scenario = 'utah' #'hirochi_raceway' #'automation_test_track' #'west_coast_usa' #'cliff' # smallgrid
dt = 20
base_filename = '{}/{}/{}_{}_'.format(os.getcwd(), training_dir, default_model, default_scenario.replace("_", ""))
def spawn_point(scenario_locale):
    if scenario_locale == 'cliff':
#return {'pos':(-124.806, 142.554, 465.489), 'rot':None, 'rot_quat':(0, 0, 0.3826834, 0.9238795)}
return {'pos': (-124.806, 190.554, 465.489), 'rot': None, 'rot_quat': (0, 0, 0.3826834, 0.9238795)}
    elif scenario_locale == 'west_coast_usa':
#return {'pos':(-717.121, 101, 118.675), 'rot':None, 'rot_quat':(0, 0, 0.3826834, 0.9238795)}
return {'pos': (-717.121, 101, 118.675), 'rot': None, 'rot_quat': (0, 0, 0.918812, -0.394696)}
#906, 118.78 rot:
    elif scenario_locale == 'smallgrid':
return {'pos':(0.0, 0.0, 0.0), 'rot':None, 'rot_quat':(0, 0, 0.3826834, 0.9238795)}
elif scenario_locale == 'automation_test_track':
# starting line
# return {'pos': (487.25, 178.73, 131.928), 'rot': None, 'rot_quat': (0, 0, -0.702719, 0.711467)}
# 30m down track from starting line
return {'pos': (530.25, 178.73, 131.928), 'rot': None, 'rot_quat': (0, 0, -0.702719, 0.711467)}
# handling circuit
#return {'pos': (-294.031, 10.4074, 118.518), 'rot': None, 'rot_quat': (0, 0, 0.708103, 0.706109)}
# rally track
#return {'pos': (-374.835, 84.8178, 115.084), 'rot': None, 'rot_quat': (0, 0, 0.718422, 0.695607)}
# highway
#return {'pos': (-294.791, -255.693, 118.703), 'rot': None, 'rot_quat': (0, 0, -0.704635, 0.70957)}
# default
#return {'pos': (487.25, 178.73, 131.928), 'rot': None, 'rot_quat': (0, 0, -0.702719, 0.711467)}
elif scenario_locale == 'hirochi_raceway':
# default
#return {'pos': (-453.309, 373.546, 25.3623), 'rot': None, 'rot_quat': (0, 0, -0.277698, 0.960669)}
# starting point
return {'pos': (-408.48, 260.232, 25.4231), 'rot': None, 'rot_quat': (0, 0, -0.279907, 0.960027)}
elif scenario_locale == 'utah':
# west highway
#return {'pos': (-922.158, -929.868, 135.534), 'rot': None, 'rot_quat': (0, 0, -0.820165, 0.572127)}
# building site
#return {'pos': (-910.372, 607.927, 265.059), 'rot': None, 'rot_quat': (0, 0, 0.913368, -0.407135)}
# tourist area
#return {'pos': (-528.44, 283.886, 298.365), 'rot': None, 'rot_quat': (0, 0, 0.77543, 0.631434)}
# auto repair zone
#return {'pos': (771.263, -149.268, 144.291), 'rot': None, 'rot_quat': (0, 0, -0.76648, 0.642268)}
# campsite
return {'pos': (566.186, -530.957, 135.291), 'rot': None, 'rot_quat': ( -0.0444918, 0.0124419, 0.269026, 0.962024)}
# default (do not use for training)
# return {'pos': ( 771.263, -149.268, 144.291), 'rot': None, 'rot_quat': (0, 0, -0.76648, 0.642268)}
# parking lot (do not use for training)
# return {'pos': (907.939, 773.502, 235.878), 'rot': None, 'rot_quat': (0, 0, -0.652498, 0.75779)}
def setup_sensors(vehicle):
# Set up sensors
pos = (-0.3, 1, 1.0)
direction = (0, 1, 0)
fov = 120
resolution = (512, 512)
front_camera = Camera(pos, direction, fov, resolution,
colour=True, depth=True, annotation=True)
pos = (0.0, 3, 1.0)
direction = (0, -1, 0)
fov = 90
resolution = (512, 512)
back_camera = Camera(pos, direction, fov, resolution,
colour=True, depth=True, annotation=True)
gforces = GForces()
electrics = Electrics()
damage = Damage()
#lidar = Lidar(visualized=False)
timer = Timer()
# Attach them
vehicle.attach_sensor('front_cam', front_camera)
vehicle.attach_sensor('back_cam', back_camera)
vehicle.attach_sensor('gforces', gforces)
vehicle.attach_sensor('electrics', electrics)
vehicle.attach_sensor('damage', damage)
vehicle.attach_sensor('timer', timer)
return vehicle
def setup_dir(training_dir):
d = "{}/{}".format(os.path.dirname(os.path.realpath(__file__)), training_dir)
if not os.path.isdir(d):
os.mkdir(d)
return "{}/data.csv".format(d)
def truncate(f, n):
'''Truncates/pads a float f to n decimal places without rounding'''
s = '{}'.format(f)
if 'e' in s or 'E' in s:
return '{0:.{1}f}'.format(f, n)
i, p, d = s.partition('.')
return '.'.join([i, (d+'0'*n)[:n]])
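# Example (illustrative): truncate(3.14159, 2) returns '3.14' (truncation, no rounding).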
def main():
global base_filename, training_dir, default_model
f = setup_dir(training_dir)
spawn_pt = spawn_point(default_scenario)
random.seed(1703)
setup_logging()
beamng = BeamNGpy('localhost', 64256, home='C:/Users/merie/Documents/BeamNG.research.v1.7.0.1')
scenario = Scenario(default_scenario, 'research_test')
vehicle = Vehicle('ego_vehicle', model=default_model,
licence='RED', color='Red')
vehicle = setup_sensors(vehicle)
scenario.add_vehicle(vehicle, pos=spawn_pt['pos'], rot=None, rot_quat=spawn_pt['rot_quat'])
# Compile the scenario and place it in BeamNG's map folder
scenario.make(beamng)
# Start BeamNG and enter the main loop
bng = beamng.open(launch=True)
images = []
bng.hide_hud()
bng.set_deterministic() # Set simulator to be deterministic
bng.set_steps_per_second(60) # With 60hz temporal resolution
# Load and start the scenario
bng.load_scenario(scenario)
bng.start_scenario()
vehicle.ai_set_mode('span')
vehicle.ai_drive_in_lane(True)
vehicle.ai_set_aggression(0.1)
# Put simulator in pause awaiting further inputs
bng.pause()
assert vehicle.skt
    # Advance the simulation in 20-step increments, collecting images and sensor data from the vehicle
imagecount = 0
wheelvel = [0.1, 0.1, 0.1]
with open(f, 'w') as datafile:
datafile.write('filename,timestamp,steering_input,throttle_input,brake_input,driveshaft,engine_load,fog_lights,fuel,'
'lowpressure,oil,oil_temperature,parkingbrake,rpm,water_temperature\n')
#for _ in range(1024):
for _ in range(32768):
#throttle = 1.0 #random.uniform(0.0, 1.0)
#steering = random.uniform(-1.0, 1.0)
#brake = random.choice([0, 0, 0, 1])
#vehicle.control(throttle=throttle)
# collect images
sensors = bng.poll_sensors(vehicle)
image = sensors['front_cam']['colour'].convert('RGB')
imagecount += 1
filename = "{}{}.bmp".format(base_filename, imagecount)
# collect ancillary data
datafile.write('{},{},{},{},{},{},{},{},{},{},{},{},{},{}\n'.format(filename,
str(round(sensors['timer']['time'], 2)),
sensors['electrics']['steering_input'],
sensors['electrics']['throttle_input'],
sensors['electrics']['brake_input'],
sensors['electrics']['driveshaft'],
sensors['electrics']['engine_load'],
sensors['electrics']['fog_lights'],
sensors['electrics']['fuel'],
sensors['electrics']['lowpressure'],
sensors['electrics']['oil'],
sensors['electrics']['oil_temperature'],
sensors['electrics']['parkingbrake'],
sensors['electrics']['rpm'],
sensors['electrics']['water_temperature'],))
# save the image
image.save(filename)
# step sim forward
bng.step(20)
print('{} seconds passed.'.format(str(round(sensors['timer']['time'], 2))))
# check for stopping condition
for i in range(len(wheelvel)-1):
wheelvel[i] = wheelvel[i+1]
wheelvel[2] = float(sensors['electrics']['wheelspeed'])
print('wheelvel = {}'.format(sum(wheelvel) / 3.0 ))
if sum(wheelvel) / 3.0 == 0.0:
print("avg wheelspeed is zero; exiting...")
bng.close()
break
if __name__ == '__main__':
main()
|
# Copyright (C) 2019-2020 Intel Corporation
#
# SPDX-License-Identifier: MIT
import inspect
import os, os.path as osp
import zipfile
def current_function_name(depth=1):
return inspect.getouterframes(inspect.currentframe())[depth].function
def make_zip_archive(src_path, dst_path):
with zipfile.ZipFile(dst_path, 'w') as archive:
for (dirpath, _, filenames) in os.walk(src_path):
for name in filenames:
path = osp.join(dirpath, name)
archive.write(path, osp.relpath(path, src_path))
def unzip_archive(src_path, dst_path):
if zipfile.is_zipfile(src_path):
zipfile.ZipFile(src_path).extractall(dst_path)
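# --- Illustrative usage (not part of the original module) ---
# A minimal sketch with hypothetical paths: archive a directory, then extract it.
if __name__ == '__main__':
    make_zip_archive('some_dir', 'some_dir.zip')
    unzip_archive('some_dir.zip', 'restored_dir')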
|
import torch
import torch.nn as nn
class GANLoss(nn.Module):
def __init__(self, gan_mode="vanilla", real_label=0.9, fake_label=0.1):
"""
Initialize the GANLoss class.
Parameters:
            gan_mode (str) - - the type of GAN objective. It currently supports vanilla, lsgan, and wgangp.
            real_label (float) - - target label value for a real image
            fake_label (float) - - target label value for a fake image
Note: Do not use sigmoid as the last layer of Discriminator.
LSGAN needs no sigmoid. vanilla GANs will handle it with BCEWithLogitsLoss.
"""
super(GANLoss, self).__init__()
self.gan_mode = gan_mode
self.register_buffer("real_label", torch.tensor(real_label))
self.register_buffer("fake_label", torch.tensor(fake_label))
        if gan_mode == "lsgan":
            self.loss = nn.MSELoss()
        elif gan_mode == "vanilla":
            self.loss = nn.BCEWithLogitsLoss()
        else:  # wgangp computes the loss directly from the critic output in __call__
            self.loss = None
def get_target_tensor(self, prediction: torch.tensor, target_is_real: bool):
"""Create label tensors with the same size as the input.
Parameters:
            prediction (tensor) - - typically the prediction from a discriminator
target_is_real (bool) - - if the ground truth label is for real images or fake images
Returns:
A label tensor filled with ground truth label, and with the size of the input
"""
target_tensor = self.real_label if target_is_real else self.fake_label
return target_tensor.expand_as(prediction)
def __call__(self, prediction: torch.tensor, target_is_real: bool):
"""Calculate loss given Discriminator's output and grount truth labels.
Parameters:
prediction (tensor) - - typically the prediction output from a discriminator
target_is_real (bool) - - if the ground truth label is for real images or fake images
Returns:
the calculated loss.
"""
if self.gan_mode in ["lsgan", "vanilla"]:
target_tensor = self.get_target_tensor(prediction, target_is_real)
target_tensor = target_tensor.to(prediction.device)
loss = self.loss(prediction, target_tensor)
elif self.gan_mode in ["wgangp"]:
loss = -prediction.mean() if target_is_real else prediction.mean()
return loss
class TIMEBANDLoss:
def __init__(self, config: dict):
# Set configuration
self.set_config(config)
# Critetion
device = self.device
self.criterion_l1n = nn.SmoothL1Loss().to(device)
self.criterion_l2n = nn.MSELoss().to(device)
self.criterion_adv = GANLoss(real_label=0.9, fake_label=0.1).to(device)
self.criterion_gp = GANLoss("wgangp", real_label=0.9, fake_label=0.1).to(device)
# Generator Loss
self.init_loss()
def set_config(self, config: dict):
"""
Configure settings related to the data set.
params:
config: Dataset configuration dict
`config['core'] & config['dataset']`
"""
self.__dict__ = {**config, **self.__dict__}
def init_loss(self):
# Discriminator loss
self.errD_real = 0
self.errD_fake = 0
self.errD_GP = 0
# Generator loss
self.err_Gen = 0
self.errG_l1 = 0
self.errG_l2 = 0
self.errG_GP = 0
return self.loss()
def gen_loss(self, true, pred, DGx):
errG = self.GANloss(DGx, target_is_real=False)
errl1 = self.l1loss(true, pred)
errl2 = self.l2loss(true, pred)
errG_GP = self.grad_penalty(true, pred)
self.err_Gen += errG
self.errG_l1 += errl1
self.errG_l2 += errl2
self.errG_GP += errG_GP
return errG + errl1 + errl2 + errG_GP
def dis_loss(self, true, pred, Dy, DGx, critic: bool = False):
if critic:
errD_real = self.WGANloss(Dy, target_is_real=True)
errD_fake = self.WGANloss(DGx, target_is_real=False)
errD_GP = self.grad_penalty(true, pred)
return errD_fake + errD_real + errD_GP
else:
errD_real = self.GANloss(Dy, target_is_real=True)
errD_fake = self.GANloss(DGx, target_is_real=False)
self.errD_real += errD_real
self.errD_fake += errD_fake
return errD_fake + errD_real
def GANloss(self, D, target_is_real):
return self.criterion_adv(D, target_is_real)
def WGANloss(self, D, target_is_real):
return self.criterion_gp(D, target_is_real)
def l1loss(self, true, pred):
return self.l1_weight * self.criterion_l1n(pred, true)
def l2loss(self, true, pred):
return self.l2_weight * self.criterion_l2n(pred, true)
def grad_penalty(self, true, pred):
return self.gp_weight * self._grad_penalty(true, pred)
def _grad_penalty(self, true, pred):
gradient_sqr = torch.square(true - pred)
gradient_sqr_sum = torch.sum(gradient_sqr)
gradient_l2_norm = torch.sqrt(gradient_sqr_sum)
gradient_penalty = torch.square(1 - gradient_l2_norm) / true.size(0)
return gradient_penalty
def loss(self, i: int = 0):
errG = self.err_Gen + self.errG_l1 + self.errG_l2 + self.errG_GP
errD = self.errD_fake + self.errD_real
losses = {
"D": f"{errD / (i + 1):6.3f}",
"R": f"{self.errD_real / (i + 1):6.3f}",
"F": f"{self.errD_fake / (i + 1):6.3f}",
"G": f"{errG / (i + 1):6.3f}",
"G_": f"{self.err_Gen / (i + 1):6.3f}",
"L1": f"{self.errG_l1 / (i + 1):6.3f}",
"L2": f"{self.errG_l2 / (i + 1):6.3f}",
"GP": f"{self.errG_GP / (i + 1):6.3f}",
}
return losses
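# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of GANLoss on stand-in discriminator logits; the tensors below
# are random placeholders, not outputs of a real model. TIMEBANDLoss is not
# exercised here because it expects config keys (device, l1_weight, l2_weight,
# gp_weight) supplied by the project's configuration.
if __name__ == "__main__":
    criterion = GANLoss(gan_mode="lsgan")
    disc_logits = torch.randn(8, 1)  # pretend discriminator outputs for a batch of 8
    loss_real = criterion(disc_logits, target_is_real=True)
    loss_fake = criterion(disc_logits, target_is_real=False)
    print(loss_real.item(), loss_fake.item())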
|
import trio
import remi.gui as G
from cloud_ui.apps.application import Application
try:
from cloud_ui.cloudapp import UICloud
except ImportError:
pass
class TelegramNotificationManageApplication(Application):
async def init(self, *args):
await self.cloud.wait_service("telegram_bot")
def init__gui__(self):
self.vbox_list_requests = G.VBox(width="100%")
self.vbox_list_requests.append(G.Label("[Pending user requests]"))
self.button_update = G.Button(u"🗘")
self.button_update.onclick.do(self.a(self.on_update_all))
self.vbox_publics = G.VBox(width="100%")
self.vbox_publics.append(G.Label("[Publics]"))
self.hbox_create_new = G.HBox(width="100%")
self.edit_name = G.TextInput()
self.button_add_new = G.Button("+")
self.hbox_create_new.append([self.edit_name, self.button_add_new])
self.button_add_new.onclick.do(self.a(self.on_add_new))
self.vbox_publish_form = G.VBox(width="100%")
self.select_channels = G.DropDown()
        self.edit_publish = G.TextInput()
self.button_publish = G.Button("publish")
self.button_publish.onclick.do(self.a(self.on_publish))
self.vbox_publish_form.append([
G.Label("[new publishment]"),
self.select_channels,
self.edit_publish,
self.button_publish
])
self.publics_controls = dict()
def build(self):
vbox = G.VBox(width="600")
vbox.style['align'] = "left"
vbox.append([self.button_update, self.vbox_list_requests, self.vbox_publics, self.hbox_create_new, self.vbox_publish_form])
return vbox
@classmethod
def get_name(cls):
return "telegram_notification"
@classmethod
def list(self):
pass
async def process(self, name: str, arguments: 'Any'):
pass
async def on_publish(self, *args):
text_to_publish = self.edit_publish.get_value()
self.button_publish.set_enabled(False)
channel = self.select_channels.get_value()
self.edit_publish.set_text("")
service = await self.cloud.wait_service("telegram_bot")
await service.request("publish", dict(channel=channel, payload=text_to_publish))
self.notify("published!")
self.button_publish.set_enabled(True)
async def on_update_all(self, *args):
self.select_channels.empty()
await self.on_update_publics()
await self.on_update_requests()
async def on_update_requests(self, *args):
service = await self.cloud.wait_service("telegram_bot")
pending_requests = await service.request("list_pending")
for chat_id in pending_requests:
await self.show_pending_request(chat_id, pending_requests[chat_id])
async def on_update_publics(self, *args):
service = await self.cloud.wait_service("telegram_bot")
publics = await service.request("list")
print("publics list gotten:", publics)
for public in publics:
await self.show_public(public)
async def show_pending_request(self, chat_id, info):
approve_button = G.Button("approve")
hbox = G.HBox(width="100%")
hbox.append([
G.Label(f"{info}", width=150),
approve_button
])
self.vbox_list_requests.append(hbox)
self.make_approve_handler(hbox, approve_button, chat_id)
async def show_public(self, public):
self.select_channels.append(G.DropDownItem(text=public['name']))
if public['name'] in self.publics_controls:
control = self.publics_controls[public['name']]
else:
control = dict(
vbox=G.VBox(width="100%")
)
self.vbox_publics.append(control['vbox'])
self.publics_controls[public['name']] = control
try:
subscribers_list_control = control['vbox']
subscribers_list_control.empty()
subscribers_list_control.append(G.Label(f"[{public['name']}]: {len(public['subscribers'])}"))
subscribers = sorted(public['subscribers'])
for subscriber in subscribers:
subscribers_list_control.append(G.Label(subscriber))
except Exception as e:
pass
async def on_add_new(self, *args):
self.button_add_new.set_enabled(False)
public_name = self.edit_name.get_value()
print(f"adding new public {public_name}")
service = await self.cloud.wait_service("telegram_bot")
await service.request("create", public_name)
await self.on_update_publics()
self.edit_name.set_text("")
self.button_add_new.set_enabled(True)
def make_approve_handler(self, hbox, approve_button, chat_id):
def wrapper(*args):
async def wrapper_async(*args):
service = await self.cloud.wait_service("telegram_bot")
await service.request("approve_pending", chat_id)
self.vbox_list_requests.remove_child(hbox)
self.add_background_job(wrapper_async)
approve_button.onclick.do(wrapper)
cloud: 'UICloud'
|
# Generated by Django 2.0.2 on 2018-02-25 14:51
from django.db import migrations
import lists.models
class Migration(migrations.Migration):
dependencies = [
('lists', '0002_auto_20180225_1540'),
]
operations = [
migrations.AlterField(
model_name='todo',
name='due_date',
field=lists.models.DueDateField(blank=True),
),
]
|
import fire
import os
from tqdm import tqdm
import pickle
from models.bart import BART
from models.gpt2 import GPT2
def get_model_list(dataset, prog_vocabs, first_model):
model_list = []
for i in range(len(prog_vocabs) - 1):
if i == 0 and 'gpt2' in first_model:
model = GPT2(gpt2_type=first_model)
model.load_model(f'training_logs/{first_model}_{dataset}_'
f'{prog_vocabs[1]}words/best_model.pt')
else:
model = BART()
model.load_model(f'training_logs/bart_{dataset}_{prog_vocabs[i]}-'
f'{prog_vocabs[i+1]}/best_model.pt')
model_list.append(model)
return model_list
def generate(model, cond, top_k, top_p):
while True:
gen_text = model.generate(cond=cond, top_k=top_k, top_p=top_p)
if len(list(filter(str.isalpha, gen_text))) > 0:
return gen_text
def main(dataset,
prog_steps,
first_model,
top_k=-1,
top_p=0.95):
prog_vocabs = prog_steps.split('-')
assert prog_vocabs[0] == 'null' and prog_vocabs[-1] == 'full'
model_list = get_model_list(dataset, prog_vocabs, first_model)
decoding = 'top_'
if top_k > 0:
decoding += f'k{top_k}'
if top_p > 0:
decoding += f'p{top_p}'
test_examples = pickle.load(open(f'data/{dataset}/test.pickle', 'rb'))
gen_dir = f'generated_texts/{dataset}_first-{first_model}_{prog_steps}/' \
f'{decoding}'
os.makedirs(gen_dir, exist_ok=True)
log_file = open(f'{gen_dir}/gen.txt', 'w')
gens = []
for example in tqdm(test_examples, desc='Generating'):
condition, truth = example['condition'], example['text']
prog_gens = [generate(
model=model_list[0], cond=condition, top_k=top_k, top_p=top_p)]
for model in model_list[1:]:
prog_gens.append(generate(
model=model,
cond=condition + ' [SEP] ' + prog_gens[-1],
top_k=top_k, top_p=top_p))
gens.append({
'condition': condition,
'truth': truth,
'prog_gens': prog_gens,
'top_k': top_k,
'top_p': top_p
})
print(f'CONDITION:\n{condition}\n', '-' * 50, '\n\n',
f'TRUTH:\n{truth}\n', '=' * 100, '\n\n', file=log_file)
for step, text in enumerate(prog_gens):
print(f'STEP_{step}:\n{text}\n', '-' * 50, '\n\n', file=log_file)
print('=' * 50, file=log_file)
log_file.flush()
pickle.dump(gens, open(f'{gen_dir}/gen.pickle', 'wb'))
if __name__ == '__main__':
fire.Fire(main)
|
default_app_config = "netbox_rbac.apps.AppConfig"
|
from rest_framework.generics import (CreateAPIView, RetrieveUpdateAPIView,
RetrieveDestroyAPIView, ListAPIView
)
from rest_framework.response import Response
from rest_framework import status
from rest_framework.authentication import SessionAuthentication
from rest_framework.permissions import IsAuthenticated
from .models import SupplierListing
from .serializer import SupplierSerializer
from .permission import IsSupplier
from accounts.authentication import JwtAuthentication
from owner.views import PropertyListingCreateAPIView
class CreateHotelPost(PropertyListingCreateAPIView):
permission_classes = (IsAuthenticated, IsSupplier) #IsValidUser, CanPostListing)
authentication_classes = (JwtAuthentication, SessionAuthentication)
queryset = SupplierListing
serializer_class = SupplierSerializer
class RetrieveHotelPost(RetrieveUpdateAPIView):
permission_classes = (IsAuthenticated, IsSupplier) #IsValidUser, CanPostListing)
authentication_classes = (JwtAuthentication, SessionAuthentication)
queryset = SupplierListing
serializer_class = SupplierSerializer
lookup_url_kwarg = 'pk'
class DeleteHotelPost(RetrieveDestroyAPIView):
permission_classes = (IsAuthenticated, IsSupplier) #IsValidUser, CanPostListing)
authentication_classes = (JwtAuthentication, SessionAuthentication)
queryset = SupplierListing
serializer_class = SupplierSerializer
lookup_url_kwarg = 'pk'
class ListHotelPost(ListAPIView):
    permission_classes = (IsAuthenticated,) #IsValidUser, CanPostListing)
authentication_classes = (JwtAuthentication, SessionAuthentication)
queryset = SupplierListing
serializer_class = SupplierSerializer
def get_queryset(self):
user = self.request.user
return self.queryset.objects.filter(listing_id=user).all()
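# --- Illustrative routing (not part of the original file) ---
# A hedged sketch of how these views might be wired into a urls.py; the paths and
# names below are assumptions, not the project's actual routes.
#
# from django.urls import path
# from .views import CreateHotelPost, RetrieveHotelPost, DeleteHotelPost, ListHotelPost
#
# urlpatterns = [
#     path('hotels/', ListHotelPost.as_view(), name='hotel-list'),
#     path('hotels/create/', CreateHotelPost.as_view(), name='hotel-create'),
#     path('hotels/<int:pk>/', RetrieveHotelPost.as_view(), name='hotel-detail'),
#     path('hotels/<int:pk>/delete/', DeleteHotelPost.as_view(), name='hotel-delete'),
# ]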
|
import requests
import json
url = 'http://tentalux.dubno.com/controlsome'
data=[{'number':0, 'brightness':105}]
print(json.dumps(data))
response = requests.post(url, data={'data':json.dumps(data)})
data = response.json()
ARBs = data['ARBs']
arms = len(ARBs)
brightness = sum( [arb[2] for arb in ARBs] ) / arms
print(arms)
print(brightness)
print(data)
|
import asyncio
import logging
import typing
if typing.TYPE_CHECKING:
from hub.scribe.network import LBCMainNet
from hub.scribe.daemon import LBCDaemon
def chunks(items, size):
"""Break up items, an iterable, into chunks of length size."""
for i in range(0, len(items), size):
yield items[i: i + size]
class Prefetcher:
"""Prefetches blocks (in the forward direction only)."""
def __init__(self, daemon: 'LBCDaemon', coin: 'LBCMainNet', blocks_event: asyncio.Event):
self.logger = logging.getLogger(__name__)
self.daemon = daemon
self.coin = coin
self.blocks_event = blocks_event
self.blocks = []
self.caught_up = False
# Access to fetched_height should be protected by the semaphore
self.fetched_height = None
self.semaphore = asyncio.Semaphore()
self.refill_event = asyncio.Event()
# The prefetched block cache size. The min cache size has
# little effect on sync time.
self.cache_size = 0
self.min_cache_size = 10 * 1024 * 1024
# This makes the first fetch be 10 blocks
self.ave_size = self.min_cache_size // 10
self.polling_delay = 0.5
async def main_loop(self, bp_height, started: asyncio.Event):
"""Loop forever polling for more blocks."""
await self.reset_height(bp_height)
started.set()
try:
while True:
# Sleep a while if there is nothing to prefetch
await self.refill_event.wait()
if not await self._prefetch_blocks():
await asyncio.sleep(self.polling_delay)
except Exception as e:
if not isinstance(e, asyncio.CancelledError):
self.logger.exception("block fetcher loop crashed")
raise
finally:
self.logger.info("block pre-fetcher is shutting down")
def get_prefetched_blocks(self):
"""Called by block processor when it is processing queued blocks."""
blocks = self.blocks
self.blocks = []
self.cache_size = 0
self.refill_event.set()
return blocks
async def reset_height(self, height):
"""Reset to prefetch blocks from the block processor's height.
Used in blockchain reorganisations. This coroutine can be
called asynchronously to the _prefetch_blocks coroutine so we
must synchronize with a semaphore.
"""
async with self.semaphore:
self.blocks.clear()
self.cache_size = 0
self.fetched_height = height
self.refill_event.set()
daemon_height = await self.daemon.height()
behind = daemon_height - height
if behind > 0:
self.logger.info(f'catching up to daemon height {daemon_height:,d} '
f'({behind:,d} blocks behind)')
else:
self.logger.info(f'caught up to daemon height {daemon_height:,d}')
async def _prefetch_blocks(self):
"""Prefetch some blocks and put them on the queue.
Repeats until the queue is full or caught up.
"""
daemon = self.daemon
daemon_height = await daemon.height()
async with self.semaphore:
while self.cache_size < self.min_cache_size:
# Try and catch up all blocks but limit to room in cache.
# Constrain fetch count to between 0 and 500 regardless;
# testnet can be lumpy.
cache_room = self.min_cache_size // self.ave_size
count = min(daemon_height - self.fetched_height, cache_room)
count = min(500, max(count, 0))
if not count:
self.caught_up = True
return False
first = self.fetched_height + 1
hex_hashes = await daemon.block_hex_hashes(first, count)
if self.caught_up:
self.logger.info('new block height {:,d} hash {}'
.format(first + count-1, hex_hashes[-1]))
blocks = await daemon.raw_blocks(hex_hashes)
assert count == len(blocks)
# Special handling for genesis block
if first == 0:
blocks[0] = self.coin.genesis_block(blocks[0])
self.logger.info(f'verified genesis block with hash {hex_hashes[0]}')
# Update our recent average block size estimate
size = sum(len(block) for block in blocks)
if count >= 10:
self.ave_size = size // count
else:
self.ave_size = (size + (10 - count) * self.ave_size) // 10
self.blocks.extend(blocks)
self.cache_size += size
self.fetched_height += count
self.blocks_event.set()
self.refill_event.clear()
return True
|
# Tool for creating 10m buffers around collection points
# Developed by Bhekani Khumalo
import arcpy
import ramm
arcpy.env.overwriteOutput = True
try:
# Get inputs
col_points = arcpy.GetParameterAsText(0)
boundary = arcpy.GetParameterAsText(1)
buff_dist = arcpy.GetParameterAsText(2)
output_location = arcpy.GetParameterAsText(3)
filename = arcpy.GetParameterAsText(4)
# Set the workspace
arcpy.env.workspace = output_location
logger = ramm.defineLogger(output_location)
def log(message):
ramm.showPyMessage(message, logger)
# Make feature layers from the input datasets
log("\t - Beginning Process.")
arcpy.MakeFeatureLayer_management(col_points, "lyr_col_points")
# Use the boundary dataset to clip the relevant features
log("\t --- Clipping Features.")
arcpy.MakeFeatureLayer_management(boundary, "lyr_boundary")
arcpy.Clip_analysis("lyr_col_points", "lyr_boundary",
"col_points_clipped.shp")
# Repair the geometry
log("\t --- Repairing Geometry.")
arcpy.MakeFeatureLayer_management(
"col_points_clipped.shp", "lyr_col_points_clipped")
arcpy.CheckGeometry_management(
"lyr_col_points_clipped", "geomErrorReport.dbf")
no_of_errors = int(arcpy.GetCount_management(
"geomErrorReport.dbf").getOutput(0))
arcpy.RepairGeometry_management("lyr_col_points_clipped")
log(
"\t ----- {} geometry errors were found and repaired. Check geomErrorReport.dbf for details.".format(no_of_errors))
# Create Buffers
log("\t --- Creating Buffers")
buff_dist_wunits = buff_dist + " Meters"
arcpy.Buffer_analysis("lyr_col_points_clipped",
"col_points_buffered.shp", buff_dist_wunits, "FULL", "ROUND", "ALL")
log("\t --- Deagreggating Buffers")
arcpy.MultipartToSinglepart_management(
"col_points_buffered.shp", "col_points_buffered_split.shp")
no_of_buffers = int(arcpy.GetCount_management(
"col_points_buffered_split.shp").getOutput(0))
arcpy.Rename_management(
"col_points_buffered_split.shp", "{}.shp".format(filename))
arcpy.Delete_management("col_points_buffered.shp")
log(
"\t - Process Complete. {} buffers were created.".format(no_of_buffers))
except Exception as e:
ramm.handleExcept(logger)
|
# For each audio file in the input directory (wav, flac, wma, octet-stream), convert it to mp3 and then delete the original.
import argparse
import inspect
import os
import sys
CONVERT_TYPES = ['.flac', '.octet-stream', '.wav', '.wma']
# command-line arguments
parser = argparse.ArgumentParser()
parser.add_argument('-d', type=str, default='', help='Input directory')
args = parser.parse_args()
root_dir = args.d
for filename in os.listdir(root_dir):
filepath = os.path.join(root_dir, filename)
if os.path.isfile(filepath):
base, ext = os.path.splitext(filename)
if ext != None and len(ext) > 0 and ext.lower() in CONVERT_TYPES:
target = os.path.join(root_dir, base)
cmd = f'ffmpeg -i "{filepath}" -y -vn -ar 44100 -ac 2 -b:a 192k "{target}.mp3"'
print(cmd)
os.system(cmd)
os.remove(filepath)
|
#!/usr/bin/python
'''
This reads BMP180 sensor data and sends it to Adafruit.io (https://adafruit.io/)
for data analysis and visualization. Requires a valid account.
'''
import sys, time
import Adafruit_BMP.BMP085 as BMP085
from Adafruit_IO import Client
aio = Client('dec373cb22994ee094a33f22f3f7a0e2')
INTERVAL = 30
try:
sensor = BMP085.BMP085()
while True:
aio.send('bmp180-temp', sensor.read_temperature())
aio.send('bmp180-pressure', sensor.read_pressure())
aio.send('bmp-alt', "{0:.2f}".format(float(sensor.read_altitude())))
aio.send('bmp-seapress', sensor.read_sealevel_pressure())
time.sleep(INTERVAL)
except KeyboardInterrupt:
print("Ctrl+C pressed... exiting.")
sys.exit(0)
except:
print("Error: {0} {1}".format(sys.exc_info()[0], sys.exc_info()[1]))
|
import pytest
import solution
@pytest.mark.parametrize(
"nums,expected",
[
([1, 2, 3], [[1,2,3],[1,3,2],[2,1,3],[2,3,1],[3,1,2],[3,2,1]]),
([0, 1], [[0, 1], [1, 0]]),
([1], [[1]]),
],
)
def test_initial_pass(nums, expected):
got = solution.initial_pass(nums)
assert len(got) == len(expected)
for p in got:
assert list(p) in expected
|
import getopt
import os
import sys
import time
import jsonlines
import pandas as pd
class Aggregator:
"""This class produces a running total of the types of events in a directory"""
def __init__(self, argv):
"""
This method initialises the class variables
:param argv: List[String] - The runtime arguments
"""
self.path = None # The path to the data directory
self.files = [] # List of files already processed
self.data = [] # The raw data before it is converted to a pandas dataframe
self.df = None # The pandas dataframe
self.counts = None # The aggregated dataframe
self.get_args(argv)
def get_args(self, argv):
"""
This method parses the runtime arguments
:param argv: List[String] - The runtime arguments
"""
help_string = 'event_aggregator.py -h <help> -d <directory>'
# Get opts and args if available
try:
opts, args = getopt.getopt(argv, "hd:")
except getopt.GetoptError:
print(help_string)
sys.exit(2)
# Set args to variables
for opt, arg in opts:
if opt == '-h':
print(help_string)
sys.exit()
elif opt == "-d":
self.path = arg
def file_reader(self):
"""
This method reads all of the files in a directory and extracts the jsonlines data into a
list of dictionaries
"""
# Get list of files in directory
new_files = os.listdir(self.path)
for f in new_files:
# Only process files not already processed
if f not in self.files:
# Add file to processed files
self.files.append(f)
# Construct path
                file_path = os.path.join(self.path, f)
# Open file and add records to data
with jsonlines.open(file_path) as reader:
for obj in reader:
self.data.append(obj)
def stats_generator(self):
"""This method converts the list of dictionaries into a pandas dataframe then produces a count for each type"""
# Create pandas dataframe
self.df = pd.DataFrame(self.data)
# Total each event type
self.counts = self.df["type"].groupby(self.df["type"]).count()
def run(self):
"""This method reads the data, produces the statistics, and prints them once every 5 seconds"""
# Run forever
while True:
self.file_reader()
self.stats_generator()
# Print to terminal
print("============================")
print(self.counts)
# Wait 5 seconds
time.sleep(5)
if __name__ == "__main__":
print("Event Aggregator")
print(sys.argv[1:])
Aggregator(sys.argv[1:]).run()
|
#!/bin/python3
# coding=utf-8
# ------------------------------------------------------------------------------
# Created on October 05 12:49 2021
#
# Vertica Vaas Okta Login command
#
# Usage:
# va login
# va login --profile profile2
#
# (c) Copyright 2021 Micro Focus or one of its affiliates.
# ------------------------------------------------------------------------------
import pkce
import requests
import uuid
import json
import secrets
import re
from argparse import Namespace
from vcli.cmd.sub_command import SubCommandImplementation
from vcli.constant import VCLI_CONFIG_DEFAULT, REDIRECT_URL_CONSTANT
from vcli.models.vcli_config_file import VcliConfigFile
from vcli.models.vcli_credential_file import VcliCredentialFile
from vcli.exceptions.vcli_custom_exception import AccessTokenFailedError, ProfileNotExistError
from vcli.util.vcli_logger import logger
from vcli.util.error_message import ErrorMessage
from vcli.cmd.config_command import ConfigCommand
from vcli.util.help_message import HelpMessage
from vcli.util.utils import build_api_client, verify_access_token
from openapi_client.exceptions import ServiceException
from openapi_client.api.index_v1_api import IndexV1Api
class AuthCommand(SubCommandImplementation):
def operation_define(self, subparsers) -> None:
# Supported command line options:
# va login
# va login --profile profile2
login_parser = subparsers.add_parser('login', help=HelpMessage.login_header)
login_parser.add_argument('--profile', default=VCLI_CONFIG_DEFAULT, metavar='<value>', type=str, help=HelpMessage.profile)
login_parser.set_defaults(func=self.operation_cmd)
logout_parser = subparsers.add_parser('logout', help=HelpMessage.logout_header)
logout_parser.add_argument('--profile', default=VCLI_CONFIG_DEFAULT, metavar='<value>', type=str, help=HelpMessage.profile)
logout_parser.set_defaults(func=self.operation_logout)
def operation_cmd(self, args: Namespace) -> None:
try:
VcliConfigFile.check_profile_file(profile=args.profile)
VcliCredentialFile.check_profile_file(profile=args.profile)
except ProfileNotExistError:
logger.info("profile file not found")
vcli_config_data, vcli_credential_data = ConfigCommand.read_user_input_all(args=args)
ConfigCommand.set_config_data(
vcli_config_data=vcli_config_data,
vcli_credential_data=vcli_credential_data,
profile=args.profile
)
config_file = VcliConfigFile.read_profile_config(profile=args.profile)
credential_file = VcliCredentialFile.read_profile_config(profile=args.profile)
# Step 1: Get the session token, code challenge and code verifier
session_token, code_challenge, code_verifier = self.__create_auth_session(config_file=config_file, credential_file=credential_file)
# Step 2: Get access code
auth_code = self.__get_auth_code(
config_file=config_file,
credential_file=credential_file,
code_challenge=code_challenge,
session_token=session_token
)
# Step 3: Get access token
access_token = self._get_access_token(
config_file=config_file,
credential_file=credential_file,
auth_code=auth_code,
code_verifier=code_verifier
)
# Step 4: Verify the access token and stored it in credentials config
verify_access_token(
credential_file=credential_file,
access_token=access_token,
profile=args.profile
)
        # Step 5: Check OA service health
        # TODO: disabled for now
        # self._valid_oa_health(args=args)
        # TODO: After login, save the OA version into the config file and decide whether to enforce version support.
print("Login success")
def operation_logout(self, args: Namespace) -> None:
credential_file = VcliCredentialFile.read_profile_config(profile=args.profile)
credential_file.delete_access_token(profile=args.profile)
print("Logout success")
def _valid_oa_health(self, args: Namespace):
"""Check if remote oa is in health condition.
Args:
args (Namespace): args passed down from cmd
"""
api_client = build_api_client(profile=args.profile)
try:
IndexV1Api(api_client=api_client).services_health_get()
except ServiceException as se:
logger.debug(se)
print("WARNING: OA Service health check did not pass")
except Exception as e:
logger.debug(e)
print("WARNING: Failed to check OA Service health")
def __create_auth_session(self, config_file: VcliConfigFile, credential_file: VcliCredentialFile) -> [str, str, str]:
"""create auth session and prepare to send to okta for authentication
Args:
config_file (VcliConfigFile): config file content.
credential_file (VcliCredentialFile): credential file content.
Raises:
AccessTokenFailedError: Raised when user name or passwd is wrong
Returns:
[str, str, str]: session_token, code_challenge, code_verifier
"""
okta_url = credential_file.auth_endpont
username = credential_file.username
password = credential_file.password
code_verifier, code_challenge = pkce.generate_pkce_pair()
response = requests.post(
url=f"{okta_url}/api/v1/authn",
headers={
"Accept": "application/json",
"Content-Type": "application/json",
},
data=json.dumps(
{
"username": username,
"password": password,
"options": {
"multiOptionalFactorEnroll": True,
"warnBeforePasswordExpired": True,
},
}
)
)
result = response.json()
session_token = result.get('sessionToken')
if not session_token:
raise AccessTokenFailedError(ErrorMessage.LOGIN_FAILED_BAD_USERNAME_OR_PASSWORD)
result.pop('sessionToken', None)
logger.debug(f"create auth session step: {result}")
return session_token, code_challenge, code_verifier
def __get_auth_code(
self,
config_file: VcliConfigFile,
credential_file: VcliCredentialFile,
code_challenge: str,
session_token: str
) -> str:
"""Method to get auth code.
Args:
config_file (VcliConfigFile): config file content.
credential_file (VcliCredentialFile): credential file content.
code_challenge (str): [description]
session_token (str): [description]
Raises:
AccessTokenFailedError: raises when failed to retrieve access token
Returns:
str: Authorization Code
"""
state = uuid.uuid4()
nonce = secrets.token_urlsafe(32)
scope = "openid profile email"
redirect_url = f"{config_file.service_endpoint}/{REDIRECT_URL_CONSTANT}"
client_id = credential_file.client_id
okta_url = credential_file.auth_endpont
url = f"{okta_url}/oauth2/default/v1/authorize?client_id={client_id}&response_type=code&response_mode=form_post&scope={scope}&redirect_uri={redirect_url}&state={state}&nonce={nonce}&code_challenge_method=S256&code_challenge={code_challenge}&sessionToken={session_token}"
response = requests.get(url)
state_resp = re.findall(r'<input\s+type="hidden"\s+name="state"\s+value="([A-Za-z0-9_\-]*)"\s*/>', response.text)
code_resp = re.findall(r'<input\s+type="hidden"\s+name="code"\s+value="([A-Za-z0-9_\-]*)"\s*/>', response.text)
if len(state_resp) == 0 or len(code_resp) == 0:
logger.info("State parameter or code value is not valid!")
logger.debug(f"state_resp: {state_resp}")
logger.debug(f"code_resp: {code_resp}")
raise AccessTokenFailedError(ErrorMessage.ERROR_STATE_PARAM_VERIFY_FAILED)
state_value = state_resp[0]
# Authorization Code
auth_code = code_resp[0]
# Verify the state parameter
if state_value == str(state):
logger.info("State parameter verified successfully.")
else:
logger.info("State parameter is not verified!")
raise AccessTokenFailedError(ErrorMessage.ERROR_STATE_PARAM_VERIFY_FAILED)
return auth_code
def _get_access_token(
self,
config_file: VcliConfigFile,
credential_file: VcliCredentialFile,
auth_code: str,
code_verifier: str
) -> str:
"""Method to get access token
Args:
config_file (VcliConfigFile): config file content.
credential_file (VcliCredentialFile): credential file content.
auth_code (str): [description]
code_verifier (str): [description]
Returns:
str: Access token
"""
redirect_url = f"{config_file.service_endpoint}/{REDIRECT_URL_CONSTANT}"
client_id = credential_file.client_id
okta_url = credential_file.auth_endpont
response = requests.post(
url=f"{okta_url}/oauth2/default/v1/token",
data={
"grant_type": "authorization_code",
"client_id": client_id,
"redirect_uri": redirect_url,
"code": auth_code,
"code_verifier": code_verifier,
},
)
result = response.json()
access_token = result["access_token"]
result.pop('access_token', None)
result.pop('id_token', None)
logger.debug(f"Access Token step: {result}")
return access_token
|
import tempfile
from pathlib import Path
from libdotfiles.packages import try_install
from libdotfiles.util import current_username, distro_name, download_file, run
if distro_name() == "linux":
try_install("docker")
try_install("docker-compose")
elif distro_name() == "linuxmint":
with tempfile.TemporaryDirectory() as tmpdir:
target_file = Path(tmpdir) / "docker-key.gpg"
download_file(
"https://download.docker.com/linux/ubuntu/gpg", target_file
)
run(["sudo", "apt-key", "add", target_file], check=False)
run(
[
"sudo",
"add-apt-repository",
"deb [arch=amd64] https://download.docker.com/linux/ubuntu bionic stable",
],
check=False,
)
run(["sudo", "apt", "update"], check=False)
run(
[
"sudo",
"apt",
"install",
"-y",
"docker-ce",
"docker-ce-cli",
"containerd.io",
],
check=False,
)
run(["sudo", "systemctl", "start", "docker"], check=False)
run(["sudo", "systemctl", "enable", "docker"], check=False)
run(["sudo", "usermod", "-aG", "docker", current_username()], check=False)
|
print("-----Problema 1-----")
m = "Maximul dintre doua numere"
print(m.upper())
a = int(input("Dati-l pe a: "))
b = int(input("Dati-l pe b: "))
def maxim(a,b):
if a < b :
max = b
print("a < b max = ", max)
elif a > b:
max = a
c = format(max)
print(f'a > b max = {c}')
else:
print("numerele sunt egale")
maxim(a,b)
print("\n-----Problema 2-----")
m = " Grading system"
print(m.upper())
mark = int(input("Enter mark: "))
if 0 < mark < 25:
print("Grade is F")
elif 25 <= mark < 45:
print("Grade is E")
elif 45 <= mark < 50:
print("Grade is D")
elif 50 <= mark < 60:
print("Grade is C")
elif 60 <= mark < 80:
print("Grade is B")
elif 80 <= mark <= 100:
print("Grade is A")
else:
print("Mark does not exist!")
print("\n-----Problema 3-----")
m = "Discount application for "
print(m.upper() + "50+ kg")
quantity = float(input("* Introdu cantitate: "))
cost = quantity * 20.3
discount = 0.1 * cost
cost_final = cost - discount
pret = "{:.3f}".format(cost)
if cost > 1000:
print(f'** La un pret de 20.3 lei/kg si \n** un discount de 10% din {pret} avem:')
print(f'*** Pret final: {"{:.2f}".format(cost_final)} lei')
else:
print(f'** La un pret de 20.3 lei/kg fara discount de 10% din {pret} avem:')
print("*** Pret final fara discount: ", pret)
print("\n-----Problema 4-----")
m = "Welcome "
print(m.upper() + "Bond! JAMES Bond")
i = str(input("Numele agentului: "))
if i == "Bond":
print(" * Welcome on board 007")
else:
print(f' * Good morning {i}')
l = "-- Hello {}!"
print(l.format(i))
print(" * Hello {}".format(i))
print("\n-----Problema 5-----")
m = " password validity"
print(m.upper())
print("$#@")
c = input("Introduceti parola: ")
car = 0
spec = 0
if 6 <= len(c) and len(c) <=16:
for i in c:
if (i.islower() or i.isupper() or i.isdigit()):
car += 1
if (i == '$') or (i == '#') or (i == '@'):
spec += 1
if (car > 1 and spec >= 1 and car + spec == len(c)):
print("Parola valida")
else:
print("Parola invalida")
print("\n-----Problema 6-----")
m = " 5 or 6"
print(m.upper())
n = int(input("Introduceti un numar: "))
l = str(n)
def numar():
if n == 5 :
print("Numarul introdus este {} nu {}".format(n, n + 1))
elif n == 6 :
print("Numarul introdus este {} nu {}".format(n, n - 1))
elif (n != 5 or n != 6):
print("Numarul introdus este diferit de 5 sau 6 !")
else:
if l == str(n):
print("Ati introdus o litera!")
numar()
print("\n-----Problema 7-----")
m = " sortare"
print(m.upper())
a = int(input("a = "))
b = int(input("b = "))
c = int(input("c = "))
def sort(a,b,c):
if a < b and b < c :
print("Sortare: ", a,b,c)
elif a > b and b < c and a > c:
print("Sortare: ", b,c,a)
elif a > b and b < c and a < c:
print("Sortare: ", b,a,c)
elif a < b and b > c and a < c:
print("Sortare: ", a,c,b)
elif a < b and b > c and a > c:
print("Sortare: ", c,a,b)
else:
print("Sortare: ", c, b, a)
sort(a,b,c)
# print("\nMetoda a 2-a cu numere pana la 10 !!!")
# list = [input("a= "), input("b= "), input("c= ")]
# #list = ["5", "8" , "7"]
# print("Lista de numere este: ", list)
# list.sort()
# print("Lista 1 sortata este: ", list)
# sorted(list)
# print("Lista 2 sortata este: ", list)
# list.sort(reverse=True)
# print("Lista 3 reverse sortata este: ", list)
# sorted(list, reverse = True)
# print("Lista 4 reverse sortata este: ", list)
# sorted(list)
# print("Lista 5 sortata este: ", list)
# print("Lista 6 sortata este: ", sorted(list))
print("\n-----Problema 8-----")
m = " Parrot trouble"
print(m.upper())
hour = int(input("Ore liniste: "))
if ((hour >= 0) and (hour <= 7)):
print("Papagalul deranjeaza: ", bool(hour))
elif (hour >= 20) and (hour <= 23):
print("Papagalul deranjeaza: ", bool(hour))
else:
if hour in range(24):
print("Nu deranjeaza!")
else:
print("Orele nu apartin intervalului dat!")
import datetime
now = datetime.datetime.now()
der = ""
def ora(now):
print("An - {}, Luna - {}, Zi - {}, ___ Ora - {} ___ , Minut - {}, Secunda - {} ".format(now.year, now.month, now.day, now.hour, now.minute, now.second))
if now.hour >= 0 and now.hour <= 7:
return True
elif (now.hour >= 20) and (now.hour <= 23):
return True
else:
if now.hour in range(24):
print("Nu deranjeaza!")
else:
print("Orele nu apartin intervalului dat!")
print("Papagalul deranjeaza: ",ora(now))
print("\n-----Problema 9-----")
m = " not "
print(m.upper())
sir = str(input("Dati sirul: "))
if sir[0:3] == "not":
print("Sirul este: " + sir)
else:
print("Noul sir este: " + "not" + " " + sir)
print("\n-----Problema 10-----")
m = " Hi "
print(m.upper())
strg = str(input("Dati sirul: "))
def fct():
if strg[0:2] == "hi":
return True
else:
return False
print(" ", fct())
print("\n-----Problema 11-----")
m = " Forbidden sum returns '20' "
print(m.upper())
print("Forbidden sum { 10 and 19 }")
a, b = int(input("a= ")), int(input("b= "))
c = a + b
if c >= 10 and c <= 19:
print("20")
else:
print("Suma: ", c)
print("\n-----Problema 12-----")
m = " *11/*11 + 1 == special "
print(m.upper())
print("Obtine un multiplu de 11")
mul = int(input("Dati un numar: "))
multiplu = 11 * mul
print(multiplu)
no = int(input("Introduceti un numar "))
if no == multiplu or no == multiplu + 1:
print("Special --- ", bool(no))
else:
print("Special --- ", bool(0))
n = int(input("numarul este: "))
if n % 11 == 0 or (n-1) % 11 == 0: # sau if n%11 == 0 or n%11 == 1:
print("este numar special")
else:
print("nu este numar special")
|
import SimpleLanguage
class FirstExampleClass:
def __init__(self):
# Init SimpleLanguage with default settings (defaultLanguage: eng, actualLanguage: eng, databasePath: ".\\language\\")
lang = SimpleLanguage.init()
        # Fetch a text from the database
print(lang.rString("testKey1"))
        # What if we want a single text in a different language?
print(lang.rString("testKey2", "ita"))
        # What if we want to change the actualLanguage?
lang.changeLanguage("ita")
print(lang.rString("testKey3"))
        # We can also change the database path
lang.changeDatabase(".\\language2")
print(lang.rString("testKey4"))
if __name__ == "__main__":
FirstExampleClass()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, absolute_import, division
from builtins import map, range, object, zip, sorted
import sys
import os
def main(argc, argv):
    from amplpy import AMPL
import amplpy
from time import time
from threading import Lock
os.chdir(os.path.dirname(__file__) or os.curdir)
try:
ampl = AMPL()
ampl.setOption('reset_initial_guesses', True)
ampl.setOption('send_statuses', False)
ampl.setOption('relax_integrality', True)
if argc > 1:
ampl.setOption('solver', argv[1])
# Load the AMPL model from file
modelDirectory = argv[2] if argc == 3 else os.path.join('..', 'models')
ampl.read(os.path.join(modelDirectory, 'qpmv/qpmv.mod'))
ampl.read(os.path.join(modelDirectory, 'qpmv/qpmvbit.run'))
# Set tables directory (parameter used in the script above)
ampl.getParameter('data_dir').set(os.path.join(modelDirectory, 'qpmv'))
# Read tables
ampl.readTable('assetstable')
ampl.readTable('astrets')
# Create a lock
mutex = Lock()
mutex.acquire()
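        # The lock is taken here and released by the callback; the second acquire()
        # after solveAsync() below therefore blocks until the asynchronous solve finishes.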
        # Define an output handler that prints the solver's output messages
class MyOutputHandler(amplpy.OutputHandler):
"""
Class used as an output handler. It only prints the solver output.
Must implement :class:`amplpy.OutputHandler`.
"""
def output(self, kind, msg):
if kind == amplpy.Kind.SOLVE:
assert ampl.isBusy()
print('Solver: {}'.format(msg))
class MyErrorHandler(amplpy.ErrorHandler):
def error(self, exception):
print('Error:', exception.getMessage())
def warning(self, exception):
print('Warning:', exception.getMessage())
class MyInterpretIsOver(amplpy.Runnable):
"""
Object used to communicate the end of the async operation. Must
implement :class:`amplpy.Runnable`.
"""
def run(self):
print("Solution process ended. Notifying waiting thread.")
mutex.release()
# Create an output handler
outputHandler = MyOutputHandler()
ampl.setOutputHandler(outputHandler)
# Create an error handler
errorHandler = MyErrorHandler()
ampl.setErrorHandler(errorHandler)
# Create the callback object
callback = MyInterpretIsOver()
print("Main thread: Model setup complete. Solve on worker thread.")
# Initiate the async solution process, passing the callback object
# as a parameter.
        # The callback's run() method is invoked by the AMPL API once the
        # solution process has completed.
ampl.solveAsync(callback)
# ampl.evalAsync('solve;', callback)
# Wait for the solution to complete
print("Main thread: Waiting for solution to end...")
start = time()
mutex.acquire()
duration = time() - start
print("Main thread: done waiting.")
        # At this stage the solve is done: the callback has already printed
        # 'Solution process ended.' on the console, and the main thread now
        # prints its own confirmation.
print("Main thread: waited for {} s".format(duration))
# Print the objective value
print("Main thread: cost: {}".format(ampl.getValue('cst')))
except Exception as e:
print(e, type(e))
raise
if __name__ == '__main__':
main(len(sys.argv), sys.argv)
|