the-stack_0_24985
# Begin: Python 2/3 compatibility header small
# Get Python 3 functionality:
from __future__ import\
absolute_import, print_function, division, unicode_literals
from future.utils import raise_with_traceback, raise_from
# catch exception with: except Exception as e
from builtins import range, map, zip, filter
from io import open
import six
# End: Python 2/3 compatibility header small
import lasagne.layers as L
import numpy as np
import theano
import theano.tensor as T
from .base import BasePatternComputer
__all__ = [
"CombinedPatternComputer"
]
# How everything fits together :
# Pattern types and the filter function.
subtypes = [
('basic', lambda x: 1.0+0.0*x),
('positive_y', lambda x: 1.0*T.gt(x,0.0)),
('negative_y', lambda x: 1.0-1.0*T.gt(x,0.0))
]
# Statistics needed for each pattern type.
subtype_keys = [
    'cnt',  # Number of samples per variable.
'm_x', # Mean along x.
'm_y', # Mean along y.
'xty', # Covariance x and y.
    'yty',  # Covariance y and y.
]
# This has a specific aggregation function.
subtype_keys_no_aggregation = ['cnt']
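# Rough sketch of how these statistics are combined further below (see
# _compute_Exy_ExEy and _compute_A): for every output unit the pattern is
# estimated as
#     a = (E[x*y] - E[x]*E[y]) / (w^T (E[x*y] - E[x]*E[y]))
# i.e. the input/output covariance, normalised by its projection onto the
# weight vector, computed separately for each of the subtypes listed above.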
# Create new stats dict.
def create_dict(new_stats):
ret = []
n_per_dict = len(subtypes)*len(subtype_keys)
for i in range(0, len(new_stats), n_per_dict):
ret.append(list_to_dict(new_stats[i : i+n_per_dict]))
return ret
# Stats list to dict.
def list_to_dict(stats_list):
stats_dict = dict()
idx = 0
for key, _ in subtypes:
stats_dict[key]=dict()
for sub_key in subtype_keys:
stats_dict[key][sub_key] = stats_list[idx]
idx+=1
return stats_dict
# Stats dict to list
def dict_to_list(stats_dict):
stats_list = []
for key,_ in subtypes:
for sub_key in subtype_keys:
stats_list.append(stats_dict[key][sub_key])
return stats_list
class CombinedPatternComputer(BasePatternComputer):
def get_split(self, layer,
deterministic=True, conv_all_patches=True, **kwargs):
# Get the patches and the outputs without the non-linearities.
if type(layer) is L.DenseLayer:
x, y = get_dense_xy(layer, deterministic)
elif type(layer) is L.Conv2DLayer:
if conv_all_patches is True:
x, y = get_conv_xy_all(layer, deterministic)
else:
x, y = get_conv_xy(layer, deterministic)
else:
raise ValueError("Unknown layer as input")
# Create an output dictionary
outputs = dict()
for name, fun in subtypes:
outputs[name] = dict()
mrk_y = 1.0* T.cast(fun(y), dtype=theano.config.floatX) # (N,O)
y_current = y*mrk_y # This has a binary mask
cnt_y = T.shape_padaxis(T.sum(mrk_y, axis=0), axis=0) # (1,O)
norm = T.maximum(cnt_y, 1.)
# Count how many datapoints are considered
outputs[name]['cnt'] = cnt_y
# The mean of the current batch
outputs[name]['m_y'] = T.shape_padaxis(y_current.sum(axis=0), axis=0) / norm # (1,O) mean output for batch
outputs[name]['m_x'] = T.dot(x.T, mrk_y) / norm # (D,O) mean input for batch
            # The second moments of the current batch
outputs[name]['yty'] = T.shape_padaxis(T.sum(y_current ** 2., axis=0), axis=0) / norm # (1,O)
outputs[name]['xty'] = T.dot(x.T, y_current) / norm # D,O
return dict_to_list(outputs)
def _update_statistics(self, new_stats, stats):
new_stats = create_dict(new_stats)
if stats is None:
stats = new_stats
return stats
# update the stats layerwise
for l_i in range(len(stats)):
for subtype,_ in subtypes:
# TODO: Have to check the type to see if this is needed
cnt_old = 1.0 * stats[l_i][subtype]['cnt']
stats[l_i][subtype]['cnt'] = (stats[l_i][subtype]['cnt']
+ new_stats[l_i][subtype]['cnt'])
norm = np.maximum(stats[l_i][subtype]['cnt'], 1.0)
for key in subtype_keys:
if key not in subtype_keys_no_aggregation:
tmp_old = cnt_old / norm * stats[l_i][subtype][key]
tmp_new = (new_stats[l_i][subtype]['cnt']
/ norm * new_stats[l_i][subtype][key])
stats[l_i][subtype][key] = tmp_old + tmp_new
return stats
def _compute_Exy_ExEy(self,stats,key,l_i):
return (stats[l_i][key]['xty']
- stats[l_i][key]['m_x'] * stats[l_i]['basic']['m_y']) # D,O
def _get_W(self, id):
dl = self.layers[id]
W = dl.W.get_value()
if W.ndim == 4:
if dl.flip_filters:
W = W[:, :, ::-1, ::-1]
W = get_2D(W)
return W
def _update_length(self, A, id):
        W = self._get_W(id)
norm = np.diag(np.dot(get_2D(W).T,A))[np.newaxis]
norm = norm + 1.0*(norm == 0.0)
return A / norm
def _compute_A(self, stats, key, l_i):
W = self._get_W(l_i) #D,O
numerator = self._compute_Exy_ExEy(stats, key, l_i) #D,O
        denominator = np.dot(W.T, numerator)  # O,O
        denominator = np.diag(denominator)  # 1,O
        if np.sum(denominator == 0) > 0:
            denominator = denominator + 1.0 * (denominator == 0)
        A = numerator / denominator[np.newaxis]
A = self._update_length(A, l_i)
return A
def _compute_patterns(self, stats):
patterns = dict()
for key,_ in subtypes:
patterns[key]=dict()
patterns[key]['A'] = []
patterns[key]['r'] = []
patterns[key]['mu'] = []
for l_i in range(len(stats)):
# using uppercase now
A = self._compute_A(stats, key, l_i)
r = stats[l_i][key]['m_x'] - A * stats[l_i][key]['m_y'] # D,O
mu = stats[l_i][key]['m_x']
if self.layers[l_i].W.get_value().ndim == 4:
A = A.T.reshape(self.layers[l_i].W.get_value().shape)
r = r.T.reshape(A.shape)
mu = mu.T.reshape(A.shape)
                assert np.sum(np.isnan(A)) == 0, "Something went wrong, nan in A"
patterns[key]['A'].append(A.astype(np.float32))
patterns[key]['r'].append(r.astype(np.float32))
patterns[key]['mu'].append(mu.astype(np.float32))
return patterns
def process_batches(self, X_train, batch_size, n_batches=None, **kwargs):
is_generator = type(X_train) not in [np.ndarray, np.core.memmap]
if is_generator is True:
if n_batches is None:
raise ValueError("X_train is generator, in this case "
"n_batches needs to be specified.")
else:
n_datapoints = X_train.shape[0]
n_batches = n_datapoints // batch_size
stats = None
for i in range(n_batches):
# Load batch
if is_generator:
X = X_train()
else:
X = X_train[i*batch_size : (i+1)*batch_size]
# Get components from the GPU
new_stats = self.f(X)
# Update stats.
stats= self._update_statistics(new_stats, stats)
# Compute the actual patterns
return self._compute_patterns(stats)
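# Minimal usage sketch (hypothetical names; the constructor signature comes from
# BasePatternComputer, which is not shown in this file):
#     computer = CombinedPatternComputer(...)  # built for a trained lasagne network
#     patterns = computer.process_batches(X_train, batch_size=256)
#     A_per_layer = patterns['basic']['A']     # one pattern array per layer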
the-stack_0_24986
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" This file contains code that can scrape the National Weather Service (NWS) website and read the
river level data for both Markland and McAlpine dams. By using the mileage marker for Bushman's Lake
the level of the river at that point can be calculated.
"""
# from tabulate import tabulate
from loguru import logger
logger.remove() # stop any default logger
LOGGING_LEVEL = "INFO"
from os import sys, path
from datetime import datetime, timezone
from dateutil import parser as dateparser
# from dateutil.utils import default_tzinfo
# import datefinder
from dateparser.search import search_dates
from pprint import saferepr
# from pprint import pprint
from WebScraping import retrieve_cleaned_html
from lxml import etree as ET
RUNTIME_NAME = path.basename(__file__)
Data_datestamp = datetime.now()
ACTION_LABELS = ["First-action", "Minor-flood", "Moderate-flood", "Major-flood"]
MARKLAND_DAM_URL = "https://water.weather.gov/ahps2/hydrograph.php?wfo=iln&gage=mklk2"
MARKLAND_DAM_NAME = "Markland"
MCALPINE_DAM_URL = "https://water.weather.gov/ahps2/hydrograph.php?gage=mluk2&wfo=lmk"
MCALPINE_DAM_NAME = "McAlpine"
MCALPINE_DAM_DETAILS = {
"Friendly_Name": "McAlpine Dam Upper Guage",
"Dam_URL": MCALPINE_DAM_URL,
"milemarker": 606.8,
"guage_elevation": 407.18,
ACTION_LABELS[0]: 21,
ACTION_LABELS[1]: 23,
ACTION_LABELS[2]: 30,
ACTION_LABELS[3]: 38,
}
MARKLAND_DAM_DETAILS = {
"Friendly_Name": "Markland Dam Lower Guage",
"Dam_URL": MARKLAND_DAM_URL,
"milemarker": 531,
"guage_elevation": 408,
ACTION_LABELS[0]: 49,
ACTION_LABELS[1]: 51,
ACTION_LABELS[2]: 62,
ACTION_LABELS[3]: 74,
}
RIVER_MONITORING_POINTS = {
MCALPINE_DAM_NAME: MCALPINE_DAM_DETAILS,
MARKLAND_DAM_NAME: MARKLAND_DAM_DETAILS,
}
DAMS = list(RIVER_MONITORING_POINTS.keys())
IMPORTANT_OBSERVATIONS = ["Forecast:", "Latest", "Highest"]
@logger.catch
def ISO_datestring(dt, cl):
""" Convert a DateTime object to an ISO datestring.
    Also fix an error in the conversion:
    .isoformat() returns 12:00:00 for both noon and midnight.
    Also trim the date to report only the date, hours and minutes.
"""
isodatestr = dt.isoformat()
if cl[4] == "12:00AM": # reset time to 00:00 since it incorrectly gets set to 12:00
isodatestr = isodatestr[0:11] + "00:00"
else:
isodatestr = isodatestr[0:16] # just slice off seconds and timezone
return isodatestr
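# Hypothetical example of the midnight fix above: for a row whose cl[4] is
# "12:00AM", a parsed datetime of 2021-03-01 12:00 becomes "2021-03-01T00:00";
# any other time is simply truncated to minutes (e.g. "2021-03-01T08:15").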
@logger.catch
def current_river_conditions(monitoring_point, dct):
""" scrape NOAA website for current river conditions.
Write results to PupDB file and include current flooding action level
"""
# TODO this routine is too fragile and needs better error handling
this_river = RIVER_MONITORING_POINTS[monitoring_point]
logger.info("Scraping webite..." + saferepr(this_river["Friendly_Name"]))
html = retrieve_cleaned_html(this_river["Dam_URL"])
    if html is not None:
logger.info('...scanning list of "map" objects...')
map_raw = html.select("map")[0] # grab first item named 'map'
else:
logger.error(
f'No "HTML" returned in web scrape of {this_river["Friendly_Name"]}'
)
return {} # error condition
parser_engine = ET.XMLParser(recover=True)
tree = ET.fromstring(str(map_raw), parser=parser_engine)
root = tree.getroottree()
root_map = root.getroot()
logger.debug("map name: " + saferepr(root_map.attrib["name"]))
map_dict = dct
for child in root_map:
# logger.debug("root_map_child tag: " + saferepr(child.tag))
try:
child_list = child.attrib["alt"].split()
child_list.append(RIVER_MONITORING_POINTS[monitoring_point]["milemarker"])
child_list.append(monitoring_point)
child_list.append(
RIVER_MONITORING_POINTS[monitoring_point]["guage_elevation"]
)
# logger.debug("Raw 'attrib' 'alt': " + saferepr(child.attrib["alt"]))
searchdate = search_dates(child.attrib["title"], languages=["en"])
if type(searchdate) == list:
child_date = searchdate[0][1]
date_iso = ISO_datestring(child_date, child_list)
child_list.append(date_iso)
# logger.debug("datestamp search result:" + str(date_iso))
if date_iso in map_dict:
# should only happen if two observations have the same datestamp
logger.error("duplicate key!") # TODO raise dupkey error
logger.debug("Raw 'attrib' 'alt': " + saferepr(child.attrib["alt"]))
logger.debug("datestamp search result:" + str(date_iso))
logger.debug(saferepr(child_list))
sys.exit(1)
else:
observation_key = date_iso + monitoring_point
map_dict[observation_key] = child_list
else:
logger.debug("no date found")
logger.debug("Raw 'attrib' 'alt': " + saferepr(child.attrib["alt"]))
logger.debug(f"datestamp search result:{type(searchdate)}")
logger.debug(saferepr(child.attrib))
except ValueError as e:
logger.debug("no date")
logger.debug("child element result:" + str(child))
logger.debug(saferepr(e))
except KeyError:
logger.debug("no title")
logger.debug("child element result:" + str(child))
logger.debug(f"Current_River_Conditions function results: {saferepr(map_dict)}")
return map_dict
@logger.catch
def clean_item(lst):
""" Remove a specified list of items from list and combine some items.
"""
try:
float(lst[1])
except ValueError:
# combine first and second items
tag = f"{lst[0]} {lst[1]}"
if lst[2] == "value:":
# drop bad label
lst = lst[3:]
else:
lst = lst[2:]
lst.insert(0, tag)
for item in ["at", "EST", "Flood", "Stage", "is", "ft"]:
lst = [s for s in lst if s != item]
if lst[3] in ["AM", "PM"]:
lst[2] = f"{lst[2]}{lst[3]}"
return lst
@logger.catch
def processRiverData():
"""get current data from NOAA website.
Organize data as dictionary keyed by timestamps+damname.
"""
logger.info("Program Start: " + RUNTIME_NAME)
results = {}
for name in DAMS:
results = current_river_conditions(name, results)
if results == {}:
return [] # error condition
times = list(results.keys())
times = sorted(times)
output = {}
for item in times:
if results[item][0] in IMPORTANT_OBSERVATIONS:
logger.debug(f"Raw item: {saferepr(results[item])}")
sani = clean_item(results[item])
logger.debug(f"Cleaned item: {sani}")
output[item] = sani
return output
@logger.catch
def defineLoggers():
logger.add(
sys.stderr,
colorize=True,
format="<green>{time}</green> {level} <red>{message}</red>",
level=LOGGING_LEVEL,
)
logger.add( # create a new log file for each run of the program
"./LOGS/" + RUNTIME_NAME + "_{time}.log",
retention="10 days",
compression="zip",
level="DEBUG", # always send debug output to file
)
return
@logger.catch
def MAIN():
defineLoggers()
# print(tabulate(processRiverData()))
map_data = processRiverData()
if map_data == []:
return False # error condition
for item in map_data:
print(item, map_data[item])
return True
if __name__ == "__main__":
result = MAIN()
if result == True:
logger.info("Program ended normally.")
else:
logger.info("Program ended abnormally.")
the-stack_0_24989
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals, print_function
import frappe
from frappe import _
from frappe.utils import cint
from frappe.model.naming import validate_name
from frappe.model.dynamic_links import get_dynamic_link_map
from frappe.utils.password import rename_password
from frappe.model.utils.user_settings import sync_user_settings, update_user_settings_data
@frappe.whitelist()
def rename_doc(doctype, old, new, force=False, merge=False, ignore_permissions=False, ignore_if_exists=False):
"""
Renames a doc(dt, old) to doc(dt, new) and
updates all linked fields of type "Link"
"""
if not frappe.db.exists(doctype, old):
return
if ignore_if_exists and frappe.db.exists(doctype, new):
return
if old==new:
frappe.msgprint(_('Please select a new name to rename'))
return
force = cint(force)
merge = cint(merge)
meta = frappe.get_meta(doctype)
# call before_rename
old_doc = frappe.get_doc(doctype, old)
out = old_doc.run_method("before_rename", old, new, merge) or {}
new = (out.get("new") or new) if isinstance(out, dict) else (out or new)
if doctype != "DocType":
new = validate_rename(doctype, new, meta, merge, force, ignore_permissions)
if not merge:
rename_parent_and_child(doctype, old, new, meta)
# update link fields' values
link_fields = get_link_fields(doctype)
update_link_field_values(link_fields, old, new, doctype)
rename_dynamic_links(doctype, old, new)
# save the user settings in the db
update_user_settings(old, new, link_fields)
if doctype=='DocType':
rename_doctype(doctype, old, new, force)
update_attachments(doctype, old, new)
rename_versions(doctype, old, new)
# call after_rename
new_doc = frappe.get_doc(doctype, new)
# copy any flags if required
new_doc._local = getattr(old_doc, "_local", None)
new_doc.run_method("after_rename", old, new, merge)
if not merge:
rename_password(doctype, old, new)
# update user_permissions
frappe.db.sql("""update tabDefaultValue set defvalue=%s where parenttype='User Permission'
and defkey=%s and defvalue=%s""", (new, doctype, old))
if merge:
new_doc.add_comment('Edit', _("merged {0} into {1}").format(frappe.bold(old), frappe.bold(new)))
else:
new_doc.add_comment('Edit', _("renamed from {0} to {1}").format(frappe.bold(old), frappe.bold(new)))
if merge:
frappe.delete_doc(doctype, old)
frappe.clear_cache()
return new
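# Usage sketch (hypothetical document names):
#   rename_doc("ToDo", "TODO-0001", "TODO-0002")              # plain rename
#   rename_doc("ToDo", "TODO-0001", "TODO-0002", merge=True)  # merge old into an existing doc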
def update_user_settings(old, new, link_fields):
'''
Update the user settings of all the linked doctypes while renaming.
'''
# store the user settings data from the redis to db
sync_user_settings()
if not link_fields: return
# find the user settings for the linked doctypes
linked_doctypes = set([d.parent for d in link_fields if not d.issingle])
user_settings_details = frappe.db.sql('''select user, doctype, data from `__UserSettings` where
data like "%%%s%%" and doctype in ({0})'''.format(", ".join(["%s"]*len(linked_doctypes))),
tuple([old] + list(linked_doctypes)), as_dict=1)
# create the dict using the doctype name as key and values as list of the user settings
from collections import defaultdict
user_settings_dict = defaultdict(list)
for user_setting in user_settings_details:
user_settings_dict[user_setting.doctype].append(user_setting)
# update the name in linked doctype whose user settings exists
for fields in link_fields:
user_settings = user_settings_dict.get(fields.parent)
if user_settings:
for user_setting in user_settings:
update_user_settings_data(user_setting, "value", old, new, "docfield", fields.fieldname)
else:
continue
def update_attachments(doctype, old, new):
try:
if old != "File Data" and doctype != "DocType":
frappe.db.sql("""update `tabFile` set attached_to_name=%s
where attached_to_name=%s and attached_to_doctype=%s""", (new, old, doctype))
except Exception as e:
if e.args[0]!=1054: # in patch?
raise
def rename_versions(doctype, old, new):
frappe.db.sql("""update tabVersion set docname=%s where ref_doctype=%s and docname=%s""",
(new, doctype, old))
def rename_parent_and_child(doctype, old, new, meta):
# rename the doc
frappe.db.sql("update `tab%s` set name=%s where name=%s" % (frappe.db.escape(doctype), '%s', '%s'),
(new, old))
update_autoname_field(doctype, new, meta)
update_child_docs(old, new, meta)
def update_autoname_field(doctype, new, meta):
# update the value of the autoname field on rename of the docname
if meta.get('autoname'):
field = meta.get('autoname').split(':')
if field and field[0] == "field":
frappe.db.sql("update `tab%s` set %s=%s where name=%s" % (frappe.db.escape(doctype), field[1], '%s', '%s'),
(new, new))
def validate_rename(doctype, new, meta, merge, force, ignore_permissions):
# using for update so that it gets locked and someone else cannot edit it while this rename is going on!
exists = frappe.db.sql("select name from `tab{doctype}` where name=%s for update".format(doctype=frappe.db.escape(doctype)), new)
exists = exists[0][0] if exists else None
if merge and not exists:
frappe.msgprint(_("{0} {1} does not exist, select a new target to merge").format(doctype, new), raise_exception=1)
if exists and exists != new:
# for fixing case, accents
exists = None
if (not merge) and exists:
frappe.msgprint(_("Another {0} with name {1} exists, select another name").format(doctype, new), raise_exception=1)
if not (ignore_permissions or frappe.has_permission(doctype, "write")):
frappe.msgprint(_("You need write permission to rename"), raise_exception=1)
if not (force or ignore_permissions) and not meta.allow_rename:
frappe.msgprint(_("{0} not allowed to be renamed").format(_(doctype)), raise_exception=1)
# validate naming like it's done in doc.py
new = validate_name(doctype, new, merge=merge)
return new
def rename_doctype(doctype, old, new, force=False):
# change options for fieldtype Table
update_options_for_fieldtype("Table", old, new)
update_options_for_fieldtype("Link", old, new)
update_user_permissions(old, new)
# change options where select options are hardcoded i.e. listed
select_fields = get_select_fields(old, new)
update_link_field_values(select_fields, old, new, doctype)
update_select_field_values(old, new)
# change parenttype for fieldtype Table
update_parenttype_values(old, new)
def update_child_docs(old, new, meta):
# update "parent"
for df in meta.get_table_fields():
frappe.db.sql("update `tab%s` set parent=%s where parent=%s" \
% (frappe.db.escape(df.options), '%s', '%s'), (new, old))
def update_link_field_values(link_fields, old, new, doctype):
for field in link_fields:
if field['issingle']:
try:
single_doc = frappe.get_doc(field['parent'])
if single_doc.get(field['fieldname'])==old:
single_doc.set(field['fieldname'], new)
                    # update single docs using ORM rather than query
# as single docs also sometimes sets defaults!
single_doc.flags.ignore_mandatory = True
single_doc.save(ignore_permissions=True)
except ImportError:
# fails in patches where the doctype has been renamed
# or no longer exists
pass
else:
# because the table hasn't been renamed yet!
parent = field['parent'] if field['parent']!=new else old
frappe.db.sql("""\
update `tab%s` set `%s`=%s
where `%s`=%s""" \
% (frappe.db.escape(parent), frappe.db.escape(field['fieldname']), '%s',
frappe.db.escape(field['fieldname']), '%s'),
(new, old))
# update cached link_fields as per new
if doctype=='DocType' and field['parent'] == old:
field['parent'] = new
def get_link_fields(doctype):
# get link fields from tabDocField
if not frappe.flags.link_fields:
frappe.flags.link_fields = {}
if not doctype in frappe.flags.link_fields:
link_fields = frappe.db.sql("""\
select parent, fieldname,
(select issingle from tabDocType dt
where dt.name = df.parent) as issingle
from tabDocField df
where
df.options=%s and df.fieldtype='Link'""", (doctype,), as_dict=1)
# get link fields from tabCustom Field
custom_link_fields = frappe.db.sql("""\
select dt as parent, fieldname,
(select issingle from tabDocType dt
where dt.name = df.dt) as issingle
from `tabCustom Field` df
where
df.options=%s and df.fieldtype='Link'""", (doctype,), as_dict=1)
# add custom link fields list to link fields list
link_fields += custom_link_fields
# remove fields whose options have been changed using property setter
property_setter_link_fields = frappe.db.sql("""\
select ps.doc_type as parent, ps.field_name as fieldname,
(select issingle from tabDocType dt
where dt.name = ps.doc_type) as issingle
from `tabProperty Setter` ps
where
ps.property_type='options' and
ps.field_name is not null and
ps.value=%s""", (doctype,), as_dict=1)
link_fields += property_setter_link_fields
frappe.flags.link_fields[doctype] = link_fields
return frappe.flags.link_fields[doctype]
def update_options_for_fieldtype(fieldtype, old, new):
if frappe.conf.developer_mode:
for name in frappe.db.sql_list("""select parent from
tabDocField where options=%s""", old):
doctype = frappe.get_doc("DocType", name)
save = False
for f in doctype.fields:
if f.options == old:
f.options = new
save = True
if save:
doctype.save()
else:
frappe.db.sql("""update `tabDocField` set options=%s
where fieldtype=%s and options=%s""", (new, fieldtype, old))
frappe.db.sql("""update `tabCustom Field` set options=%s
where fieldtype=%s and options=%s""", (new, fieldtype, old))
frappe.db.sql("""update `tabProperty Setter` set value=%s
where property='options' and value=%s""", (new, old))
def update_user_permissions(old_doctype_name, new_doctype_name):
user_perms = frappe.get_all('User Permission', fields=['name','skip_for_doctype'])
for perm in user_perms:
doctype_list = perm.skip_for_doctype.split("\n") if perm.skip_for_doctype else []
if old_doctype_name in doctype_list:
new_list = [new_doctype_name if dt==old_doctype_name else dt for dt in doctype_list]
new_string = "\n".join(new_list)
frappe.db.set_value('User Permission', perm.name, 'skip_for_doctype', new_string)
def get_select_fields(old, new):
"""
get select type fields where doctype's name is hardcoded as
new line separated list
"""
# get link fields from tabDocField
select_fields = frappe.db.sql("""\
select parent, fieldname,
(select issingle from tabDocType dt
where dt.name = df.parent) as issingle
from tabDocField df
where
df.parent != %s and df.fieldtype = 'Select' and
df.options like "%%%%%s%%%%" """ \
% ('%s', frappe.db.escape(old)), (new,), as_dict=1)
# get link fields from tabCustom Field
custom_select_fields = frappe.db.sql("""\
select dt as parent, fieldname,
(select issingle from tabDocType dt
where dt.name = df.dt) as issingle
from `tabCustom Field` df
where
df.dt != %s and df.fieldtype = 'Select' and
df.options like "%%%%%s%%%%" """ \
% ('%s', frappe.db.escape(old)), (new,), as_dict=1)
# add custom link fields list to link fields list
select_fields += custom_select_fields
# remove fields whose options have been changed using property setter
property_setter_select_fields = frappe.db.sql("""\
select ps.doc_type as parent, ps.field_name as fieldname,
(select issingle from tabDocType dt
where dt.name = ps.doc_type) as issingle
from `tabProperty Setter` ps
where
ps.doc_type != %s and
ps.property_type='options' and
ps.field_name is not null and
ps.value like "%%%%%s%%%%" """ \
% ('%s', frappe.db.escape(old)), (new,), as_dict=1)
select_fields += property_setter_select_fields
return select_fields
def update_select_field_values(old, new):
frappe.db.sql("""\
update `tabDocField` set options=replace(options, %s, %s)
where
parent != %s and fieldtype = 'Select' and
(options like "%%%%\\n%s%%%%" or options like "%%%%%s\\n%%%%")""" % \
('%s', '%s', '%s', frappe.db.escape(old), frappe.db.escape(old)), (old, new, new))
frappe.db.sql("""\
update `tabCustom Field` set options=replace(options, %s, %s)
where
dt != %s and fieldtype = 'Select' and
(options like "%%%%\\n%s%%%%" or options like "%%%%%s\\n%%%%")""" % \
('%s', '%s', '%s', frappe.db.escape(old), frappe.db.escape(old)), (old, new, new))
frappe.db.sql("""\
update `tabProperty Setter` set value=replace(value, %s, %s)
where
doc_type != %s and field_name is not null and
property='options' and
(value like "%%%%\\n%s%%%%" or value like "%%%%%s\\n%%%%")""" % \
('%s', '%s', '%s', frappe.db.escape(old), frappe.db.escape(old)), (old, new, new))
def update_parenttype_values(old, new):
child_doctypes = frappe.db.sql("""\
select options, fieldname from `tabDocField`
where parent=%s and fieldtype='Table'""", (new,), as_dict=1)
custom_child_doctypes = frappe.db.sql("""\
select options, fieldname from `tabCustom Field`
where dt=%s and fieldtype='Table'""", (new,), as_dict=1)
child_doctypes += custom_child_doctypes
fields = [d['fieldname'] for d in child_doctypes]
property_setter_child_doctypes = frappe.db.sql("""\
select value as options from `tabProperty Setter`
where doc_type=%s and property='options' and
field_name in ("%s")""" % ('%s', '", "'.join(fields)),
(new,))
child_doctypes += property_setter_child_doctypes
child_doctypes = (d['options'] for d in child_doctypes)
for doctype in child_doctypes:
frappe.db.sql("""\
update `tab%s` set parenttype=%s
where parenttype=%s""" % (doctype, '%s', '%s'),
(new, old))
def rename_dynamic_links(doctype, old, new):
for df in get_dynamic_link_map().get(doctype, []):
# dynamic link in single, just one value to check
if frappe.get_meta(df.parent).issingle:
refdoc = frappe.db.get_singles_dict(df.parent)
if refdoc.get(df.options)==doctype and refdoc.get(df.fieldname)==old:
frappe.db.sql("""update tabSingles set value=%s where
field=%s and value=%s and doctype=%s""", (new, df.fieldname, old, df.parent))
else:
# because the table hasn't been renamed yet!
parent = df.parent if df.parent != new else old
frappe.db.sql("""update `tab{parent}` set {fieldname}=%s
where {options}=%s and {fieldname}=%s""".format(parent = parent,
fieldname=df.fieldname, options=df.options), (new, doctype, old))
def bulk_rename(doctype, rows=None, via_console = False):
"""Bulk rename documents
:param doctype: DocType to be renamed
:param rows: list of documents as `((oldname, newname), ..)`"""
if not rows:
frappe.throw(_("Please select a valid csv file with data"))
if not via_console:
max_rows = 500
if len(rows) > max_rows:
frappe.throw(_("Maximum {0} rows allowed").format(max_rows))
rename_log = []
for row in rows:
# if row has some content
if len(row) > 1 and row[0] and row[1]:
try:
if rename_doc(doctype, row[0], row[1]):
msg = _("Successful: {0} to {1}").format(row[0], row[1])
frappe.db.commit()
else:
msg = _("Ignored: {0} to {1}").format(row[0], row[1])
except Exception as e:
msg = _("** Failed: {0} to {1}: {2}").format(row[0], row[1], repr(e))
frappe.db.rollback()
if via_console:
print(msg)
else:
rename_log.append(msg)
frappe.enqueue('frappe.utils.global_search.rebuild_for_doctype', doctype=doctype)
if not via_console:
return rename_log
def update_linked_doctypes(doctype, docname, linked_to, value, ignore_doctypes=None):
"""
linked_doctype_info_list = list formed by get_fetch_fields() function
    docname = Master DocType's name in which modifications are made
    value = Value for the field that is set in other DocTypes by fetching from the Master DocType
"""
linked_doctype_info_list = get_fetch_fields(doctype, linked_to, ignore_doctypes)
for d in linked_doctype_info_list:
frappe.db.sql("""
update
`tab{doctype}`
set
{linked_to_fieldname} = "{value}"
where
{master_fieldname} = "{docname}"
and {linked_to_fieldname} != "{value}"
""".format(
doctype = d['doctype'],
linked_to_fieldname = d['linked_to_fieldname'],
value = value,
master_fieldname = d['master_fieldname'],
docname = docname
))
def get_fetch_fields(doctype, linked_to, ignore_doctypes=None):
"""
doctype = Master DocType in which the changes are being made
    linked_to = DocType name of the field that is being updated in Master
    This function fetches a list of all DocTypes where both doctype and linked_to are found
    as link fields.
    Forms a list of dicts of the form -
    [{doctype: , master_fieldname: , linked_to_fieldname: }]
where
doctype = DocType where changes need to be made
master_fieldname = Fieldname where options = doctype
linked_to_fieldname = Fieldname where options = linked_to
"""
master_list = get_link_fields(doctype)
linked_to_list = get_link_fields(linked_to)
out = []
from itertools import product
product_list = product(master_list, linked_to_list)
for d in product_list:
linked_doctype_info = frappe._dict()
if d[0]['parent'] == d[1]['parent'] \
and (not ignore_doctypes or d[0]['parent'] not in ignore_doctypes) \
and not d[1]['issingle']:
linked_doctype_info['doctype'] = d[0]['parent']
linked_doctype_info['master_fieldname'] = d[0]['fieldname']
linked_doctype_info['linked_to_fieldname'] = d[1]['fieldname']
out.append(linked_doctype_info)
return out
the-stack_0_24992
#-- a simple "extruder" to obtain CityJSON LoD1 Buildings from footprints
#-- Hugo Ledoux <[email protected]>
#-- 2019-02-28
import fiona
import shapely.geometry as sg
import json
import copy
def main():
#-- read the input footprints
c = fiona.open('somebuildings.gpkg')
print("# of features: ", len(c))
lsgeom = [] #-- list of the geometries
lsattributes = [] #-- list of the attributes
for each in c:
        lsgeom.append(sg.shape(each['geometry'])) #-- cast Fiona geometries to Shapely
lsattributes.append(each['properties'])
#-- extrude to CityJSON
    cm = output_cityjson(lsgeom, lsattributes)
#-- save the file to disk 'mycitymodel.json'
json_str = json.dumps(cm, indent=2)
fout = open("mycitymodel.json", "w")
fout.write(json_str)
print("done.")
def output_cityjson(lsgeom, lsattributes):
#-- create the JSON data structure for the City Model
cm = {}
cm["type"] = "CityJSON"
cm["version"] = "0.9"
cm["CityObjects"] = {}
cm["vertices"] = []
for (i,geom) in enumerate(lsgeom):
footprint = geom[0]
#-- one building
oneb = {}
oneb['type'] = 'Building'
oneb['attributes'] = {}
oneb['attributes']['local-id'] = lsattributes[i]['lokaalid']
oneb['attributes']['bgt_status'] = lsattributes[i]['bgt_status']
oneb['geometry'] = [] #-- a cityobject can have >1
#-- the geometry
g = {}
g['type'] = 'Solid'
g['lod'] = 1
        allsurfaces = [] #-- list of surfaces forming the outer shell of the solid
#-- exterior ring of each footprint
oring = list(footprint.exterior.coords)
oring.pop() #-- remove last point since first==last
if footprint.exterior.is_ccw == False:
#-- to get proper orientation of the normals
oring.reverse()
extrude_walls(oring, lsattributes[i]['height'], allsurfaces, cm)
#-- interior rings of each footprint
irings = []
interiors = list(footprint.interiors)
for each in interiors:
iring = list(each.coords)
iring.pop() #-- remove last point since first==last
if each.is_ccw == True:
#-- to get proper orientation of the normals
iring.reverse()
irings.append(iring)
extrude_walls(iring, lsattributes[i]['height'], allsurfaces, cm)
#-- top-bottom surfaces
extrude_roof_ground(oring, irings, lsattributes[i]['height'], False, allsurfaces, cm)
extrude_roof_ground(oring, irings, 0, True, allsurfaces, cm)
#-- add the extruded geometry to the geometry
g['boundaries'] = []
g['boundaries'].append(allsurfaces)
#-- add the geom to the building
oneb['geometry'].append(g)
#-- insert the building as one new city object
cm['CityObjects'][lsattributes[i]['gml_id']] = oneb
return cm
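#-- For reference, the geometry assembled above follows the CityJSON Solid layout:
#-- g['boundaries'] = [allsurfaces], where allsurfaces is a list of surfaces and each
#-- surface is a list of rings of vertex indices, e.g. [[0, 1, 2, 3]] (illustrative
#-- indices only; the real ones are appended by the extrude_* helpers below).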
def extrude_roof_ground(orng, irngs, height, reverse, allsurfaces, cm):
oring = copy.deepcopy(orng)
irings = copy.deepcopy(irngs)
if reverse == True:
oring.reverse()
for each in irings:
each.reverse()
for (i, pt) in enumerate(oring):
cm['vertices'].append([pt[0], pt[1], height])
oring[i] = (len(cm['vertices']) - 1)
for (i, iring) in enumerate(irings):
for (j, pt) in enumerate(iring):
cm['vertices'].append([pt[0], pt[1], height])
irings[i][j] = (len(cm['vertices']) - 1)
# print(oring)
output = []
output.append(oring)
for each in irings:
output.append(each)
allsurfaces.append(output)
def extrude_walls(ring, height, allsurfaces, cm):
#-- each edge become a wall, ie a rectangle
for (j, v) in enumerate(ring[:-1]):
l = []
cm['vertices'].append([ring[j][0], ring[j][1], 0])
cm['vertices'].append([ring[j+1][0], ring[j+1][1], 0])
cm['vertices'].append([ring[j+1][0], ring[j+1][1], height])
cm['vertices'].append([ring[j][0], ring[j][1], height])
t = len(cm['vertices'])
allsurfaces.append([[t-4, t-3, t-2, t-1]])
#-- last-first edge
l = []
cm['vertices'].append([ring[-1][0], ring[-1][1], 0])
cm['vertices'].append([ring[0][0], ring[0][1], 0])
cm['vertices'].append([ring[0][0], ring[0][1], height])
cm['vertices'].append([ring[-1][0], ring[-1][1], height])
t = len(cm['vertices'])
allsurfaces.append([[t-4, t-3, t-2, t-1]])
if __name__ == '__main__':
main()
the-stack_0_24996
"""
Wrapper for an `ast.Module` node with corresponding node info.
"""
import ast
from pathlib import Path
from typing import Any, Dict, Generator, List, Optional, Text
from fhdoc.ast_parser.analyzers.module_analyzer import ModuleAnalyzer
from fhdoc.ast_parser.enums import RenderPart
from fhdoc.ast_parser.node_records.attribute_record import AttributeRecord
from fhdoc.ast_parser.node_records.class_record import ClassRecord
from fhdoc.ast_parser.node_records.function_record import FunctionRecord
from fhdoc.ast_parser.node_records.import_record import ImportRecord
from fhdoc.ast_parser.node_records.node_record import NodeRecord
from fhdoc.utils.import_string import ImportString
from fhdoc.utils.indent_trimmer import IndentTrimmer
class ModuleRecord(NodeRecord):
"""
Wrapper for an `ast.Module` node with corresponding node info.
Responsible for parsing Python source as well.
Arguments:
node -- Result of `ast.parse`.
"""
def __init__(self, node):
# type: (ast.Module) -> None
super().__init__(node)
self.all_names = [] # type: List[Text]
self.class_records = [] # type: List[ClassRecord]
self.function_records = [] # type: List[FunctionRecord]
        self.import_records = [] # type: List[ImportRecord]
        self.attribute_records = [] # type: List[AttributeRecord]
self.source_path = Path("")
self.source_lines = [] # type: List[Text]
self.name = "module"
self.title = ""
self.import_string = ImportString("")
self.import_string_map = {} # type: Dict[ImportString, NodeRecord]
self.docstring = self._get_docstring()
@classmethod
def create_from_source(cls, source_path, import_string):
# type: (Path, ImportString) -> ModuleRecord
"""
Create new `ModuleRecord` from path.
Arguments:
source_path -- Path to a Python source file.
import_string -- File absolute import string.
Returns:
New `ModuleRecord` instance.
"""
content = source_path.read_text(encoding="utf-8")
node = ast.parse(content)
if not isinstance(node, ast.Module):
raise TypeError
record = cls(node)
record.import_string = import_string
record.name = import_string.parts[-1]
record.source_path = source_path
record.source_lines = source_path.read_text(encoding="utf-8").split("\n")
return record
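    # Usage sketch (hypothetical paths and import strings):
    #     record = ModuleRecord.create_from_source(
    #         Path("my_package/my_module.py"), ImportString("my_package.my_module")
    #     )
    #     record.build_children()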
def find_record(self, import_string):
# type: (ImportString) -> Optional[NodeRecord]
"""
Find child in the Module by an absolute or relative import string.
        Arguments:
            import_string -- Record import string.
        Returns:
            Found child record or None.
"""
if import_string == self.import_string:
return self
result = self.import_string_map.get(import_string)
if result:
return result
return None
def iter_records(self):
# type: () -> Generator[NodeRecord, None, None]
"""
        Iterate over Module class, method and function records.
Yields:
A child record.
"""
for class_record in self.class_records:
if self.all_names and class_record.name not in self.all_names:
continue
yield class_record
yield from class_record.iter_records()
for function_record in self.function_records:
if self.all_names and function_record.name not in self.all_names:
continue
yield function_record
def _set_import_strings(self):
# type: () -> None
for class_record in self.class_records:
class_record.import_string = self.import_string + class_record.name
self.import_string_map[class_record.import_string] = class_record
for class_child_record in class_record.iter_records():
class_child_record.import_string = (
class_record.import_string + class_child_record.name
)
self.import_string_map[class_child_record.import_string] = class_child_record
for function_record in self.function_records:
function_record.import_string = self.import_string + function_record.name
self.import_string_map[function_record.import_string] = function_record
for attribute_record in self.attribute_records:
attribute_record.import_string = self.import_string + attribute_record.name
self.import_string_map[attribute_record.import_string] = attribute_record
def _render_parts(self, indent=0):
# type: (int) -> List[Any]
parts = [] # type: List[Any]
if self.import_records:
for import_record in self.import_records:
parts.append(import_record)
parts.append(RenderPart.LINE_BREAK)
parts.append(RenderPart.LINE_BREAK)
if self.class_records:
for class_record in self.class_records:
parts.append(class_record)
parts.append(RenderPart.LINE_BREAK)
parts.append(RenderPart.LINE_BREAK)
for function_record in self.function_records:
parts.append(function_record)
parts.append(RenderPart.LINE_BREAK)
return parts
def build_children(self):
# type: () -> None
"""
Collect full information about Module child records.
Used only when doc for this ModuleRecord is building.
"""
analyzer = ModuleAnalyzer()
analyzer.visit(self.node)
self.all_names = analyzer.all_names
for class_node in analyzer.class_nodes:
self.class_records.append(ClassRecord(class_node))
for function_node in analyzer.function_nodes:
self.function_records.append(FunctionRecord(function_node, is_method=False))
for attribute_node in analyzer.attribute_nodes:
self.attribute_records.append(AttributeRecord(attribute_node))
for import_node in analyzer.import_nodes:
for alias in import_node.names:
self.import_records.append(ImportRecord(import_node, alias))
self.class_records.sort(key=lambda x: x.name)
self.function_records.sort(key=lambda x: x.name)
main_class_lookup_name = self.name.replace("_", "")
for class_record in self.class_records:
class_record.parse()
# find real title
if class_record.name.lower() == main_class_lookup_name:
self.title = class_record.name
self._set_import_strings()
def _parse(self):
# type: () -> None
for attribute_record in self.attribute_records:
attribute_record.docstring = self._get_comment_docstring(attribute_record)
for class_record in self.class_records:
class_record.parse()
for attribute_record in class_record.attribute_records:
attribute_record.docstring = self._get_comment_docstring(attribute_record)
for method_record in class_record.method_records:
method_record.parse()
if method_record.is_classmethod or method_record.is_staticmethod:
method_record.title = f"{class_record.name}.{method_record.name}"
else:
method_record.title = f"{class_record.name}().{method_record.name}"
function_lines = self._get_function_def_lines(method_record)
method_record.parse_type_comments(function_lines)
for function_record in self.function_records:
function_record.parse()
function_lines = self._get_function_def_lines(function_record)
function_record.parse_type_comments(function_lines)
def _get_function_def_lines(self, function_record):
# type: (FunctionRecord) -> List[Text]
"""
Get all function definition lines for comment type
hints lookup.
Removes indentation.
Arguments:
function_record -- Function record for source lookup.
Returns:
Function definition lines as an array.
"""
if not isinstance(function_record.node, (ast.AsyncFunctionDef, ast.FunctionDef)):
raise TypeError
result = [] # type: List[Text]
start_index = function_record.line_number - 1
end_index = function_record.node.body[0].lineno - 1
result = self.source_lines[start_index:end_index]
result = [i.rstrip("\n") for i in result]
result = IndentTrimmer.trim_lines(result)
return result
def _get_comment_docstring(self, node_record):
# type: (NodeRecord) -> Text
"""
Get comment docstring preceding the object from the source code.
        Returns only lines starting with `#`, joined with newlines.
Arguments:
node_record -- Node record for source lookup.
Returns:
A docstring as a string.
"""
if not isinstance(node_record.node, ast.Assign):
raise TypeError
result = [] # type: List[Text]
start_index = node_record.node.lineno - 2
try:
start_line = self.source_lines[start_index].strip()
except IndexError:
return ""
while start_index >= 0 and start_line.startswith("#"):
line = start_line[1:].strip()
if not line.startswith("FIXME") and not line.startswith("TODO"):
result.append(line)
start_index -= 1
start_line = self.source_lines[start_index].strip()
result.reverse()
return "\n ".join(result)
the-stack_0_24998
from nonebot.log import logger
from .emoji import emoji, emoji_py
import jieba
import pinyin
def text_to_emoji(text):
try:
text_with_emoji = ''
text_jieba = jieba.cut(text, cut_all=False)
for word in text_jieba:
word = word.strip()
            # word-level lookup
if word in emoji.keys():
text_with_emoji += emoji[word]
            else:
word_py = pinyin.get(word, format="strip")
                # word-level pinyin lookup
if word_py in emoji_py.keys():
text_with_emoji += emoji_py[word_py]
else:
                    if len(word) > 1:  # two or more characters
                        # single-character lookup
for character in word:
if character in emoji.keys():
text_with_emoji += emoji[character]
else:
                                # single-character pinyin lookup
character_py = pinyin.get(character, format="strip")
if character_py in emoji_py.keys():
text_with_emoji += emoji_py[character_py]
else:
text_with_emoji += character
                    else:  # single character: both the character and its pinyin were already checked above, so append it unchanged
text_with_emoji += word.strip()
except Exception as e:
logger.error("文本抽象化失败~")
raise e
return text_with_emoji
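# Minimal usage sketch (hypothetical input; the output depends entirely on the
# emoji/emoji_py dictionaries shipped with this plugin):
#     abstract_text = text_to_emoji("测试文本")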
the-stack_0_25000
"""Calculates a cartesian motion path (linear in tool space).
Inputs:
robot: The robot
group: str, optional
The planning group used for calculation. Defaults to the robot's
main planning group.
planes: The planes in world coordinate frame through which the path is defined.
start_configuration: :class:`compas_fab.robots.Configuration`, optional
The robot's full configuration, i.e. values for all configurable
joints of the entire robot cell, at the starting position. Defaults
to the all-zero configuration.
max_step: float
The approximate distance between the calculated points. (Defined in
the robot's units)
avoid_collisions: bool, optional
Whether or not to avoid collisions. Defaults to True.
attached_collision_meshes: list of :class:`compas_fab.robots.AttachedCollisionMesh`
Defaults to None.
compute: bool
Press to calculate solution.
Output:
:class:`compas_fab.robots.JointTrajectory`
The calculated trajectory.
"""
from __future__ import print_function
import scriptcontext as sc
from compas.geometry import Frame
guid = str(ghenv.Component.InstanceGuid)
response_key = "response_" + guid
if response_key not in sc.sticky:
sc.sticky[response_key] = None
frames = [Frame(plane.Origin, plane.XAxis, plane.YAxis) for plane in planes]
if robot and robot.client and start_configuration and compute:
if robot.client.is_connected:
options = {
'max_step': float(max_step),
'avoid_collisions': bool(avoid_collisions),
            'attached_collision_meshes': list(attached_collision_meshes),
}
sc.sticky[response_key] = robot.plan_cartesian_motion(frames,
start_configuration=start_configuration,
group=group,
options=options)
else:
print("Robot client is not connected")
trajectory = sc.sticky[response_key]
the-stack_0_25001
import tensorflow as tf
import numpy
import matplotlib.pyplot as plt
from PIL import Image, ImageSequence
import sys
import time
sys.path.insert(0,'../build/lib.linux-x86_64-3.5/')
import BoardPy
import random
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
def conv2d(x, W,strid):
#num imgs, x,y,channels
return tf.nn.conv2d(x, W, strides=strid, padding='SAME')
def meanpool(x,AVG,strid):
return tf.nn.depthwise_conv2d(x,AVG,strides=strid,padding='VALID')
class Hal:
def __init__(self,DIMS):
averagetensor=tf.constant(1.0/32.0/32.0,shape=[32,32,4,1])
self.DIMS=DIMS
self.epochs=400
self.end=False
self.convolutions=[]
self.full_conn=[]
self.board=BoardPy.BoardPy(*DIMS)
self.placeh_out=numpy.zeros((1,DIMS[0]*DIMS[1]))
DISCOUNT=tf.constant(0.5)
self.train_index=tf.placeholder(tf.int32)
self.sess=tf.InteractiveSession()
self.input_vec=tf.placeholder(tf.float32, shape=[None,DIMS[0],DIMS[1],1])
#self.x_image=tf.reshape(self.input_vec,[-1,DIMS[0],DIMS[1],4])
#self.out_next=tf.placeholder(tf.float32,shape=[None,DIMS[0]*DIMS[1]])
self.target=tf.placeholder(tf.float32)
#Build convolutional layers
s=self.convolutions
#self.convolutions.append(self._newconvlayer([32,32,4,11],self.input_vec,[1,32,32,1]))
print(self.input_vec.get_shape())
        self.convolutions.append([meanpool(self.input_vec,averagetensor,[1,32,32,1]),[-1,10,10,4]]) # Average each 32x32 block into a 10x10 matrix
print(s[-1][0].get_shape())
self.convolutions.append(self._newconvlayer([3,3,4,100],s[-1][0],[1,1,1,1])) #Try doing a max pool?
#self.convolutions.append((tf.nn.max_pool(s[-1][0], ksize=[1, 1, 1, 5],
# strides=[1, 1, 1, 5], padding='SAME'),[]))
self.convolutions.append(self._newconvlayer([3,3,100,25],s[-1][0],[1,1,1,1]))
self.convolutions.append(self._newconvlayer([3,3,25,50],s[-1][0],[1,1,1,1]))
print ((s[0][0]).get_shape())
print ((s[1][0]).get_shape())
lastconv=tf.reshape(self.convolutions[-1][0],[-1, DIMS[0]*DIMS[1]*50])
#build fully connected layers
f=self.full_conn
self.full_conn.append(self._newconnectlayer([DIMS[0]*DIMS[1]*50,DIMS[0]*DIMS[1]],lastconv))
self.out_vec=self.full_conn[-1][0]#tf.nn.softmax(self.full_conn[-1][0])
#Training stuff
Q_curr=self.out_vec[0][self.train_index]
self.error=tf.reduce_mean(tf.square(tf.sub(self.target,Q_curr)))
self.train_step=tf.train.AdamOptimizer(1e-4).minimize(self.error)
#tf.global_variables_initializer()
self.sess.run(tf.initialize_all_variables())
def _newconvlayer(self,dims,last,strides):
weights=weight_variable(dims)
bias=bias_variable([dims[-1]])
h=tf.nn.relu(conv2d(last,weights,strides)+bias)
return (h,dims,(weights,bias))
def _newconnectlayer(self,dims,last):
weights=weight_variable(dims)
bias=bias_variable([dims[-1]])
h=tf.nn.relu(tf.matmul(last,weights)+bias)
return (h,dims,(weights,bias))
def perform_click(self, out_vec):
reward={1:-.1,2:-.1,0:0.05}
v=out_vec.argmax()
sample=random.uniform(0,1)
if sample<0.01:
x=random.randint(0,9)
y=random.randint(0,9)
ret=self.board.click(x,y)
else:
ret=self.board.click(v//self.DIMS[0],v%self.DIMS[1])
self.click_filter[v]=0
#print (ret)
if ret==2: #Game end
#print ('f to pay respecks')
self.end=True
return reward[ret]
def evaluate(self):
while not self.end:
image=self.board.imgboard.view(numpy.uint8).reshape(1,self.DIMS[0]*32,-1,4)
image=(numpy.array(image,dtype=numpy.float32)-127.0)/255.0
guess_out,=self.sess.run([self.out_vec],feed_dict={self.input_vec:image,self.target:0,self.train_index:0})
self.rew=self.perform_click(guess_out)
def train(self):
image=((self.board.mineboard-3.5)/2.8722813232690143) #Normalize mineboard
image=numpy.pad(image,[1,1],'constant').reshape(-1,self.DIMS[0],self.DIMS[1],1) #reshape to dims
zeros=numpy.zeros((1,self.DIMS[0]*self.DIMS[1]))
guess_out,=self.sess.run([self.out_vec],feed_dict={self.input_vec:image,self.target:0,self.train_index:0})
guess_out[0]=numpy.multiply(guess_out[0],self.click_filter)
rew=self.perform_click(guess_out)
image_1=self.board.imgboard.view(numpy.uint8).reshape(1,self.DIMS[0]*32,-1,4)
image_1=(numpy.array(image,dtype=numpy.float32)-127.0)/255.0
nextQ,=self.sess.run([self.out_vec],feed_dict={self.input_vec:image,self.target:0,self.train_index:0})
nextQ[0]=numpy.multiply(nextQ[0],self.click_filter)
nextQ=nextQ[0][nextQ.argmax()]
target=rew+0.5*nextQ
self.last=self.click_filter.copy()
self.sess.run([self.train_step],feed_dict={self.input_vec:image,self.target:target,self.train_index:guess_out.argmax()})
def eval(self):
scores=[]
for i in range(self.epochs):
self.rew=None
self.currQ=None
self.click_filter=numpy.full(10*10,1.0)
self.last=self.click_filter.copy()
if (i%10==0):
print('epoch',i)
self.end=False
#self.board.click(5,5)
#self.click_filter[55]=1.0
while not self.end:
self.train()
#self.train()
scores.append(self.board.score())
self.board.remake(10,10)
self.end=False
plt.plot(numpy.arange(0,len(scores)),scores)
plt.savefig('progress.png')
self.board.click(5,5)
while not self.end:
image=self.board.imgboard.view(numpy.uint8).reshape(1,self.DIMS[0]*32,-1,4)
image=(numpy.array(image,dtype=numpy.float32)-127.0)/255.0
guess_out,=self.sess.run([self.out_vec],feed_dict={self.input_vec:image,self.target:0,self.train_index:0})
guess_out=numpy.multiply(guess_out,self.click_filter)
reward=self.perform_click(guess_out)
img=Image.fromarray(self.board.imgboard.view(numpy.uint8).reshape(10*32,-1,4))
img.show()
if __name__=='__main__':
h=Hal((10,10))
h.eval()
the-stack_0_25002
import sys
import json
print("Prend en entrée un argument : un fichier de retour au texte (JSON)")
print("Exemple : python 04_get_examples.py retour_au_texte_min\=4_max\=5.json.resultat.json")
path_file = sys.argv[1]
f = open(path_file)
retour_au_texte = json.load(f)
f.close()
liste_sequences = retour_au_texte.keys()
query = "a"
while query != "":
print("Veuillez entrer une séquence (sous la forme 'tag_tag_tag_tag') : ")
query = input()
if query in liste_sequences :
print(json.dumps(retour_au_texte[query], indent = 2))
the-stack_0_25005
import torch
import time
import numpy as np
import six
class TrainHandler:
def __init__(self,
train_loader,
valid_loader,
model,
criterion,
optimizer,
model_path,
batch_size=32,
epochs=5,
scheduler=None,
gpu_num=0):
self.train_loader = train_loader
self.valid_loader = valid_loader
self.criterion = criterion
self.optimizer = optimizer
self.model_path = model_path
self.batch_size = batch_size
self.epochs = epochs
self.scheduler = scheduler
if torch.cuda.is_available():
self.device = torch.device(f'cuda:{gpu_num}')
            print(f'Training device is gpu:{gpu_num}')
else:
self.device = torch.device('cpu')
print('Training device is cpu')
self.model = model.to(self.device)
def _train_func(self):
train_loss = 0
train_correct = 0
for i, (x, y) in enumerate(self.train_loader):
self.optimizer.zero_grad()
x, y = x.to(self.device).long(), y.to(self.device)
output = self.model(x)
loss = self.criterion(output, y)
train_loss += loss.item()
loss.backward()
self.optimizer.step()
train_correct += (output.argmax(1) == y).sum().item()
if self.scheduler is not None:
self.scheduler.step()
return train_loss / len(self.train_loader), train_correct / len(self.train_loader.dataset)
def _test_func(self):
valid_loss = 0
valid_correct = 0
for x, y in self.valid_loader:
x, y = x.to(self.device).long(), y.to(self.device)
with torch.no_grad():
output = self.model(x)
loss = self.criterion(output, y)
valid_loss += loss.item()
valid_correct += (output.argmax(1) == y).sum().item()
return valid_loss / len(self.valid_loader), valid_correct / len(self.valid_loader.dataset)
def train(self):
min_valid_loss = float('inf')
for epoch in range(self.epochs):
start_time = time.time()
train_loss, train_acc = self._train_func()
valid_loss, valid_acc = self._test_func()
if min_valid_loss > valid_loss:
min_valid_loss = valid_loss
torch.save(self.model, self.model_path)
print(f'\tSave model done valid loss: {valid_loss:.4f}')
secs = int(time.time() - start_time)
mins = secs / 60
secs = secs % 60
print('Epoch: %d' % (epoch + 1), " | time in %d minutes, %d seconds" % (mins, secs))
print(f'\tLoss: {train_loss:.4f}(train)\t|\tAcc: {train_acc * 100:.1f}%(train)')
print(f'\tLoss: {valid_loss:.4f}(valid)\t|\tAcc: {valid_acc * 100:.1f}%(valid)')
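# Minimal usage sketch (hypothetical loaders and model; all names below are placeholders):
#     handler = TrainHandler(train_loader, valid_loader, model,
#                            criterion=torch.nn.CrossEntropyLoss(),
#                            optimizer=torch.optim.Adam(model.parameters()),
#                            model_path="best_model.pt", batch_size=32, epochs=5)
#     handler.train()  # saves the model with the lowest validation loss to model_path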
def torch_text_process():
from torchtext import data
def tokenizer(text):
import jieba
return list(jieba.cut(text))
TEXT = data.Field(sequential=True, tokenize=tokenizer, lower=True, fix_length=20)
LABEL = data.Field(sequential=False, use_vocab=False)
all_dataset = data.TabularDataset.splits(path='',
train='LCQMC.csv',
format='csv',
fields=[('sentence1', TEXT), ('sentence2', TEXT), ('label', LABEL)])[0]
TEXT.build_vocab(all_dataset)
train, valid = all_dataset.split(0.1)
(train_iter, valid_iter) = data.BucketIterator.splits(datasets=(train, valid),
batch_sizes=(64, 128),
sort_key=lambda x: len(x.sentence1))
return train_iter, valid_iter
def pad_sequences(sequences, maxlen=None, dtype='int32',
padding='post', truncating='pre', value=0.):
"""Pads sequences to the same length.
This function transforms a list of
`num_samples` sequences (lists of integers)
into a 2D Numpy array of shape `(num_samples, num_timesteps)`.
`num_timesteps` is either the `maxlen` argument if provided,
or the length of the longest sequence otherwise.
Sequences that are shorter than `num_timesteps`
are padded with `value` at the end.
Sequences longer than `num_timesteps` are truncated
so that they fit the desired length.
The position where padding or truncation happens is determined by
the arguments `padding` and `truncating`, respectively.
Pre-padding is the default.
# Arguments
sequences: List of lists, where each element is a sequence.
maxlen: Int, maximum length of all sequences.
dtype: Type of the output sequences.
To pad sequences with variable length strings, you can use `object`.
padding: String, 'pre' or 'post':
pad either before or after each sequence.
truncating: String, 'pre' or 'post':
remove values from sequences larger than
`maxlen`, either at the beginning or at the end of the sequences.
value: Float or String, padding value.
# Returns
x: Numpy array with shape `(len(sequences), maxlen)`
# Raises
ValueError: In case of invalid values for `truncating` or `padding`,
or in case of invalid shape for a `sequences` entry.
"""
if not hasattr(sequences, '__len__'):
raise ValueError('`sequences` must be iterable.')
num_samples = len(sequences)
lengths = []
for x in sequences:
try:
lengths.append(len(x))
except TypeError:
raise ValueError('`sequences` must be a list of iterables. '
'Found non-iterable: ' + str(x))
if maxlen is None:
maxlen = np.max(lengths)
# take the sample shape from the first non empty sequence
# checking for consistency in the main loop below.
sample_shape = tuple()
for s in sequences:
if len(s) > 0:
sample_shape = np.asarray(s).shape[1:]
break
is_dtype_str = np.issubdtype(dtype, np.str_) or np.issubdtype(dtype, np.unicode_)
if isinstance(value, six.string_types) and dtype != object and not is_dtype_str:
raise ValueError("`dtype` {} is not compatible with `value`'s type: {}\n"
"You should set `dtype=object` for variable length strings."
.format(dtype, type(value)))
x = np.full((num_samples, maxlen) + sample_shape, value, dtype=dtype)
for idx, s in enumerate(sequences):
if not len(s):
continue # empty list/array was found
if truncating == 'pre':
trunc = s[-maxlen:]
elif truncating == 'post':
trunc = s[:maxlen]
else:
raise ValueError('Truncating type "%s" '
'not understood' % truncating)
# check `trunc` has expected shape
trunc = np.asarray(trunc, dtype=dtype)
if trunc.shape[1:] != sample_shape:
raise ValueError('Shape of sample %s of sequence at position %s '
'is different from expected shape %s' %
(trunc.shape[1:], idx, sample_shape))
if padding == 'post':
x[idx, :len(trunc)] = trunc
elif padding == 'pre':
x[idx, -len(trunc):] = trunc
else:
raise ValueError('Padding type "%s" not understood' % padding)
return x
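# Worked example of the padding/truncation behaviour documented above
# (illustrative values, default padding='post' and truncating='pre'):
#     pad_sequences([[1, 2], [3, 4, 5, 6, 7]], maxlen=4)
#     -> array([[1, 2, 0, 0],
#               [4, 5, 6, 7]], dtype=int32)
# Short sequences are padded at the end, long ones keep their last `maxlen` items.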
if __name__ == '__main__':
torch_text_process()
the-stack_0_25006
# Copyright 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Application default credentials.
Implements application default credentials and project ID detection.
"""
import io
import json
import os
import six
from google.auth import _default
from google.auth import environment_vars
from google.auth import exceptions
def load_credentials_from_file(filename, scopes=None, quota_project_id=None):
"""Loads Google credentials from a file.
The credentials file must be a service account key or stored authorized
user credentials.
Args:
filename (str): The full path to the credentials file.
scopes (Optional[Sequence[str]]): The list of scopes for the credentials. If
specified, the credentials will automatically be scoped if
necessary
quota_project_id (Optional[str]): The project ID used for
quota and billing.
Returns:
Tuple[google.auth.credentials.Credentials, Optional[str]]: Loaded
credentials and the project ID. Authorized user credentials do not
have the project ID information.
Raises:
google.auth.exceptions.DefaultCredentialsError: if the file is in the
wrong format or is missing.
"""
if not os.path.exists(filename):
raise exceptions.DefaultCredentialsError(
"File {} was not found.".format(filename)
)
with io.open(filename, "r") as file_obj:
try:
info = json.load(file_obj)
except ValueError as caught_exc:
new_exc = exceptions.DefaultCredentialsError(
"File {} is not a valid json file.".format(filename), caught_exc
)
six.raise_from(new_exc, caught_exc)
# The type key should indicate that the file is either a service account
# credentials file or an authorized user credentials file.
credential_type = info.get("type")
if credential_type == _default._AUTHORIZED_USER_TYPE:
from google.oauth2 import _credentials_async as credentials
try:
credentials = credentials.Credentials.from_authorized_user_info(
info, scopes=scopes
).with_quota_project(quota_project_id)
except ValueError as caught_exc:
msg = "Failed to load authorized user credentials from {}".format(filename)
new_exc = exceptions.DefaultCredentialsError(msg, caught_exc)
six.raise_from(new_exc, caught_exc)
if not credentials.quota_project_id:
_default._warn_about_problematic_credentials(credentials)
return credentials, None
elif credential_type == _default._SERVICE_ACCOUNT_TYPE:
from google.oauth2 import _service_account_async as service_account
try:
credentials = service_account.Credentials.from_service_account_info(
info, scopes=scopes
).with_quota_project(quota_project_id)
except ValueError as caught_exc:
msg = "Failed to load service account credentials from {}".format(filename)
new_exc = exceptions.DefaultCredentialsError(msg, caught_exc)
six.raise_from(new_exc, caught_exc)
return credentials, info.get("project_id")
else:
raise exceptions.DefaultCredentialsError(
"The file {file} does not have a valid type. "
"Type is {type}, expected one of {valid_types}.".format(
file=filename, type=credential_type, valid_types=_default._VALID_TYPES
)
)
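# --- Usage sketch (added for illustration; not part of the original module) ---
# Shows how load_credentials_from_file might be called for a service-account
# key. The file path and scope below are placeholders, not values taken from
# this repository; the helper is never called.
def _example_load_from_file():  # illustrative only
    credentials, project_id = load_credentials_from_file(
        "/path/to/service-account.json",
        scopes=["https://www.googleapis.com/auth/cloud-platform"],
        quota_project_id=None,
    )
    return credentials, project_id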
def _get_gcloud_sdk_credentials():
"""Gets the credentials and project ID from the Cloud SDK."""
from google.auth import _cloud_sdk
# Check if application default credentials exist.
credentials_filename = _cloud_sdk.get_application_default_credentials_path()
if not os.path.isfile(credentials_filename):
return None, None
credentials, project_id = load_credentials_from_file(credentials_filename)
if not project_id:
project_id = _cloud_sdk.get_project_id()
return credentials, project_id
def _get_explicit_environ_credentials():
"""Gets credentials from the GOOGLE_APPLICATION_CREDENTIALS environment
variable."""
explicit_file = os.environ.get(environment_vars.CREDENTIALS)
if explicit_file is not None:
credentials, project_id = load_credentials_from_file(
os.environ[environment_vars.CREDENTIALS]
)
return credentials, project_id
else:
return None, None
def _get_gae_credentials():
"""Gets Google App Engine App Identity credentials and project ID."""
# While this library is normally bundled with app_engine, there are
# some cases where it's not available, so we tolerate ImportError.
return _default._get_gae_credentials()
def _get_gce_credentials(request=None):
"""Gets credentials and project ID from the GCE Metadata Service."""
# Ping requires a transport, but we want application default credentials
# to require no arguments. So, we'll use the _http_client transport which
# uses http.client. This is only acceptable because the metadata server
# doesn't do SSL and never requires proxies.
# While this library is normally bundled with compute_engine, there are
# some cases where it's not available, so we tolerate ImportError.
return _default._get_gce_credentials(request)
def default_async(scopes=None, request=None, quota_project_id=None):
"""Gets the default credentials for the current environment.
`Application Default Credentials`_ provides an easy way to obtain
credentials to call Google APIs for server-to-server or local applications.
This function acquires credentials from the environment in the following
order:
1. If the environment variable ``GOOGLE_APPLICATION_CREDENTIALS`` is set
to the path of a valid service account JSON private key file, then it is
loaded and returned. The project ID returned is the project ID defined
in the service account file if available (some older files do not
contain project ID information).
2. If the `Google Cloud SDK`_ is installed and has application default
credentials set they are loaded and returned.
To enable application default credentials with the Cloud SDK run::
gcloud auth application-default login
If the Cloud SDK has an active project, the project ID is returned. The
active project can be set using::
gcloud config set project
3. If the application is running in the `App Engine standard environment`_
(first generation) then the credentials and project ID from the
`App Identity Service`_ are used.
4. If the application is running in `Compute Engine`_ or `Cloud Run`_ or
the `App Engine flexible environment`_ or the `App Engine standard
environment`_ (second generation) then the credentials and project ID
are obtained from the `Metadata Service`_.
5. If no credentials are found,
:class:`~google.auth.exceptions.DefaultCredentialsError` will be raised.
.. _Application Default Credentials: https://developers.google.com\
/identity/protocols/application-default-credentials
.. _Google Cloud SDK: https://cloud.google.com/sdk
.. _App Engine standard environment: https://cloud.google.com/appengine
.. _App Identity Service: https://cloud.google.com/appengine/docs/python\
/appidentity/
.. _Compute Engine: https://cloud.google.com/compute
.. _App Engine flexible environment: https://cloud.google.com\
/appengine/flexible
.. _Metadata Service: https://cloud.google.com/compute/docs\
/storing-retrieving-metadata
.. _Cloud Run: https://cloud.google.com/run
Example::
import google.auth
credentials, project_id = google.auth.default()
Args:
scopes (Sequence[str]): The list of scopes for the credentials. If
specified, the credentials will automatically be scoped if
necessary.
request (google.auth.transport.Request): An object used to make
HTTP requests. This is used to detect whether the application
is running on Compute Engine. If not specified, then it will
use the standard library http client to make requests.
quota_project_id (Optional[str]): The project ID used for
quota and billing.
Returns:
Tuple[~google.auth.credentials.Credentials, Optional[str]]:
the current environment's credentials and project ID. Project ID
may be None, which indicates that the Project ID could not be
ascertained from the environment.
Raises:
~google.auth.exceptions.DefaultCredentialsError:
If no credentials were found, or if the credentials found were
invalid.
"""
from google.auth._credentials_async import with_scopes_if_required
explicit_project_id = os.environ.get(
environment_vars.PROJECT, os.environ.get(environment_vars.LEGACY_PROJECT)
)
checkers = (
_get_explicit_environ_credentials,
_get_gcloud_sdk_credentials,
_get_gae_credentials,
lambda: _get_gce_credentials(request),
)
for checker in checkers:
credentials, project_id = checker()
if credentials is not None:
credentials = with_scopes_if_required(
credentials, scopes
).with_quota_project(quota_project_id)
effective_project_id = explicit_project_id or project_id
if not effective_project_id:
_default._LOGGER.warning(
"No project ID could be determined. Consider running "
"`gcloud config set project` or setting the %s "
"environment variable",
environment_vars.PROJECT,
)
return credentials, effective_project_id
raise exceptions.DefaultCredentialsError(_default._HELP_MESSAGE)
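# --- Usage sketch (added for illustration; not part of the original module) ---
# Despite its name, default_async is an ordinary (synchronous) function that
# returns credential objects built from the *_async modules above. The scope
# below is a placeholder; the helper is never called.
def _example_default_async():  # illustrative only
    credentials, project_id = default_async(
        scopes=["https://www.googleapis.com/auth/cloud-platform"]
    )
    return credentials, project_id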
|
the-stack_0_25007
|
# -*- coding: utf-8 -*-
from zerver.lib.test_classes import WebhookTestCase
from zerver.lib.users import get_api_key
class DropboxHookTests(WebhookTestCase):
STREAM_NAME = 'test'
URL_TEMPLATE = "/api/v1/external/dropbox?&api_key={api_key}&stream={stream}"
FIXTURE_DIR_NAME = 'dropbox'
def test_file_updated(self) -> None:
expected_topic = u"Dropbox"
expected_message = u"File has been updated on Dropbox!"
self.send_and_test_stream_message('file_updated', expected_topic, expected_message,
content_type="application/x-www-form-urlencoded")
def get_body(self, fixture_name: str) -> str:
return self.webhook_fixture_data("dropbox", fixture_name, file_type="json")
def test_verification_request(self) -> None:
self.subscribe(self.test_user, self.STREAM_NAME)
get_params = {'stream_name': self.STREAM_NAME,
'challenge': '9B2SVL4orbt5DxLMqJHI6pOTipTqingt2YFMIO0g06E',
'api_key': get_api_key(self.test_user)}
result = self.client_get(self.url, get_params)
self.assert_in_response('9B2SVL4orbt5DxLMqJHI6pOTipTqingt2YFMIO0g06E', result)
|
the-stack_0_25008
|
#!/usr/bin/env python3
from bspump import BSPumpApplication, Pipeline
import bspump.trigger
import bspump.common
import bspump.abc
import bspump.mongodb
import logging
##
L = logging.getLogger(__name__)
"""
This example connects to the mongodb instance running on our oilvan server.
#host = mongodb://127.0.0.1:27017
#database = users
#collection = user_location
It queries the database according to the parameters passed into query_parms
and prints the result to the console.
"""
class MyApplication(BSPumpApplication):
def __init__(self):
super().__init__()
svc = self.get_service("bspump.PumpService")
svc.add_connection(bspump.mongodb.MongoDBConnection(self, config={
"host": "mongodb://127.0.0.1:27017"}))
svc.add_pipeline(MyPipeline0(self))
class MyPipeline0(Pipeline):
def __init__(self, app, pipeline_id=None):
super().__init__(app, pipeline_id)
"""
        query_parms takes three parameters (filter, projection, number of records).
        Any parameter you do not pass defaults to None.
        Example of how to use query_parms:
query_parms = {
"filter": { '_id': 'E48D8C-hAP%20ac%C2%B2-D7160BFE779D'},
"projection" : "{ '_timestamp' }",
"limit": "0"
}
"""
query_parms = {}
self.build(
bspump.mongodb.MongoDBSource(app, self, "MongoDBConnection", query_parms=query_parms,
config={'database':'users',
'collection':'user_location'
}).on(bspump.trigger.RunOnceTrigger(app)),
bspump.common.PPrintSink(app, self))
if __name__ == '__main__':
app = MyApplication()
app.run()
|
the-stack_0_25011
|
"""github4.py."""
import sys
if sys.version_info[:2] >= (3, 8):
from importlib.metadata import version, PackageNotFoundError # pragma: no cover
else:
from importlib_metadata import version, PackageNotFoundError # pragma: no cover
try:
__version__ = version(__name__)
except PackageNotFoundError: # pragma: no cover
__version__ = "unknown"
from .api import enterprise_login
from .api import login
from .exceptions import GitHubError
from .github import GitHub
from .github import GitHubEnterprise
__all__ = (
"GitHub",
"GitHubEnterprise",
"GitHubError",
"authorize",
"login",
"enterprise_login",
"emojis",
"gist",
"gitignore_template",
"create_gist",
"issue",
"markdown",
"octocat",
"organization",
"pull_request",
"followers_of",
"followed_by",
"public_gists",
"gists_by",
"issues_on",
"gitignore_templates",
"all_repositories",
"all_users",
"all_events",
"organizations_with",
"repositories_by",
"starred_by",
"subscriptions_for",
"rate_limit",
"repository",
"search_code",
"search_repositories",
"search_users",
"search_issues",
"user",
"zen",
)
|
the-stack_0_25014
|
"""Entities in the app."""
# pylint: disable=unsubscriptable-object
import logging
import threading
import weakref
import redis
from cached_property import threaded_cached_property
from cosette import lastfm, youtube, settings
log = logging.getLogger(__name__)
redisconn = redis.StrictRedis(host=settings.REDIS_HOST, db=settings.REDIS_DB)
class Track(object):
"""Represent a single track.
A track has name, artist and playcount. It has also two more expensive
property: youtube_id and thumbnail_url.
Because of some properties are expensive to get, Track objects follow
flyweight pattern and expensive properties are cached in the corresponding
track object.
"""
tracks = weakref.WeakValueDictionary()
lock = threading.RLock()
REDIS_KEY_PT = 'track:%s'
def __new__(cls, artist, name, playcount):
if isinstance(name, bytes):
name = name.decode('utf-8')
key = (artist, name)
with cls.lock:
self = cls.tracks.get(key)
if not self:
self = object.__new__(cls)
self.artist = artist
self.name = name
self.playcount = int(playcount)
cls.tracks[key] = self
return self
def __repr__(self):
return "Track(artist=%r, name=%r, playcount=%d)" % (
self.artist,
self.name,
self.playcount,
)
def __str__(self):
return '%s - %s' % (self.artist, self.name)
def _fill_youtube_values(self, query):
items = youtube.search(q=query)
if not items:
return None
youtube_id = items[0]['id']['videoId']
thumbnail = items[0]['snippet']['thumbnails']['default']
ret = {
'youtube_id': youtube_id,
'thumbnail_url': thumbnail['url'],
'thumbnail_width': thumbnail['width'],
'thumbnail_height': thumbnail['height']
}
rkey = self.REDIS_KEY_PT % query
redisconn.hmset(rkey, ret)
return ret
@threaded_cached_property
def youtube_id(self):
"""Return youtube id of the track.
If it is called earlier, return cached value, otherwise check redis first,
then search youtube if there's nothing in redis.
"""
query = str(self)
rkey = self.REDIS_KEY_PT % query
youtube_id = redisconn.hget(rkey, 'youtube_id')
if not youtube_id:
vals = self._fill_youtube_values(query)
youtube_id = vals['youtube_id']
if isinstance(youtube_id, bytes):
youtube_id = youtube_id.decode('utf-8')
return youtube_id
@threaded_cached_property
def thumbnail_url(self):
"""Return url of a thumbnail for the track.
It works almost the same with youtube_id property.
"""
query = str(self)
rkey = self.REDIS_KEY_PT % query
thumbnail = redisconn.hget(rkey, 'thumbnail_url')
if not thumbnail:
vals = self._fill_youtube_values(query)
thumbnail = vals['thumbnail_url']
if isinstance(thumbnail, bytes):
thumbnail = thumbnail.decode('utf-8')
return thumbnail
@classmethod
def save_broken_track(cls, youtube_id, name):
"""Save broken track for later investigation."""
redisconn.sadd('broken_tracks', '%s|%s' % (youtube_id, name))
@classmethod
def save_hits(cls, hits):
"""Save hit tracks to redis.
Saved tracks are not used at the moment. We are saving them for later use.
"""
if hits:
redisconn.sadd('savedhits', *(str(h) for h in hits))
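# --- Flyweight behaviour sketch (added for illustration; not in the original) ---
# Track.__new__ caches instances by (artist, name), so building the "same"
# track twice returns one shared object and the first playcount wins. The
# artist/track names below are made-up placeholders; the helper is never called.
def _track_flyweight_example():  # illustrative only
    t1 = Track('Placeholder Artist', 'Placeholder Song', 100)
    t2 = Track('Placeholder Artist', 'Placeholder Song', 999)
    assert t1 is t2 and t1.playcount == 100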
class Artist(object):
"""Represent a single artist.
Artist objects follow flyweight pattern, this way expensive operations (such as
calling lastfm or redis) won't happen more than once across requests.
"""
HIT_THRESHOLD = 0.4
PLAYCOUNT_THRESHOLD = 10000
TRACK_COUNT = 3
SECOND_TRACK_MAX_PLAYCOUNT = 1000000
artists = weakref.WeakValueDictionary()
lock = threading.RLock()
def __new__(cls, name):
if isinstance(name, bytes):
name = name.decode('utf-8')
with cls.lock:
self = cls.artists.get(name)
if not self:
self = object.__new__(cls)
self.name = name
cls.artists[name] = self
return self
def __repr__(self):
return "Artist('%s')" % self.name
def __str__(self):
return self.name
@threaded_cached_property
def similar_artists(self):
"""Return similar artist to the current Artist object.
This property is cached, if it is not called earlier, it checks redis first,
then calls lastfm api.
"""
rkey = 'similar:%s' % self.name
artists = redisconn.lrange(rkey, 0, -1)
if not artists:
if redisconn.sismember('hasnosimilarartist', self.name):
log.info('%s has no similar artists', self.name)
return []
resp = lastfm.call_api(method='artist.getsimilar', artist=self.name)
artists = [a['name'] for a in resp['similarartists']['artist']]
if artists:
redisconn.rpush(rkey, *artists)
redisconn.sadd('artists', *(a.lower() for a in artists))
else:
log.info('No similar artists found for %s', self.name)
redisconn.sadd('hasnosimilarartist', self.name)
return [Artist(name) for name in artists]
@threaded_cached_property
def top_tracks(self):
"""Return top tracks of the artist.
Like any expensive property in Cosette, its cost is reduced by using flyweight
pattern, caching properties, saving the value to the redis for later use. If none
of the previous steps supplies a value, then calls lastfm api.
"""
rkey = 'toptracks:%s' % self.name
toptracks = redisconn.zrevrange(rkey, 0, self.TRACK_COUNT, withscores=True)
if not toptracks:
resp = lastfm.call_api(method='artist.gettoptracks', artist=self.name)
toptracks = {}
for item in resp['toptracks']['track']:
toptracks[item['name']] = int(item['playcount'])
if toptracks:
redisconn.zadd(rkey, **toptracks)
# prepare the data as returned from redis
toptracks = sorted(
toptracks.items(), key=lambda x: x[1], reverse=True)
# trim down the track count, we won't need most of it anyway
toptracks = toptracks[:self.TRACK_COUNT]
return [Track(artist=self, name=t[0], playcount=t[1])
for t in toptracks]
@threaded_cached_property
def hit_track(self):
"""Return the one hit wonder if there's one, otherwise return None."""
tracks = self.top_tracks
try:
first, second = tracks[0], tracks[1]
except IndexError:
return None
if second.playcount > self.SECOND_TRACK_MAX_PLAYCOUNT:
return None
ratio = float(second.playcount) / first.playcount
if (ratio < self.HIT_THRESHOLD and
first.playcount > self.PLAYCOUNT_THRESHOLD):
return first
return None
@classmethod
def is_artist(cls, name):
"""Check the given name is an artist name."""
return redisconn.sismember('artists', name)
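# --- hit_track heuristic, worked example (added for illustration) -------------
# With top_tracks playcounts first=50000 and second=12000:
#   second (12000) <= SECOND_TRACK_MAX_PLAYCOUNT (1000000)  -> keep checking
#   ratio = 12000 / 50000 = 0.24 < HIT_THRESHOLD (0.4)
#   first (50000) > PLAYCOUNT_THRESHOLD (10000)
# so hit_track returns the first track; otherwise it returns None.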
class Tag(object):
"""Represent a tag (or genre).
Like other things in Cosette, Tag objects follow flyweight pattern as well.
"""
tags = weakref.WeakValueDictionary()
lock = threading.RLock()
def __new__(cls, name):
with cls.lock:
self = cls.tags.get(name)
if not self:
self = object.__new__(cls)
self.name = name
cls.tags[name] = self
return self
@threaded_cached_property
def top_artists(self):
"""Return top artists for the tag."""
rkey = 'topartists:%s' % self.name
artists = redisconn.lrange(rkey, 0, -1)
if not artists:
if redisconn.sismember('isnottag', self.name):
log.info('%s has no top artist', self.name)
return []
resp = lastfm.call_api(method='tag.gettopartists', tag=self.name, limit=100)
artists = [t['name'] for t in resp['topartists']['artist']]
if artists:
resp = lastfm.call_api(
method='tag.gettopartists',
tag=self.name,
limit=100,
page=2
)
artists.extend(t['name'] for t in resp['topartists']['artist'])
redisconn.rpush(rkey, *artists)
redisconn.sadd('artists', *(a.lower() for a in artists))
else:
redisconn.sadd('isnottag', self.name)
log.info('No top artists has found for %s', self.name)
return [Artist(name) for name in artists]
@classmethod
def is_tag(cls, name):
"""Check the given name is a tag or not."""
return redisconn.sismember('tags', name)
class Playlist(object):
"""Represent playlist.
Playlist objects are redis backed.
"""
def __init__(self, name, length=50):
self.name = name
self.length = length
def add(self, track):
"""Add given track to the playlist."""
redisconn.lpush(
self.name,
'%s|%s|%s' % (track['youtubeId'], track['thumbnailUrl'], track['name'])
)
redisconn.ltrim(self.name, 0, self.length - 1)
def list(self):
"""Fetch playlist from redis and return the tracks as list of dicts."""
playlist = redisconn.lrange(self.name, 0, -1)
items = []
for elem in playlist:
elem = elem.decode('utf-8')
youtube_id, thumbnail_url, name = elem.split('|')
items.append({
'youtubeId': youtube_id,
'thumbnailUrl': thumbnail_url,
'name': name
})
return items
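# --- Playlist usage sketch (added for illustration; not in the original) ------
# Tracks are stored in redis as "youtubeId|thumbnailUrl|name" strings, newest
# first, trimmed to `length` entries. The key and track values below are
# placeholders; the helper is never called.
def _playlist_example():  # illustrative only
    pl = Playlist('playlist:example', length=50)
    pl.add({'youtubeId': 'abc123',
            'thumbnailUrl': 'https://example.com/thumb.jpg',
            'name': 'Placeholder Artist - Placeholder Song'})
    return pl.list()  # [{'youtubeId': ..., 'thumbnailUrl': ..., 'name': ...}, ...]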
|
the-stack_0_25015
|
import datetime
import random
import re
from django.conf import settings
from django.template.loader import render_to_string
from django.utils.hashcompat import sha_constructor
from couchdbkit.ext.django.schema import *
from django_couchdb_utils.auth.models import User
SHA1_RE = re.compile('^[a-f0-9]{40}$')
def activate_user(activation_key):
"""
Validate an activation key and activate the corresponding
``User`` if valid.
If the key is valid and has not expired, return the ``User``
after activating.
If the key is not valid or has expired, return ``False``.
If the key is valid but the ``User`` is already active,
return ``False``.
To prevent reactivation of an account which has been
deactivated by site administrators, the activation key is
reset to the string constant ``RegistrationProfile.ACTIVATED``
after successful activation.
"""
# Make sure the key we're trying conforms to the pattern of a
# SHA1 hash; if it doesn't, no point trying to look it up in
# the database.
if not SHA1_RE.search(activation_key):
return False
    user = User.get_by_key(activation_key)
    if user and not user.activation_key_expired():
        del user.activation_key
        user.is_active = True
        user.save()
        return user
    return False
def create_inactive_user(username, email, password,
site, send_email=True):
"""
Create a new, inactive ``User``, generate a
``RegistrationProfile`` and email its activation key to the
``User``, returning the new ``User``.
By default, an activation email will be sent to the new
user. To disable this, pass ``send_email=False``.
"""
new_user = User()
new_user.username = username
new_user.email = email
new_user.set_password(password)
new_user.is_active = False
create_profile(new_user)
new_user.save()
if send_email:
new_user.send_activation_email(site)
return new_user
def create_profile(user):
"""
Create a ``RegistrationProfile`` for a given
``User``, and return the ``RegistrationProfile``.
The activation key for the ``RegistrationProfile`` will be a
SHA1 hash, generated from a combination of the ``User``'s
username and a random salt.
"""
salt = sha_constructor(str(random.random())).hexdigest()[:5]
username = user.username
if isinstance(username, unicode):
username = username.encode('utf-8')
user.activation_key = sha_constructor(salt+username).hexdigest()
def delete_expired_users():
"""
Remove expired instances of ``RegistrationProfile`` and their
associated ``User``s.
Accounts to be deleted are identified by searching for
instances of ``RegistrationProfile`` with expired activation
keys, and then checking to see if their associated ``User``
instances have the field ``is_active`` set to ``False``; any
``User`` who is both inactive and has an expired activation
key will be deleted.
It is recommended that this method be executed regularly as
part of your routine site maintenance; this application
provides a custom management command which will call this
method, accessible as ``manage.py cleanupregistration``.
Regularly clearing out accounts which have never been
activated serves two useful purposes:
1. It alleviates the ocasional need to reset a
``RegistrationProfile`` and/or re-send an activation email
when a user does not receive or does not act upon the
initial activation email; since the account will be
deleted, the user will be able to simply re-register and
receive a new activation key.
2. It prevents the possibility of a malicious user registering
one or more accounts and never activating them (thus
denying the use of those usernames to anyone else); since
those accounts will be deleted, the usernames will become
available for use again.
If you have a troublesome ``User`` and wish to disable their
account while keeping it in the database, simply delete the
associated ``RegistrationProfile``; an inactive ``User`` which
does not have an associated ``RegistrationProfile`` will not
be deleted.
"""
for user in User.all_users():
if user.activation_key_expired():
if not user.is_active:
user.delete()
def get_migration_user_data(user):
    """
    Returns the data that will be merged into the User object
    when migrating an ORM-based User to CouchDB
    """
    # ``RegistrationProfile`` is the legacy django-registration ORM model and
    # must be importable for this migration helper to return anything useful.
    try:
        reg_profile = RegistrationProfile.objects.get(user=user)
        if reg_profile.activation_key != RegistrationProfile.ACTIVATED and \
           not user.is_active:
            return {'activation_key': reg_profile.activation_key}
    except Exception:
        return {}
    return {}
class User(User):
"""
A simple profile which stores an activation key for use during
user account registration.
Generally, you will not want to interact directly with instances
of this model; the provided manager includes methods
for creating and activating new accounts, as well as for cleaning
out accounts which have never been activated.
While it is possible to use this model as the value of the
``AUTH_PROFILE_MODULE`` setting, it's not recommended that you do
so. This model's sole purpose is to store data temporarily during
account registration and activation.
"""
activation_key = StringProperty()
class Meta:
app_label = 'registration_couchdb'
@classmethod
def get_by_key(cls, key):
r = cls.view('registration_couchdb/users_by_activationkey',
key = key,
include_docs = True,
)
return r.first() if r else None
def activation_key_expired(self):
"""
Determine whether this ``RegistrationProfile``'s activation
key has expired, returning a boolean -- ``True`` if the key
has expired.
Key expiration is determined by a two-step process:
1. If the user has already activated, the key will have been
reset to the string constant ``ACTIVATED``. Re-activating
is not permitted, and so this method returns ``True`` in
this case.
2. Otherwise, the date the user signed up is incremented by
the number of days specified in the setting
``ACCOUNT_ACTIVATION_DAYS`` (which should be the number of
days after signup during which a user is allowed to
activate their account); if the result is less than or
equal to the current date, the key has expired and this
method returns ``True``.
"""
expiration_date = datetime.timedelta(days=settings.ACCOUNT_ACTIVATION_DAYS)
return bool(getattr(self, 'activation_key', False) and \
(self.date_joined + expiration_date <= datetime.datetime.now()))
def send_activation_email(self, site):
"""
Send an activation email to the user associated with this
``RegistrationProfile``.
The activation email will make use of two templates:
``registration/activation_email_subject.txt``
This template will be used for the subject line of the
email. Because it is used as the subject line of an email,
this template's output **must** be only a single line of
text; output longer than one line will be forcibly joined
into only a single line.
``registration/activation_email.txt``
This template will be used for the body of the email.
These templates will each receive the following context
variables:
``activation_key``
The activation key for the new account.
``expiration_days``
The number of days remaining during which the account may
be activated.
``site``
An object representing the site on which the user
registered; depending on whether ``django.contrib.sites``
is installed, this may be an instance of either
``django.contrib.sites.models.Site`` (if the sites
application is installed) or
``django.contrib.sites.models.RequestSite`` (if
not). Consult the documentation for the Django sites
framework for details regarding these objects' interfaces.
"""
ctx_dict = {'activation_key': self.activation_key,
'expiration_days': settings.ACCOUNT_ACTIVATION_DAYS,
'site': site}
subject = render_to_string('registration/activation_email_subject.txt',
ctx_dict)
# Email subject *must not* contain newlines
subject = ''.join(subject.splitlines())
message = render_to_string('registration/activation_email.txt',
ctx_dict)
self.email_user(subject, message, settings.DEFAULT_FROM_EMAIL)
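# --- Usage sketch (added for illustration; not part of the original module) ---
# A hypothetical registration flow; `site` would normally be the current
# django.contrib.sites Site/RequestSite object, and the account details are
# placeholders. The helper is never called.
def _registration_example(site):  # illustrative only
    user = create_inactive_user('alice', 'alice@example.com', 's3cret',
                                site, send_email=False)
    activated = activate_user(user.activation_key)
    return activated  # the activated User, or False if the key was expired/invalid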
|
the-stack_0_25016
|
"""
Basic endpoint: /cars
"""
from typing import List, Generator
from fastapi import APIRouter, Depends, HTTPException, Path
from sqlalchemy.orm import Session
from starlette.status import HTTP_201_CREATED
from app.controllers.cars_controller import get_cars, get_car, create_car, update_car, delete_car
from app.schemas.cars_schema import CarsInDBBase, CarsCreate, CarsUpdate
from app.schemas.users_schema import UsersBase
from app.settings.mysql_settings import SessionLocal
from app.utils.auth import get_current_active_user
#pylint: disable=invalid-name
router = APIRouter()
#pylint: enable=invalid-name
def db_session() -> Generator:
"""
    Get a database session with DI (Dependency Injection)
"""
dbsession = SessionLocal()
try:
yield dbsession
finally:
dbsession.close()
@router.get("/", response_model=List[CarsInDBBase])
def get_all_cars(
sql: Session = Depends(db_session),
current_user: UsersBase = Depends(get_current_active_user)
):
"""return cars record"""
if current_user.Status:
raise HTTPException(status_code=400, detail="Inactive user")
result = get_cars(sql)
return result
@router.get("/{car_id}", response_model=CarsInDBBase)
def get_car_by_id(
car_id: int = Path(..., title="The Id of the car to get", ge=0),
sql: Session = Depends(db_session),
current_user: UsersBase = Depends(get_current_active_user)
):
"""return a specific car record"""
if current_user.Status:
raise HTTPException(status_code=400, detail="Inactive user")
result = get_car(sql, car_id=car_id)
if result is None:
raise HTTPException(status_code=404, detail="Car not found")
return result
@router.post("/", response_model=CarsCreate, status_code=HTTP_201_CREATED)
def add_new_car(
newcar: CarsCreate,
sql: Session = Depends(db_session),
current_user: UsersBase = Depends(get_current_active_user)
):
"""
Create a car with all the information:
- **name**: must have a name
- **price**: required
"""
if current_user.Status:
raise HTTPException(status_code=400, detail="Inactive user")
result = create_car(sql, car=newcar)
return result
@router.put("/{car_id}", response_model=CarsUpdate)
def update_car_by_id(
car: CarsUpdate,
car_id: int = Path(..., title="The Id of the car to be updated", ge=0),
sql: Session = Depends(db_session),
current_user: UsersBase = Depends(get_current_active_user)
):
"""
update a car with all the information:
- **id**: set the Id of the car, it's required
- **name**: must have a name
- **price**: required
"""
if current_user.Status:
raise HTTPException(status_code=400, detail="Inactive user")
result = update_car(sql, car_id=car_id, car=car)
if result is None:
raise HTTPException(status_code=404, detail="Car not found")
return result
@router.delete("/{car_id}")
def delete_car_by_id(
car_id: int = Path(..., title="The Id of the car to be deleted", ge=0),
sql: Session = Depends(db_session),
current_user: UsersBase = Depends(get_current_active_user)
):
"""delete a specific car"""
if current_user.Status:
raise HTTPException(status_code=400, detail="Inactive user")
result = delete_car(sql, car_id=car_id)
return result
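# --- Router wiring sketch (added for illustration; not part of the original) --
# A hypothetical way this router could be mounted in an application; the app
# object, prefix and tags below are assumptions, not taken from this repository.
#
#     from fastapi import FastAPI
#     app = FastAPI()
#     app.include_router(router, prefix="/cars", tags=["cars"])
#     # e.g. GET /cars/1 then resolves to get_car_by_id for an authenticated,
#     # active user.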
|
the-stack_0_25017
|
# coding :utf-8
#
# The MIT License (MIT)
#
# Copyright (c) 2016-2017 yutiansut/QUANTAXIS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#from .market_config import stock_market,future_market,HK_stock_market,US_stock_market
import datetime
from QUANTAXIS.QAUtil import QA_util_log_info
class QA_Market():
    # Basic settings
def init(self):
self.type = '2x'
self.tick = 'day'
self.slipper = '0.0005'
# client=QA_Setting.client
# client=QA.QA_util_sql_mongo_setting()
# db= client.market
def receive_bid(self, bid, client):
if self.type == '2x' and self.tick == 'day':
coll = client.quantaxis.stock_day
elif self.type == '3x' and self.tick == '500ms':
coll = client.quantaxis.future_ms
try:
item = coll.find_one(
{"code": str(bid['code'])[0:6], "date": str(bid['time'])[0:10]})
QA_util_log_info('==== Market Board ====')
QA_util_log_info('date' + str(bid['time']))
QA_util_log_info('day High' + str(item["high"]))
QA_util_log_info('your bid price' + str(bid['price']))
QA_util_log_info('day Low' + str(item["low"]))
QA_util_log_info('amount' + str(bid["amount"]))
QA_util_log_info('towards' + str(bid["towards"]))
QA_util_log_info('==== Market Board ====')
if (float(bid['price']) < float(item["high"]) and
float(bid['price']) > float(item["low"]) or
float(bid['price']) == float(item["low"]) or
float(bid['price']) == float(item['high'])) and \
float(bid['amount']) < float(item['volume']) / 8:
QA_util_log_info("deal success")
message = {
'header': {
'source': 'market',
'status': 200,
'session': {
'user': str(bid['user']),
'strategy': str(bid['strategy'])
}
},
'body': {
'bid': {
'price': str(bid['price']),
'code': str(bid['code']),
'amount': int(bid['amount']),
'time': str(bid['time']),
'towards': bid['towards']
},
'market': {
'open': item['open'],
'high': item['high'],
'low': item['low'],
'close': item['close'],
'volume': item['volume'],
'code': item['code']
}
}
}
# QA_signal_send(message,client)
# print(message['body']['bid']['amount'])
return message
else:
QA_util_log_info('not success')
if int(bid['price']) == 0:
status_mes = 401
else:
status_mes = 402
message = {
'header': {
'source': 'market',
'status': status_mes,
'session': {
'user': str(bid['user']),
'strategy': str(bid['strategy'])
}
},
'body': {
'bid': {
'price': str(bid['price']),
'code': str(bid['code']),
'amount': int(bid['amount']),
'time': str(bid['time']),
'towards': bid['towards']
},
'market': {
'open': item['open'],
'high': item['high'],
'low': item['low'],
'close': item['close'],
'volume': item['volume'],
'code': item['code']
}
}
}
# print(message['body']['bid']['amount'])
return message
        except Exception:
QA_util_log_info('no market data')
message = {
'header': {
'source': 'market',
'status': 500,
'session': {
'user': str(bid['user']),
'strategy': str(bid['strategy'])
}
},
'body': {
'bid': {
'price': str(bid['price']),
'code': str(bid['code']),
'amount': int(bid['amount']),
'time': str(bid['time']),
'towards': bid['towards']
},
'market': {
'open': 0,
'high': 0,
'low': 0,
'close': 0,
'volume': 0,
'code': 0
}
}
}
return message
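# --- bid message sketch (added for illustration; not part of the original) ----
# receive_bid expects `bid` to be a dict with at least the keys used above and
# `client` to be a pymongo MongoClient exposing the quantaxis database. The
# values below are made-up placeholders.
#
#     market = QA_Market()
#     market.init()  # sets self.type / self.tick / self.slipper
#     bid = {'user': 'u001', 'strategy': 'demo',
#            'code': '000001', 'time': '2017-01-03 00:00:00',
#            'price': 9.5, 'amount': 100, 'towards': 1}
#     message = market.receive_bid(bid, client)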
|
the-stack_0_25019
|
import unittest
from pyfair.model.model import FairModel
from pyfair.model.model import FairDependencyTree
class TestFairDependencyTree(unittest.TestCase):
TOTAL_NODE_COUNT = 13
LEAF_NODE_COUNT = 7
def test_creation(self):
"""Check proper creation of tree."""
tree = FairDependencyTree()
self.assertEqual(len(tree.nodes), self.TOTAL_NODE_COUNT)
self.assertEqual(len(tree._leaf_nodes), self.LEAF_NODE_COUNT)
def test_inspections(self):
"""Check functions returning bools"""
# Create tree ready for calculation
tree = FairDependencyTree()
tree.update_status('Loss Event Frequency', 'Supplied')
tree.update_status('Loss Magnitude', 'Supplied')
# Assert that it is ready for calculation but not complete
self.assertTrue(tree.ready_for_calculation())
self.assertFalse(tree.calculation_completed())
# Now mimic calculation and assert complete
tree.update_status('Risk', 'Calculated')
self.assertTrue(tree.calculation_completed())
def test_downward_propogation(self):
"""Ensure propogation up and down the tree works"""
tree = FairDependencyTree()
# The supply two nodes
tree.update_status('Loss Event Frequency', 'Supplied')
tree.update_status('Loss Magnitude', 'Supplied')
# Each of those nodes should now equal supplied
statuses = tree.get_node_statuses()
for node in [
'Loss Event Frequency',
'Loss Magnitude'
]:
self.assertEqual(statuses[node], 'Supplied')
# And inferior nodes should be 'Not Required'
for node in [
'Threat Event Frequency',
'Vulnerability',
'Contact Frequency',
'Probability of Action',
'Threat Capability',
'Control Strength',
'Primary Loss',
'Secondary Loss',
'Secondary Loss Event Frequency',
'Secondary Loss Event Magnitude',
]:
self.assertEqual(statuses[node], 'Not Required')
def test_upward_propagation(self):
"""Ensure upward calculation propogation works"""
tree = FairDependencyTree()
# The supply three nodes
tree.update_status('Loss Event Frequency', 'Supplied')
tree.update_status('Primary Loss', 'Supplied')
tree.update_status('Secondary Loss', 'Supplied')
# Get statuses and check appropriate fields are calculable
statuses = tree.get_node_statuses()
for node in [
'Risk',
'Loss Magnitude'
]:
self.assertEqual(statuses[node], 'Calculable')
# Now mark a node as calculated
tree.update_status('Loss Magnitude', 'Calculated')
statuses = tree.get_node_statuses()
self.assertEqual(statuses['Loss Magnitude'], 'Calculated')
if __name__ == '__main__':
unittest.main()
|
the-stack_0_25021
|
from collections import OrderedDict
import pandas as pd
from controller.WEAP_exporter import Extract_Network
# Option 1: query network based on the provided name in the GUI
def GetWASH_net_SearchValues(unique_object_types_value_list,BranchesNew_list):
exclude_object_type_list = ['Return Flow Node', 'River Withdrawal', 'Tributary Inflow', 'River Mouth']
    # loop over all the WEAP Object Types and exclude the above ones
    # (they don't take input data, so there is no need to search for them)
    # NOTE: minimal completion of the original stub - collect the pairs that
    # still need to be searched instead of looping without effect.
    searched_branches = []
    for Objects in BranchesNew_list:
        for unique_object_types_value in unique_object_types_value_list:
            if unique_object_types_value in exclude_object_type_list:
                continue
            searched_branches.append((unique_object_types_value, Objects))
    # Save them to Excel
    field_names = ['ObjectType', 'ObjectTypology', 'InstanceName', 'InstanceNameCV', 'FullBranch']
    return field_names, searched_branches
# Option 2: Read network from a provided excel file
# Read the excel file (one DataFrame per sheet)
Network_input = pd.read_excel('./Network_input.xlsm', sheetname=None)
print(Network_input.keys())
# Placeholder column names for the provided network sheet; the actual sheet and
# column layout are assumptions based on the field names used in Option 1 above.
Provided_ObjectType = 'ObjectType'
Provided_InstanceName = 'InstanceName'
Provided_FullBranch = 'FullBranch'
Provided_rows = []  # to be filled with one dict per row of the provided network sheet
# based on the selected model (WEAP) or (WASH),
ModelName='WASH'
#Query WaMDaM db to get the list of Object types and their Attributes
Model_required_attributes='''
SELECT DISTINCT ObjectType as Required_ObjectType,ObjectTypeCV as Required_ObjectTypeCV ,
AttributeName as Required_AttributeName, AttributeNameCV Required_AttributeNameCV,
AttributeDataTypeCV as Required_AttributeDataTypeCV, UnitName as Required_UnitName
FROM ResourceTypes
LEFT JOIN "ObjectTypes"
ON "ObjectTypes"."ResourceTypeID"="ResourceTypes"."ResourceTypeID"
LEFT JOIN "ObjectCategories"
ON "ObjectCategories"."ObjectCategoryID"="ObjectTypes"."ObjectCategoryID"
LEFT JOIN "Attributes"
ON "Attributes"."ObjectTypeID"="ObjectTypes"."ObjectTypeID"
LEFT JOIN "AttributeCategories"
ON "AttributeCategories"."AttributeCategoryID"="Attributes"."AttributeCategoryID"
-- Provide the model name
WHERE "ResourceTypeAcronym"='WEAP' --ModelName
--WHERE "ResourceTypeAcronym"='WASH'
--exclude the dummy attributes that are just used to connect Object Types with their Instances.
AND AttributeName!='ObjectTypeInstances'
'''
df_Model_required_attributes = session.execute(Model_required_attributes)  # `session`: a SQLAlchemy session bound to the WaMDaM db (defined elsewhere)
print(df_Model_required_attributes.keys())
Required_rows = df_Model_required_attributes.fetchall()
Query_Load_params = []
# Column names used to index each row returned by the query above
Required_ObjectType = 'Required_ObjectType'
Required_AttributeName = 'Required_AttributeName'
Required_AttributeDataTypeCV = 'Required_AttributeDataTypeCV'
Required_UnitName = 'Required_UnitName'
# Match every provided instance with the attributes its object type requires
for prov_objs in Provided_rows:
    for req_objs in Required_rows:
        if prov_objs[Provided_ObjectType] == req_objs[Required_ObjectType]:
            Query_Load_params.append({
                'Required_AttributeName': req_objs[Required_AttributeName],
                'Required_AttributeDataTypeCV': req_objs[Required_AttributeDataTypeCV],
                'Required_UnitName': req_objs[Required_UnitName],
                'Provided_InstanceName': prov_objs[Provided_InstanceName],
                'Provided_FullBranch': prov_objs[Provided_FullBranch],
            })
# Query_Load_params holds one record per matched (provided instance, required attribute) pair
# for
# based on the provided Object type in excel, iterate over its defined Attributes in WAMDAM for each Instance
|
the-stack_0_25023
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import sys
import shutil
#----------------------------------------
# Created by: Wolfterro
# Version: 1.0 - Python 2.x
# Date: 01/06/2016
#----------------------------------------
version = "1.0"
# Help Menu
# =========
def help():
    print ("Usage: ./Heavy-R Downloader.py [Argument] [ID] ...")
    print ("---------------------------------------------------\n")
    print ("Arguments:")
    print ("-----------")
    print ("-h || --help\t\tShows this help screen")
    print ("-u || --url\t\tUses URLs passed as arguments to the script\n")
    print (" * If 'URL' mode cannot download the video, use simple mode by running the program without any arguments.\n")
# Folder Check
# ============
def check_folder():
    if os.path.exists("Heavy-R"):
        os.chdir("Heavy-R")
    else:
        print ("[Heavy-R Downloader] Folder 'Heavy-R' does not exist! Creating ...\n")
        os.makedirs("Heavy-R")
        os.chdir("Heavy-R")
# Video Download in Simple Mode and URL Mode
# ==========================================
def get_video_simple_mode(get_video_url):
    if get_video_url == None:
        get_video_url = raw_input("Enter the video URL: ").replace(" ", "")
    separator = "/"
    get_video_id = get_video_url.replace("http://www.heavy-r.com/video/", "").split(separator, 1)[0]
    get_video_name = get_video_url.replace("http://www.heavy-r.com/video/", "").split(separator, 1)[1].replace("/", "")
    print ("[Heavy-R Downloader] Downloading the page of the selected video (" + get_video_name.replace("_", " ") + ") ...")
    generated_folder = get_video_id + "-" + get_video_name
    if os.path.exists(generated_folder):
        print ("[Heavy-R Downloader] Error! Video already exists in the folder! Aborting ...")
        print ("\n==================================================================\n")
    else:
        os.makedirs(generated_folder)
        os.chdir(generated_folder)
        os.system("wget -O index.html " + "\"" + get_video_url + "\"" + " -q --show-progress")
        print ("[Heavy-R Downloader] Parsing the page for a download link ...")
        if os.path.getsize("index.html") == 0:
            print ("[Heavy-R Downloader] Error! Page is corrupted or does not exist! Removing folder ...")
            print ("\n==================================================================================\n")
            os.chdir("..")
            shutil.rmtree(generated_folder)
            return
        os.system("grep -m 1 'file: ' index.html >> link.txt")
        print ("[Heavy-R Downloader] Downloading video from the link found ...")
        file_link = open("link.txt")
        linha_link = file_link.readlines()
        get_download_link = str(linha_link[0]).replace("file: ", "").replace("'", "").replace(",", "").replace(" ", "").replace("\n", "")
        file_link.close()
        os.system("wget -O " + get_video_name + ".mp4 " + "\"" + get_download_link + "\"" + " -q --show-progress")
        print ("[Heavy-R Downloader] Checking whether the video was downloaded correctly ...", end="")
        if os.path.getsize(get_video_name + ".mp4") == 0:
            print (" !! FAILED !!")
            falha = True
        else:
            print (" OK!")
            falha = False
        print ("[Heavy-R Downloader] Removing temporary and failed files ...")
        print ("\n=====================================================================\n")
        os.remove("link.txt")
        os.remove("index.html")
        os.chdir("..")
        if falha == True:
            shutil.rmtree(generated_folder)
# Main Method
# ===========
def main():
    argc = len(sys.argv)
    print ("================================")
    print ("Heavy-R Downloader - Version %s" % (version))
    print ("================================\n")
    if argc <= 1:
        check_folder()
        get_video_simple_mode(None)
    elif str(sys.argv[1]) == "-u" or str(sys.argv[1]) == "--url":
        check_folder()
        if argc <= 2:
            print ("[Heavy-R Downloader] Error! Missing URLs! Use -h or --help for help.\n")
        else:
            for x in range(2, argc):
                get_video_simple_mode(str(sys.argv[x]))
    elif str(sys.argv[1]) == "-h" or str(sys.argv[1]) == "--help":
        help()
    else:
        check_folder()
        get_video_simple_mode(None)
# Initializing the Program
# ========================
main()
|
the-stack_0_25025
|
from collections import Iterable
from itertools import chain
from math import log, ceil
from mathsat import msat_term, msat_env
from mathsat import msat_make_constant, msat_declare_function
from mathsat import msat_get_rational_type, msat_get_integer_type, \
msat_get_bool_type
from mathsat import msat_make_and, msat_make_not, msat_make_or, msat_make_iff
from mathsat import msat_make_leq, msat_make_equal, msat_make_true
from mathsat import msat_make_number, msat_make_plus, msat_make_times
from ltl.ltl import TermMap, LTLEncoder
from utils import name_next
num_procs = 24
delta_name = "delta"
def decl_consts(menv: msat_env, name: str, c_type):
assert not name.startswith("_"), name
s = msat_declare_function(menv, name, c_type)
s = msat_make_constant(menv, s)
x_s = msat_declare_function(menv, name_next(name), c_type)
x_s = msat_make_constant(menv, x_s)
return s, x_s
def msat_make_minus(menv: msat_env, arg0: msat_term, arg1: msat_term):
m_one = msat_make_number(menv, "-1")
arg1 = msat_make_times(menv, arg1, m_one)
return msat_make_plus(menv, arg0, arg1)
def msat_make_lt(menv: msat_env, arg0: msat_term, arg1: msat_term):
geq = msat_make_geq(menv, arg0, arg1)
return msat_make_not(menv, geq)
def msat_make_geq(menv: msat_env, arg0: msat_term, arg1: msat_term):
return msat_make_leq(menv, arg1, arg0)
def msat_make_gt(menv: msat_env, arg0: msat_term, arg1: msat_term):
leq = msat_make_leq(menv, arg0, arg1)
return msat_make_not(menv, leq)
def msat_make_impl(menv: msat_env, arg0: msat_term, arg1: msat_term):
n_arg0 = msat_make_not(menv, arg0)
return msat_make_or(menv, n_arg0, arg1)
def diverging_symbs(menv: msat_env) -> frozenset:
real_type = msat_get_rational_type(menv)
delta = msat_declare_function(menv, delta_name, real_type)
delta = msat_make_constant(menv, delta)
return frozenset([delta])
def check_ltl(menv: msat_env, enc: LTLEncoder) -> (Iterable, msat_term,
msat_term, msat_term):
assert menv
assert isinstance(menv, msat_env)
assert enc
assert isinstance(enc, LTLEncoder)
real_type = msat_get_rational_type(menv)
delta, x_delta = decl_consts(menv, delta_name, real_type)
curr2next = {delta: x_delta}
a = msat_make_number(menv, "2")
b = msat_make_number(menv, "5")
c = msat_make_number(menv, "1")
d = msat_make_number(menv, "2")
e = msat_make_number(menv, "1")
gate = Gate("gate", menv, enc, c, d, delta)
controller = Controller("controller", menv, enc, e, num_procs + 1,
delta)
trains = [Train("t{}".format(idx), menv, enc, a, b, delta)
for idx in range(num_procs)]
components = [gate, controller, *trains]
for p in components:
for s, x_s in p.symb2next.items():
assert s not in curr2next.keys()
curr2next[s] = x_s
zero = msat_make_number(menv, "0")
# delta > 0
init = msat_make_geq(menv, delta, zero)
trans = msat_make_geq(menv, x_delta, zero)
for p in components:
init = msat_make_and(menv, init, p.init)
trans = msat_make_and(menv, trans, p.trans)
d_eq_0 = msat_make_equal(menv, delta, zero)
# only 1 train moves
for idx0, t0 in enumerate(trains):
        other_stutter = None
        for idx1, t1 in enumerate(trains):
            if idx0 != idx1:
                if other_stutter is None:
                    other_stutter = t1.evt_stutter
                else:
                    other_stutter = msat_make_and(menv, other_stutter,
                                                  t1.evt_stutter)
        lhs = msat_make_and(menv, d_eq_0,
                            msat_make_not(menv, t0.evt_stutter))
        curr = msat_make_impl(menv, lhs, other_stutter)
trans = msat_make_and(menv, trans, curr)
# sync evt_lower
trans = msat_make_and(menv, trans,
msat_make_impl(
menv, d_eq_0,
msat_make_iff(menv, controller.evt_lower,
gate.evt_lower)))
# sync evt_rise
trans = msat_make_and(menv, trans,
msat_make_impl(
menv, d_eq_0,
msat_make_iff(menv, controller.evt_rise,
gate.evt_rise)))
# sync evt_approach
train_approach = trains[0].evt_approach
for t in trains[1:]:
train_approach = msat_make_or(menv, train_approach, t.evt_approach)
trans = msat_make_and(menv, trans,
msat_make_impl(
menv, d_eq_0,
msat_make_iff(menv, controller.evt_approach,
train_approach)))
# sync evt_exit
train_exit = trains[0].evt_exit
for t in trains[1:]:
train_exit = msat_make_or(menv, train_exit, t.evt_exit)
trans = msat_make_and(menv, trans,
msat_make_impl(
menv, d_eq_0,
msat_make_iff(menv, controller.evt_exit,
train_exit)))
# G ((gate.g0 & gate.g1') -> F (gate.g2 & gate.g3'))
lhs = msat_make_and(menv, gate.g0, enc.make_X(gate.g1))
rhs = msat_make_and(menv, gate.g2, enc.make_X(gate.g3))
ltl = enc.make_G(msat_make_impl(menv, lhs, enc.make_F(rhs)))
return TermMap(curr2next), init, trans, ltl
class Module:
"""Synchronous component"""
def __init__(self, name: str, menv: msat_env, enc: LTLEncoder,
*args, **kwargs):
self.name = name
self.menv = menv
self.enc = enc
self.symb2next = {}
true = msat_make_true(menv)
self.init = true
self.trans = true
def _symb(self, v_name, v_type):
v_name = "{}_{}".format(self.name, v_name)
return decl_consts(self.menv, v_name, v_type)
def _enum(self, v_name: str, enum_size: int):
bool_type = msat_get_bool_type(self.menv)
num_bits = ceil(log(enum_size, 2))
b_vars = []
for idx in range(num_bits):
c_name = "{}{}".format(v_name, idx)
b_vars.append(tuple(self._symb(c_name, bool_type)))
vals = []
x_vals = []
for enum_val in range(enum_size):
bit_val = format(enum_val, '0{}b'.format(num_bits))
assert len(bit_val) == num_bits
assert all(c in {'0', '1'} for c in bit_val)
assign = [b_vars[idx] if c == '1' else
(msat_make_not(self.menv, b_vars[idx][0]),
msat_make_not(self.menv, b_vars[idx][1]))
for idx, c in enumerate(reversed(bit_val))]
pred = assign[0][0]
x_pred = assign[0][1]
for it in assign[1:]:
pred = msat_make_and(self.menv, pred, it[0])
x_pred = msat_make_and(self.menv, x_pred, it[1])
vals.append(pred)
x_vals.append(x_pred)
assert len(vals) == enum_size
assert len(x_vals) == enum_size
return b_vars, vals, x_vals
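# --- _enum encoding note (added for illustration) ------------------------------
# _enum("l", 4) allocates ceil(log2(4)) = 2 boolean state variables (module-
# prefixed l0 and l1) and returns, for each of the 4 enum values, the
# conjunction of literals that encodes it, e.g. value 2 (binary '10') becomes
# (not l0) & l1, in both the current-state and next-state copies.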
class Train(Module):
"""Train module"""
def __init__(self, name: str, menv: msat_env, enc: LTLEncoder,
a, b, delta):
super().__init__(name, menv, enc)
# int_type = msat_get_integer_type(menv)
real_type = msat_get_rational_type(menv)
# loc, x_loc = self._symb("l", int_type)
loc_symbs, locs, x_locs = self._enum("l", 4)
# evt, x_evt = self._symb("evt", int_type)
evt_symbs, evts, x_evts = self._enum("evt", 4)
x, x_x = self._symb("x", real_type)
self.symb2next = {x: x_x}
for s, x_s in chain(evt_symbs, loc_symbs):
assert s not in self.symb2next
self.symb2next[s] = x_s
self.evt_stutter = evts[0]
self.evt_approach = evts[1]
self.evt_exit = evts[2]
self.evt_move = evts[3]
x_evt_stutter = x_evts[0]
x_evt_approach = x_evts[1]
x_evt_exit = x_evts[2]
x_evt_move = x_evts[3]
self.t0 = locs[0]
self.t1 = locs[1]
self.t2 = locs[2]
self.t3 = locs[3]
self.x_t0 = x_locs[0]
self.x_t1 = x_locs[1]
self.x_t2 = x_locs[2]
self.x_t3 = x_locs[3]
same_loc = msat_make_iff(menv, loc_symbs[0][1], loc_symbs[0][0])
for s, x_s in loc_symbs[1:]:
same_loc = msat_make_and(menv, same_loc,
msat_make_iff(menv, x_s, s))
zero = msat_make_number(menv, "0")
# l = t0 & x = 0
self.init = msat_make_and(menv, self.t0,
msat_make_equal(menv, x, zero))
# bound l
bound_l = msat_make_or(menv,
msat_make_or(menv, self.t0, self.t1),
msat_make_or(menv, self.t2, self.t3))
self.init = msat_make_and(menv, self.init, bound_l)
x_bound_l = msat_make_or(menv,
msat_make_or(menv, self.x_t0, self.x_t1),
msat_make_or(menv, self.x_t2, self.x_t3))
self.trans = x_bound_l
# bound evt
bound_evt = msat_make_or(menv,
msat_make_or(menv, self.evt_stutter,
self.evt_approach),
msat_make_or(menv, self.evt_exit,
self.evt_move))
self.init = msat_make_and(menv, self.init, bound_evt)
x_bound_evt = msat_make_or(menv,
msat_make_or(menv, x_evt_stutter,
x_evt_approach),
msat_make_or(menv, x_evt_exit,
x_evt_move))
self.trans = msat_make_and(menv, self.trans, x_bound_evt)
# invars: l != t0 -> x <= b
lhs = msat_make_not(menv, self.t0)
rhs = msat_make_leq(menv, x, b)
self.init = msat_make_and(menv, self.init,
msat_make_impl(menv, lhs, rhs))
# invars: l != t0 -> x <= b
lhs = msat_make_not(menv, self.x_t0)
rhs = msat_make_leq(menv, x_x, b)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# delta > 0 | stutter -> x' = x + delta & l' = l
lhs = msat_make_or(menv, msat_make_gt(menv, delta, zero),
self.evt_stutter)
rhs = msat_make_and(menv,
msat_make_equal(menv, x_x,
msat_make_plus(menv, x, delta)),
same_loc)
self.trans = msat_make_and( menv, self.trans,
msat_make_impl(menv, lhs, rhs))
disc_t = msat_make_and(menv, msat_make_equal(menv, delta, zero),
msat_make_not(menv, self.evt_stutter))
# (l = t0) -> (l' = t1 & evt_approach & x' = 0)
lhs = msat_make_and(menv, disc_t, self.t0)
rhs = msat_make_and(menv,
msat_make_and(menv, self.x_t1,
self.evt_approach),
msat_make_equal(menv, x_x, zero))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (l = t1) -> (l' = t2 & x > a & evt_move & x' = x)
lhs = msat_make_and(menv, disc_t, self.t1)
rhs = msat_make_and(menv,
msat_make_and(menv, self.x_t2,
msat_make_gt(menv, x, a)),
msat_make_and(menv, self.evt_move,
msat_make_equal(menv, x_x, x)))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (l = t2) -> (l' = t3 & evt_move & x' = x)
lhs = msat_make_and(menv, disc_t, self.t2)
rhs = msat_make_and(menv,
msat_make_and(menv, self.x_t3, self.evt_move),
msat_make_equal(menv, x_x, x))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (l = t3) -> (l' = t0 & x <= b & evt_exit & x' = x)
lhs = msat_make_and(menv, disc_t, self.t3)
rhs = msat_make_and(menv,
msat_make_and(menv, self.x_t0,
msat_make_leq(menv, x, b)),
msat_make_and(menv, self.evt_exit,
msat_make_equal(menv, x_x, x)))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
class Gate(Module):
"""Gate module"""
def __init__(self, name: str, menv: msat_env, enc: LTLEncoder,
c, d, delta):
super().__init__(name, menv, enc)
real_type = msat_get_rational_type(menv)
loc_symbs, locs, x_locs = self._enum("l", 4)
evt_symbs, evts, x_evts = self._enum("evt", 4)
y, x_y = self._symb("y", real_type)
self.symb2next = {y: x_y}
for s, x_s in chain(loc_symbs, evt_symbs):
assert s not in self.symb2next
self.symb2next[s] = x_s
self.evt_stutter = evts[0]
self.evt_lower = evts[1]
self.evt_rise = evts[2]
self.evt_move = evts[3]
x_evt_stutter = x_evts[0]
x_evt_lower = x_evts[1]
x_evt_rise = x_evts[2]
x_evt_move = x_evts[3]
self.g0 = locs[0]
self.g1 = locs[1]
self.g2 = locs[2]
self.g3 = locs[3]
self.x_g0 = x_locs[0]
self.x_g1 = x_locs[1]
self.x_g2 = x_locs[2]
self.x_g3 = x_locs[3]
same_loc = msat_make_iff(menv, loc_symbs[0][1], loc_symbs[0][0])
for s, x_s in loc_symbs[1:]:
same_loc = msat_make_and(menv, same_loc,
msat_make_iff(menv, x_s, s))
zero = msat_make_number(menv, "0")
# l = g0 & y = 0
self.init = msat_make_and(menv, self.g0,
msat_make_equal(menv, y, zero))
# bound l
bound_l = msat_make_or(menv,
msat_make_or(menv, self.g0, self.g1),
msat_make_or(menv, self.g2, self.g3))
self.init = msat_make_and(menv, self.init, bound_l)
x_bound_l = msat_make_or(menv,
msat_make_or(menv, self.x_g0, self.x_g1),
msat_make_or(menv, self.x_g2, self.x_g3))
self.trans = x_bound_l
# bound evt
bound_evt = msat_make_or(menv,
msat_make_or(menv, self.evt_stutter,
self.evt_lower),
msat_make_or(menv, self.evt_rise,
self.evt_move))
self.init = msat_make_and(menv, self.init, bound_evt)
x_bound_evt = msat_make_or(menv,
msat_make_or(menv, x_evt_stutter,
x_evt_lower),
msat_make_or(menv, x_evt_rise,
x_evt_move))
self.trans = msat_make_and(menv, self.trans, x_bound_evt)
# invars: l = g1 -> y <= c; l = g3 -> y <= d
lhs = self.g1
rhs = msat_make_leq(menv, y, c)
self.init = msat_make_and(menv, self.init,
msat_make_impl(menv, lhs, rhs))
lhs = self.g3
rhs = msat_make_leq(menv, y, d)
self.init = msat_make_and(menv, self.init,
msat_make_impl(menv, lhs, rhs))
# invars: l = g1 -> y <= c; l = g3 -> y <= d
lhs = self.x_g1
rhs = msat_make_leq(menv, x_y, c)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
lhs = self.x_g3
rhs = msat_make_leq(menv, x_y, d)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# delta > 0 | stutter -> y' = y + delta & l' = l
lhs = msat_make_or(menv,
msat_make_gt(menv, delta, zero),
self.evt_stutter)
rhs = msat_make_and(menv,
msat_make_equal(menv, x_y,
msat_make_plus(menv, y, delta)),
same_loc)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
disc_t = msat_make_and(menv, msat_make_equal(menv, delta, zero),
msat_make_not(menv, self.evt_stutter))
# (l = g0) -> (l' = g1 & evt_lower & y' = 0)
lhs = msat_make_and(menv, disc_t, self.g0)
rhs = msat_make_and(menv,
msat_make_and(menv, self.x_g1,
self.evt_lower),
msat_make_equal(menv, x_y, zero))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (l = g1) -> (l' = g2 & y <= c & evt_move & y' = y)
lhs = msat_make_and(menv, disc_t, self.g1)
rhs = msat_make_and(menv,
msat_make_and(menv, self.x_g2,
self.evt_move),
msat_make_and(menv,
msat_make_leq(menv, y, c),
msat_make_equal(menv, x_y, y)))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (l = g2) -> (l' = g3 & evt_rise & y' = 0)
lhs = msat_make_and(menv, disc_t, self.g2)
rhs = msat_make_and(menv,
msat_make_and(menv, self.x_g3, self.evt_rise),
msat_make_equal(menv, x_y, zero))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (l = g3) -> (l' = g0 & y >= c & y <= d & evt_move & y' = y)
lhs = msat_make_and(menv, disc_t, self.g3)
rhs = msat_make_and(menv,
msat_make_and(menv, self.x_g0,
msat_make_geq(menv, y, c)),
msat_make_and(menv,
msat_make_leq(menv, y, d),
self.evt_move))
rhs = msat_make_and(menv, rhs,
msat_make_equal(menv, x_y, y))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
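# Summary of the gate automaton encoded above, read off the transitions:
# g0 --evt_lower, y':=0--> g1 --evt_move, y<=c, y'=y--> g2 --evt_rise, y':=0--> g3 --evt_move, c<=y<=d, y'=y--> g0,
# with clock y advancing by delta on timed/stutter steps and invariants y<=c in g1, y<=d in g3.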
class Controller(Module):
"""Controller module"""
def __init__(self, name: str, menv: msat_env, enc: LTLEncoder,
e, N, delta):
super().__init__(name, menv, enc)
int_type = msat_get_integer_type(menv)
real_type = msat_get_rational_type(menv)
loc_symbs, locs, x_locs = self._enum("l", 4)
evt_symbs, evts, x_evts = self._enum("evt", 5)
z, x_z = self._symb("z", real_type)
cnt, x_cnt = self._symb("cnt", int_type)
self.symb2next = {z: x_z, cnt: x_cnt}
for s, x_s in chain(loc_symbs, evt_symbs):
assert s not in self.symb2next
self.symb2next[s] = x_s
self.evt_stutter = evts[0]
self.evt_approach = evts[1]
self.evt_exit = evts[2]
self.evt_lower = evts[3]
self.evt_rise = evts[4]
x_evt_stutter = x_evts[0]
x_evt_approach = x_evts[1]
x_evt_exit = x_evts[2]
x_evt_lower = x_evts[3]
x_evt_rise = x_evts[4]
self.c0 = locs[0]
self.c1 = locs[1]
self.c2 = locs[2]
self.c3 = locs[3]
self.x_c0 = x_locs[0]
self.x_c1 = x_locs[1]
self.x_c2 = x_locs[2]
self.x_c3 = x_locs[3]
same_loc = msat_make_iff(menv, loc_symbs[0][1], loc_symbs[0][0])
for s, x_s in loc_symbs[1:]:
same_loc = msat_make_and(menv, same_loc,
msat_make_iff(menv, x_s, s))
nums = [msat_make_number(menv, str(i)) for i in range(N + 1)]
N = nums[-1]
# l = c0 & z = 0
self.init = msat_make_and(menv, self.c0,
msat_make_equal(menv, z, nums[0]))
# bound l
bound_l = msat_make_or(menv,
msat_make_or(menv, self.c0, self.c1),
msat_make_or(menv, self.c2, self.c3))
self.init = msat_make_and(menv, self.init, bound_l)
x_bound_l = msat_make_or(menv,
msat_make_or(menv, self.x_c0, self.x_c1),
msat_make_or(menv, self.x_c2, self.x_c3))
self.trans = x_bound_l
# bound evt
bound_evt = msat_make_or(
menv,
msat_make_or(menv, self.evt_stutter,
self.evt_approach),
msat_make_or(menv, self.evt_exit,
msat_make_or(menv, self.evt_lower,
self.evt_rise)))
self.init = msat_make_and(menv, self.init, bound_evt)
x_bound_evt = msat_make_or(
menv,
msat_make_or(menv, x_evt_stutter,
x_evt_approach),
msat_make_or(menv, x_evt_exit,
msat_make_or(menv, x_evt_lower,
x_evt_rise)))
self.trans = msat_make_and(menv, self.trans, x_bound_evt)
# bound cnt
bound_cnt = msat_make_equal(menv, cnt, nums[0])
x_bound_cnt = msat_make_equal(menv, x_cnt, nums[0])
for i in nums[1:]:
bound_cnt = msat_make_or(menv, bound_cnt,
msat_make_equal(menv, cnt, i))
x_bound_cnt = msat_make_or(menv, x_bound_cnt,
msat_make_equal(menv, x_cnt, i))
self.init = msat_make_and(menv, self.init, bound_cnt)
self.trans = msat_make_and(menv, self.trans, x_bound_cnt)
# invars: (l = c1 | l = c3) -> (z <= e)
lhs = msat_make_or(menv, self.c1, self.c3)
rhs = msat_make_leq(menv, z, e)
self.init = msat_make_and(menv, self.init,
msat_make_impl(menv, lhs, rhs))
# same invariant on the next-state variables: (l' = c1 | l' = c3) -> (z' <= e)
lhs = msat_make_or(menv, self.x_c1, self.x_c3)
rhs = msat_make_leq(menv, x_z, e)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# delta > 0 | stutter -> z' = z + delta & l' = l & cnt' = cnt
lhs = msat_make_or(menv, msat_make_gt(menv, delta, nums[0]),
self.evt_stutter)
rhs = msat_make_and(
menv,
msat_make_and(menv,
msat_make_equal(menv, x_z,
msat_make_plus(menv, z, delta)),
same_loc),
msat_make_equal(menv, x_cnt, cnt))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
disc_t = msat_make_and(menv, msat_make_equal(menv, delta, nums[0]),
msat_make_not(menv, self.evt_stutter))
# (l = c0) -> (l' = c1 & evt_approach & z' = 0 & cnt' = 1)
lhs = msat_make_and(menv, disc_t, self.c0)
rhs = msat_make_and(menv,
msat_make_and(menv, self.x_c1,
self.evt_approach),
msat_make_and(menv,
msat_make_equal(menv, x_z, nums[0]),
msat_make_equal(menv, x_cnt,
nums[1])))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (l = c1) -> ((l' = c1 | l' = c2) & z' = z)
lhs = msat_make_and(menv, disc_t, self.c1)
rhs = msat_make_and(menv,
msat_make_equal(menv, x_z, z),
msat_make_or(menv, self.x_c1, self.x_c2))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (l = c1 & l' = c1) -> ((evt_approach & cnt' = cnt + 1) |
# (evt_exit & cnt' = cnt - 1))
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.c1, self.x_c1))
dec_cnt = msat_make_equal(menv, x_cnt,
msat_make_minus(menv, cnt, nums[1]))
inc_cnt = msat_make_equal(menv, x_cnt,
msat_make_plus(menv, cnt, nums[1]))
rhs = msat_make_or(menv,
msat_make_and(menv, self.evt_approach, inc_cnt),
msat_make_and(menv, self.evt_exit, dec_cnt))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (l = c1 & l' = c2) -> (evt_lower & z = e & cnt' = cnt)
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.c1, self.x_c2))
rhs = msat_make_and(menv, self.evt_lower,
msat_make_and(menv,
msat_make_equal(menv, z, e),
msat_make_equal(menv, x_cnt, cnt)))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (l = c2) -> (l' = c2 | l' = c3)
lhs = msat_make_and(menv, disc_t, self.c2)
rhs = msat_make_or(menv, self.x_c2, self.x_c3)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (l = c2 & l' = c2) -> (z' = z & ((cnt > 1 & evt_exit & cnt' = cnt - 1) |
# (evt_approach & cnt' = cnt + 1)))
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.c2, self.x_c2))
disj0 = msat_make_and(menv,
msat_make_gt(menv, cnt, nums[1]),
msat_make_and(menv, self.evt_exit,
dec_cnt))
rhs = msat_make_and(menv,
msat_make_equal(menv, x_z, z),
msat_make_or(menv, disj0,
msat_make_and(menv, self.evt_approach,
inc_cnt)))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (l = c2 & l' = c3) -> (cnt = 1 & evt_exit & z' = 0 & cnt' = 0)
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.c2, self.x_c3))
rhs = msat_make_and(menv,
msat_make_and(menv,
msat_make_equal(menv, cnt, nums[1]),
self.evt_exit),
msat_make_and(menv,
msat_make_equal(menv, x_z, nums[0]),
msat_make_equal(menv, x_cnt,
nums[0])))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (l = c3) -> ((l' = c2 | l' = c0) & z' = z)
lhs = msat_make_and(menv, disc_t, self.c3)
rhs = msat_make_and(menv,
msat_make_equal(menv, x_z, z),
msat_make_or(menv, self.x_c2, self.x_c0))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (l = c3 & l' = c2) -> (z <= e & evt_approach & cnt' = cnt + 1)
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.c3, self.x_c2))
rhs = msat_make_and(menv, inc_cnt,
msat_make_and(menv,
msat_make_leq(menv, z, e),
self.evt_approach))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (l = c3 & l' = c0) -> (z <= e & evt_rise & cnt' = cnt)
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.c3, self.x_c0))
rhs = msat_make_and(menv,
msat_make_equal(menv, x_cnt, cnt),
msat_make_and(menv, self.evt_rise,
msat_make_leq(menv, z, e)))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
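# Summary of the controller automaton encoded above, read off the transitions:
# c0 --evt_approach, z':=0, cnt':=1--> c1; in c1 further approach/exit events increment/decrement cnt;
# c1 --evt_lower, z=e--> c2; in c2 approach/exit keep cnt updated (exit only while cnt > 1);
# c2 --evt_exit, cnt=1, z':=0, cnt':=0--> c3; c3 --evt_approach, z<=e--> c2 or c3 --evt_rise, z<=e--> c0,
# with clock z bounded by e in c1 and c3.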
|
the-stack_0_25026
|
"""
In Insertion Sort Part 1, you inserted one element into an array at its correct sorted position. Using the same approach repeatedly, can you sort an entire array?
Guideline: You already can place an element into a sorted array. How can you use that code to build up a sorted array, one element at a time? Note that in the first step, when you consider an array with just the first element, it is already sorted since there's nothing to compare it to.
In this challenge, print the array after each iteration of the insertion sort, i.e., whenever the next element has been inserted at its correct position. Since the array composed of just the first element is already sorted, begin printing after placing the second element.
For example, there are n = 7 elements in arr = [3, 4, 7, 5, 6, 2, 1]. Working from left to right, we get the following output:
3 4 7 5 6 2 1
3 4 7 5 6 2 1
3 4 5 7 6 2 1
3 4 5 6 7 2 1
2 3 4 5 6 7 1
1 2 3 4 5 6 7
Function Description
Complete the insertionSort2 function in the editor below. At each iteration, it should print the array as space-separated integers on a separate line.
insertionSort2 has the following parameter(s):
n: an integer representing the length of the array
arr: an array of integers
Input Format
The first line contains an integer, n, the size of arr.
The next line contains n space-separated integers arr[i].
Constraints
Output Format
On each line, output the entire array at every iteration.
Sample Input
6
1 4 3 5 6 2
Sample Output
1 4 3 5 6 2
1 3 4 5 6 2
1 3 4 5 6 2
1 3 4 5 6 2
1 2 3 4 5 6
"""
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the insertionSort2 function below.
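# Illustrative sketch only (not part of the required solution): the "Part 1" step the
# statement refers to, i.e. inserting a single element into an already-sorted prefix.
# insertionSort2 below simply repeats this step for i = 1 .. n-1. The helper name
# insert_into_sorted_prefix is chosen here purely for illustration.
def insert_into_sorted_prefix(arr, i):
    """Insert arr[i] into the sorted prefix arr[0..i-1], in place."""
    key = arr[i]
    j = i - 1
    while j >= 0 and arr[j] > key:
        arr[j + 1] = arr[j]  # shift larger elements one slot to the right
        j -= 1
    arr[j + 1] = key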
def insertionSort2(n, arr):
for i in range(1, len(arr)):
key = arr[i]
j = i - 1
while j >= 0 and arr[j] > key:
arr[j + 1] = arr[j]
j -= 1
arr[j + 1] = key
printArray(arr)
return arr
def printArray(arr):
print(" ".join([str(x) for x in arr]))
if __name__ == '__main__':
n = int(input())
arr = list(map(int, input().rstrip().split()))
insertionSort2(n, arr)
|
the-stack_0_25028
|
import base64
import os
import struct
import unittest
from ssh_ca import agent_client
class TestAgentSocketValidation(unittest.TestCase):
def test_nonexistent_explicit_path(self):
self.assertRaises(
agent_client.SshClientFailure,
agent_client.Client,
'/radio/flyer/inchworm/agent.sock'
)
def test_nonexistent_env_path(self):
# don't shoot me for setting an environment variable in a test. I hate
# myself for it.
old_env = os.getenv('SSH_AUTH_SOCK')
try:
os.environ['SSH_AUTH_SOCK'] = '/eflite/alpha/450/sockeroony.sock'
self.assertRaises(
agent_client.SshClientFailure,
agent_client.Client,
)
finally:
if old_env is not None:
os.environ['SSH_AUTH_SOCK'] = old_env
class TestAgentBuffer(unittest.TestCase):
def test_nominal(self):
# This is a pretty stupid test. But it does touch all of the code in
# the class and it verifies that everything we shoved in there actually
# ended up in the serialized string somewhere. Though it may be in the
# wrong place or not actually correct. Better than nothing?
buf = agent_client.SshAgentBuffer()
buf.append_byte(93)
buf.append_uint32(12394)
buf.append_bytestring(base64.b64decode('AAAA'))
results = buf.serialize()
self.assertIn(bytes([93]), results)
self.assertIn(struct.pack('>I', 12394), results)
self.assertIn(b'\x00\x00\x00', results)
if __name__ == '__main__':
unittest.main()
|
the-stack_0_25029
|
# This is the Python adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/).
#
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Providing interface methods."""
import types
from collections import OrderedDict
from functools import wraps
from mindspore import context
from mindspore import log as logger
from .._c_expression import generate_key, Executor_, Tensor, MetaTensor
from .._c_expression import verify_inputs_signature, init_exec_dataset, _set_dataset_mode_config, init_backend
from .tensor import Tensor as MsTensor
# store ms_function class compiled pipeline cache
ms_compile_cache = {}
def _convert_function_arguments(fn, *args):
"""
Process the fn default parameters.
Args:
fn (Function): The function to be parsed.
args (tuple): The parameters of the function.
"""
arguments_dict = OrderedDict()
parse_method = None
if isinstance(fn, (types.FunctionType, types.MethodType)):
parse_method = fn.__name__
index = 0
for value in args:
arguments_dict[f'arg{index}'] = value
index = index + 1
logger.debug("fn(%r) full parameters dict is: %r", fn, arguments_dict)
converted = True
else:
logger.warning("Find error: fn isn't function or method")
converted = False
return converted, arguments_dict, parse_method
def _wrap_func(fn):
"""
Wrapper function, convert return data to tensor or tuple of tensor.
Args:
fn (Function): The function need be wrapped.
Returns:
Function, a new function with return suitable format data.
"""
@wraps(fn)
def wrapper(*arg, **kwargs):
results = fn(*arg, **kwargs)
def _convert_data(data):
if isinstance(data, Tensor) and not isinstance(data, MsTensor):
return MsTensor(data)
if isinstance(data, tuple):
return tuple(_convert_data(x) for x in data)
if isinstance(data, list):
return list(_convert_data(x) for x in data)
return data
return _convert_data(results)
return wrapper
def _exec_init_graph(obj, init_phase):
"""Execute the parameter initializer graph."""
inst_executor = Executor_.get_instance()
param_dict = OrderedDict()
for name, param in obj.parameters_dict().items():
if not param.is_init:
param_dict[name] = param
param.is_init = True
param.data.init_flag = True
if param_dict:
inst_executor.run_init_graph(param_dict, init_phase)
class _MindSporeFunction:
"""
Represents a function compiled by mind expression.
_MindSporeFunction will compile the original function for every combination
of argument types and shapes it is given (as well as their values, optionally).
Args:
fn (Function): The root function to compile.
input_signature (Function): User defines signature to verify input.
obj (Object): If function is a method, obj is the owner of function,
else, obj is none.
"""
def __init__(self, fn, input_signature=None, obj=None):
self.fn = fn
self.save_graphs = context.get_context("save_graphs")
self.save_graphs_path = context.get_context("save_graphs_path")
self.input_signature = input_signature
self.obj = None
self.identify_obj = None
if hasattr(obj, fn.__name__):
self.obj = obj
elif obj is not None:
self.identify_obj = obj
self._executor = Executor_.get_instance()
def build_data_init_graph(self, graph_name):
"""Build GE data graph and init graph for the given graph name."""
if self.obj is None:
logger.warning("Make sure parameter should not be used in function")
para_dict = OrderedDict()
self._executor.build_data_graph(para_dict, graph_name)
return
self._executor.build_data_graph(self.obj.parameters_dict(), graph_name, self.obj.parameters_broadcast_dict())
init_phase = "init_subgraph" + graph_name[graph_name.find("."):]
_exec_init_graph(self.obj, init_phase)
def compile(self, arguments_dict, method_name):
"""Returns pipline for the given args."""
args_list = tuple(arguments_dict.values())
arg_names = tuple(arguments_dict.keys())
# remove first self parameter when fn is a method
if self.obj is not None:
args_list = args_list[1:]
arg_names = arg_names[1:]
# verify the signature for both function and method
if self.input_signature is not None:
signatures = []
for sig_spec in self.input_signature:
if not isinstance(sig_spec, MetaTensor):
raise TypeError("Input_signature is not MetaTensor")
signatures.append(sig_spec)
is_valid_input = verify_inputs_signature(signatures, args_list)
if not is_valid_input:
raise ValueError("Inputs is incompatible with input signature!")
dic = dict(zip(arg_names, args_list))
generate_name = self.fn.__module__ + "." + self.fn.__name__
self.fn.__parse_method__ = method_name
# replace key with obj info and object ext info when fn is a method
if self.obj is not None:
self.obj.__parse_method__ = method_name
generate_name = self.obj.__module__ + "." + str(self.obj.create_time)
if self.identify_obj is not None:
generate_name = generate_name + str(id(self.identify_obj))
key = generate_key(generate_name, dic)
phase = str(key[1]) + generate_name
if key not in ms_compile_cache.keys():
is_compile = False
if self.obj is None:
is_compile = self._executor.compile(self.fn, args_list, phase, True)
else:
is_compile = self._executor.compile(self.obj, args_list, phase, True)
if not is_compile:
raise RuntimeError("Executor compile failed.")
if context.get_context("enable_ge"):
self.build_data_init_graph(phase)
# since function can be redefined, we only cache class method pipeline
if self.obj is not None or self.identify_obj is not None:
ms_compile_cache[key] = phase
return phase
return ms_compile_cache[key]
@_wrap_func
def __call__(self, *args):
init_backend()
converted, arguments_dict, parse_method = _convert_function_arguments(self.fn, *args)
if not converted:
raise RuntimeError('Failed to process the function parameters')
args_list = tuple(arguments_dict.values())
if self.obj is not None:
args_list = args_list[1:]
phase = self.compile(arguments_dict, parse_method)
if context.get_context("precompile_only"):
return None
return self._executor(args_list, phase)
def ms_function(fn=None, obj=None, input_signature=None):
"""
Creates a callable MindSpore graph from a python function.
This allows the MindSpore runtime to apply optimizations based on graph.
Args:
fn (Function): The Python function that will be run as a graph. Default: None.
obj (Object): The Python Object that provide information for identify compiled function. Default: None.
input_signature (MetaTensor): The MetaTensor to describe the input arguments. The MetaTensor specifies
the shape and dtype of the Tensor and they will be supplied to this function. If input_signature
is specified, every input to `fn` must be a `Tensor`. And the input parameters of `fn` cannot accept
`**kwargs`. The shape and dtype of actual inputs should keep same with input_signature, or TypeError
will be raised. Default: None.
Returns:
Function, if `fn` is not None, returns a callable that will execute the compiled function; If `fn` is None,
returns a decorator and when this decorator invokes with a single `fn` argument, the callable is equal to the
case when `fn` is not None.
Examples:
>>> def tensor_add(x, y):
>>> z = F.tensor_add(x, y)
>>> return z
>>>
>>> @ms_function
>>> def tensor_add_with_dec(x, y):
>>> z = F.tensor_add(x, y)
>>> return z
>>>
>>> @ms_function(input_signature=(MetaTensor(mindspore.float32, (1, 1, 3, 3)),
>>> MetaTensor(mindspore.float32, (1, 1, 3, 3))))
>>> def tensor_add_with_sig(x, y):
>>> z = F.tensor_add(x, y)
>>> return z
>>>
>>> x = Tensor(np.ones([1, 1, 3, 3]).astype(np.float32))
>>> y = Tensor(np.ones([1, 1, 3, 3]).astype(np.float32))
>>>
>>> tensor_add_graph = ms_function(fn=tensor_add)
>>> out = tensor_add_graph(x, y)
>>> out = tensor_add_with_dec(x, y)
>>> out = tensor_add_with_sig(x, y)
"""
def wrap_mindspore(func):
@wraps(func)
def staging_specialize(*args):
process_obj = obj
if args and not isinstance(args[0], MsTensor) and hasattr(args[0], func.__name__):
process_obj = args[0]
args = (x.default_input if hasattr(x, 'default_input') else x for x in args)
return _MindSporeFunction(func, input_signature, process_obj)(*args)
return staging_specialize
if fn is not None:
return wrap_mindspore(fn)
return wrap_mindspore
def _generate_pip_args(obj, *args, method="construct"):
"""Generate arguments for pipeline."""
if hasattr(obj, method):
fn = getattr(obj, method)
else:
raise AttributeError('The process method does not exist')
converted, arguments_dict, parse_method = _convert_function_arguments(fn, *args)
if not converted:
raise RuntimeError('Failed to process the method parameters')
args_list = tuple(arguments_dict.values())
args_names = tuple(arguments_dict.keys())
obj.__parse_method__ = parse_method
return args_names, args_list
class _Executor:
"""
An executor used to compile/manage/run graph.
Including data_graph, train_graph, eval_graph and predict graph.
Returns:
Graph, return the result of pipeline running.
"""
def __init__(self):
# create needed graph by lazy mode
self.is_init = False
self._executor = Executor_.get_instance()
self.compile_cache = {}
self.phase_prefix = ""
def init_dataset(self, queue_name, dataset_size, batch_size, dataset_types, dataset_shapes,
input_indexs, phase='dataset'):
"""
Initialization interface for calling data subgraph.
Args:
queue_name (str): The name of tdt queue on the device.
dataset_size (int): The size of dataset.
batch_size (int): The size of batch.
dataset_types (list): The output types of element in dataset.
dataset_shapes (list): The output shapes of element in dataset.
input_indexs (list): The index of data with net.
phase (str): The name of phase, e.g., train_dataset/eval_dataset. Default: 'dataset'.
Returns:
bool, specifies whether the data subgraph was initialized successfully.
"""
if not init_exec_dataset(queue_name=queue_name,
size=dataset_size,
batch_size=batch_size,
types=dataset_types,
shapes=dataset_shapes,
input_indexs=input_indexs,
phase=phase):
raise RuntimeError("Failure to init and dataset subgraph!")
return True
def _build_data_graph(self, obj, params, phase):
if params is None:
self._executor.build_data_graph(obj.parameters_dict(), phase, obj.parameters_broadcast_dict())
elif isinstance(params, OrderedDict):
self._executor.build_data_graph(params, phase)
else:
raise TypeError('Parameters need OrderedDict type, but got {}'.
format(type(params)))
def _params_init_data(self, obj, params, auto_parallel_mode=False):
"""Init parameters' data."""
if params is not None:
for key, param in params.items():
if not auto_parallel_mode:
param.init_data()
elif key not in obj.parameter_layout_dict:
logger.info("Layout dict does not contain the key %s.", key)
param.init_data(set_sliced=True)
else:
layout = obj.parameter_layout_dict[key]
param.init_data(layout, set_sliced=True)
obj.init_parameters_data(auto_parallel_mode=auto_parallel_mode)
def _set_dataset_mode(self, args_list):
"""set dataset mode."""
# decide whether to sink based on whether the inputs are virtual or args_list is ()
if (args_list and isinstance(args_list[0], Tensor) and args_list[0].virtual_flag) or \
(args_list is not None and args_list == ()):
_set_dataset_mode_config('sink')
else:
_set_dataset_mode_config('normal')
def compile(self, obj, *args, phase='predict', params=None, do_convert=True, auto_parallel_mode=False):
"""
Compiles graph.
Args:
obj (Function/Cell): The function or cell instance need compile.
args (tuple): Function or cell input arguments.
phase (str): The name of compile phase. Default: 'predict'.
params (OrderedDict): The parameters dictionary used for init data graph. Default: None.
do_convert (bool): When set to True, convert ME graph to GE graph after compiling graph.
auto_parallel_mode: When set to True, use auto parallel mode to compile graph.
Return:
Str, the full phase of the cell.
Bool, if the graph has been compiled before, return False, else return True.
"""
obj.check_names()
args_names, args_list = _generate_pip_args(obj, *args)
dic = dict(zip(args_names, args_list))
key = generate_key(phase, dic)
self.phase_prefix = str(key[1])
if phase == 'export':
phase = phase + '.' + str(obj.create_time)
else:
phase = self.phase_prefix + phase + '.' + str(obj.create_time)
enable_debug_runtime = context.get_context("enable_debug_runtime")
enable_ge = context.get_context("enable_ge")
use_vm = not enable_ge or (enable_debug_runtime and context.get_context("mode") == context.PYNATIVE_MODE)
self._set_dataset_mode(args_list)
if phase in self.compile_cache.keys():
logger.debug("%r graph has existed.", phase)
return phase, False
result = self._executor.compile(obj, args_list, phase, use_vm)
self.compile_cache[phase] = phase
if not result:
raise RuntimeError("Executor compile failed.")
graph = self._executor.get_func_graph(phase)
if graph is None:
logger.error("%r graph compile failed.", phase)
if not do_convert:
return phase, True
if auto_parallel_mode:
obj.parameter_layout_dict = self._executor.get_parameter_layout(phase)
self._params_init_data(obj, params, auto_parallel_mode)
if not enable_debug_runtime or enable_ge:
if auto_parallel_mode:
obj.load_parameter_slice(params)
# set parallel inputs in sink mode
if auto_parallel_mode and (args and isinstance(args[0], Tensor) and args[0].virtual_flag):
obj.set_parallel_input_with_inputs(*args)
# the following GE init process is not needed when use vm or ms backend
if enable_ge:
self._build_data_graph(obj, params, phase)
if "export" not in phase:
init_phase = "init_subgraph" + "." + str(obj.create_time)
_exec_init_graph(obj, init_phase)
elif not enable_ge and "export" in phase:
self._build_data_graph(obj, params, phase)
return phase, True
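# Usage sketch (illustrative only; 'net' and 'inputs' are placeholder names, and the
# phase string must match the one used at compile time):
#     phase, is_fresh = _executor.compile(net, *inputs, phase='train')
#     output = _executor.run(net, *inputs, phase='train')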
def _get_strategy(self, obj):
real_phase = self.phase_prefix + obj.phase + '.' + str(obj.create_time)
return self._executor.get_strategy(real_phase)
def _get_allreduce_fusion(self, obj):
real_phase = self.phase_prefix + obj.phase + '.' + str(obj.create_time)
return self._executor.get_allreduce_fusion(real_phase)
def has_compiled(self, phase='predict'):
"""
Check whether the given phase has been compiled.
Args:
phase (str): The phase name. Default: 'predict'.
Returns:
bool, specifies whether the specific graph has been compiled.
"""
return self._executor.has_compiled(phase)
def __call__(self, obj, *args, phase='predict'):
if context.get_context("precompile_only"):
return None
return self.run(obj, *args, phase=phase)
@_wrap_func
def _exec_pip(self, obj, *args, phase=''):
"""Execute the generated pipeline."""
fn = obj.construct
converted, arguments_dict, parse_method = _convert_function_arguments(fn, *args)
if not converted:
raise RuntimeError('Failed to process the method parameters')
args_list = tuple(arguments_dict.values())
obj.__parse_method__ = parse_method
return self._executor(args_list, phase)
def run(self, obj, *args, phase='predict'):
"""
Run the specific graph.
Args:
phase (str): The phase name. Default: 'predict'.
Returns:
Tensor/Tuple, return execute result.
"""
if phase == 'save':
return self._executor((), phase + '.' + str(obj.create_time))
phase_real = self.phase_prefix + phase + '.' + str(obj.create_time)
if self.has_compiled(phase_real):
return self._exec_pip(obj, *args, phase=phase_real)
raise KeyError('{} graph does not exist.'.format(phase_real))
def del_net_res(self, net_id):
self._executor.del_net_res(net_id)
def _get_func_graph_proto(self, exec_id, ir_type="onnx_ir", use_prefix=False):
"""Get graph proto from pipeline."""
if use_prefix:
exec_id = self.phase_prefix + exec_id
if self._executor.has_compiled(exec_id) is False:
return None
return self._executor.get_func_graph_proto(exec_id, ir_type)
def export(self, net, file_name, file_format='GEIR'):
"""
Export graph.
Args:
net (Cell): MindSpore network
file_name (str): File name of model to export
file_format (str): MindSpore currently supports the 'GEIR' and 'ONNX' formats for exported models
"""
from .._c_expression import export_graph
phase = 'export' + '.' + str(net.create_time)
export_graph(file_name, file_format, phase)
_executor = _Executor()
__all__ = ['ms_function']
|
the-stack_0_25030
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""MaskRcnn based on ResNet50."""
import numpy as np
import mindspore.nn as nn
from mindspore import ops
from mindspore import Tensor, dtype as mstype
from .resnet50 import ResNetFea, ResidualBlockUsing
from .bbox_assign_sample_stage2 import BboxAssignSampleForRcnn
from .fpn_neck import FeatPyramidNeck
from .proposal_generator import Proposal
from .rcnn_cls import RcnnCls
from .rcnn_mask import RcnnMask
from .rpn import RPN
from .roi_align import SingleRoIExtractor
from .anchor_generator import AnchorGenerator
class Mask_Rcnn_Resnet50(nn.Cell):
"""
MaskRcnn Network.
Note:
backbone = resnet50
Returns:
Tuple, tuple of output tensor.
rpn_loss: Scalar, Total loss of RPN subnet.
rcnn_loss: Scalar, Total loss of RCNN subnet.
rpn_cls_loss: Scalar, Classification loss of RPN subnet.
rpn_reg_loss: Scalar, Regression loss of RPN subnet.
rcnn_cls_loss: Scalar, Classification loss of RCNNcls subnet.
rcnn_reg_loss: Scalar, Regression loss of RCNNcls subnet.
rcnn_mask_loss: Scalar, mask loss of RCNNmask subnet.
Examples:
net = Mask_Rcnn_Resnet50(config)
"""
def __init__(self, config):
super(Mask_Rcnn_Resnet50, self).__init__()
self.train_batch_size = config.batch_size
self.num_classes = config.num_classes
self.anchor_scales = config.anchor_scales
self.anchor_ratios = config.anchor_ratios
self.anchor_strides = config.anchor_strides
self.target_means = tuple(config.rcnn_target_means)
self.target_stds = tuple(config.rcnn_target_stds)
# Anchor generator
anchor_base_sizes = None
self.anchor_base_sizes = list(
self.anchor_strides) if anchor_base_sizes is None else anchor_base_sizes
self.anchor_generators = []
for anchor_base in self.anchor_base_sizes:
self.anchor_generators.append(
AnchorGenerator(anchor_base, self.anchor_scales, self.anchor_ratios))
self.num_anchors = len(self.anchor_ratios) * len(self.anchor_scales)
featmap_sizes = config.feature_shapes
assert len(featmap_sizes) == len(self.anchor_generators)
self.anchor_list = self.get_anchors(featmap_sizes)
# Backbone resnet50
self.backbone = ResNetFea(ResidualBlockUsing,
config.resnet_block,
config.resnet_in_channels,
config.resnet_out_channels,
False)
# Fpn
self.fpn_ncek = FeatPyramidNeck(config.fpn_in_channels,
config.fpn_out_channels,
config.fpn_num_outs)
# Rpn and rpn loss
self.gt_labels_stage1 = Tensor(np.ones((self.train_batch_size, config.num_gts)).astype(np.uint8))
self.rpn_with_loss = RPN(config,
self.train_batch_size,
config.rpn_in_channels,
config.rpn_feat_channels,
config.num_anchors,
config.rpn_cls_out_channels)
# Proposal
self.proposal_generator = Proposal(config,
self.train_batch_size,
config.activate_num_classes,
config.use_sigmoid_cls)
self.proposal_generator.set_train_local(config, True)
self.proposal_generator_test = Proposal(config,
config.test_batch_size,
config.activate_num_classes,
config.use_sigmoid_cls)
self.proposal_generator_test.set_train_local(config, False)
# Assign and sampler stage two
self.bbox_assigner_sampler_for_rcnn = BboxAssignSampleForRcnn(config, self.train_batch_size,
config.num_bboxes_stage2, True)
self.decode = ops.BoundingBoxDecode(max_shape=(768, 1280), means=self.target_means, \
stds=self.target_stds)
# Roi
self.roi_align = SingleRoIExtractor(config,
config.roi_layer,
config.roi_align_out_channels,
config.roi_align_featmap_strides,
self.train_batch_size,
config.roi_align_finest_scale,
mask=False)
self.roi_align.set_train_local(config, True)
self.roi_align_mask = SingleRoIExtractor(config,
config.roi_layer,
config.roi_align_out_channels,
config.roi_align_featmap_strides,
self.train_batch_size,
config.roi_align_finest_scale,
mask=True)
self.roi_align_mask.set_train_local(config, True)
self.roi_align_test = SingleRoIExtractor(config,
config.roi_layer,
config.roi_align_out_channels,
config.roi_align_featmap_strides,
1,
config.roi_align_finest_scale,
mask=False)
self.roi_align_test.set_train_local(config, False)
self.roi_align_mask_test = SingleRoIExtractor(config,
config.roi_layer,
config.roi_align_out_channels,
config.roi_align_featmap_strides,
1,
config.roi_align_finest_scale,
mask=True)
self.roi_align_mask_test.set_train_local(config, False)
# Rcnn
self.rcnn_cls = RcnnCls(config, self.train_batch_size, self.num_classes)
self.rcnn_mask = RcnnMask(config, self.train_batch_size, self.num_classes)
# Op declare
self.squeeze = ops.Squeeze()
self.cast = ops.Cast()
self.concat = ops.Concat(axis=0)
self.concat_1 = ops.Concat(axis=1)
self.concat_2 = ops.Concat(axis=2)
self.reshape = ops.Reshape()
self.select = ops.Select()
self.greater = ops.Greater()
self.transpose = ops.Transpose()
# Test mode
self.test_batch_size = config.test_batch_size
self.split = ops.Split(axis=0, output_num=self.test_batch_size)
self.split_shape = ops.Split(axis=0, output_num=4)
self.split_scores = ops.Split(axis=1, output_num=self.num_classes)
self.split_fb_mask = ops.Split(axis=1, output_num=self.num_classes)
self.split_cls = ops.Split(axis=0, output_num=self.num_classes-1)
self.tile = ops.Tile()
self.gather = ops.GatherNd()
self.rpn_max_num = config.rpn_max_num
self.zeros_for_nms = Tensor(np.zeros((self.rpn_max_num, 3)).astype(np.float16))
self.ones_mask = np.ones((self.rpn_max_num, 1)).astype(bool)
self.zeros_mask = np.zeros((self.rpn_max_num, 1)).astype(bool)
self.bbox_mask = Tensor(np.concatenate((self.ones_mask, self.zeros_mask,
self.ones_mask, self.zeros_mask), axis=1))
self.nms_pad_mask = Tensor(np.concatenate((self.ones_mask, self.ones_mask,
self.ones_mask, self.ones_mask, self.zeros_mask), axis=1))
self.test_score_thresh = Tensor(np.ones((self.rpn_max_num, 1)).astype(np.float16) * config.test_score_thr)
self.test_score_zeros = Tensor(np.ones((self.rpn_max_num, 1)).astype(np.float16) * 0)
self.test_box_zeros = Tensor(np.ones((self.rpn_max_num, 4)).astype(np.float16) * -1)
self.test_iou_thr = Tensor(np.ones((self.rpn_max_num, 1)).astype(np.float16) * config.test_iou_thr)
self.test_max_per_img = config.test_max_per_img
self.nms_test = ops.NMSWithMask(config.test_iou_thr)
self.softmax = ops.Softmax(axis=1)
self.logicand = ops.LogicalAnd()
self.oneslike = ops.OnesLike()
self.test_topk = ops.TopK(sorted=True)
self.test_num_proposal = self.test_batch_size * self.rpn_max_num
# Improve speed
self.concat_start = min(self.num_classes - 2, 55)
self.concat_end = (self.num_classes - 1)
# Init tensor
roi_align_index = [np.array(np.ones((config.num_expected_pos_stage2 + config.num_expected_neg_stage2, 1)) * i,
dtype=np.float16) for i in range(self.train_batch_size)]
roi_align_index_test = [np.array(np.ones((config.rpn_max_num, 1)) * i, dtype=np.float16) \
for i in range(self.test_batch_size)]
self.roi_align_index_tensor = Tensor(np.concatenate(roi_align_index))
self.roi_align_index_test_tensor = Tensor(np.concatenate(roi_align_index_test))
roi_align_index_pos = [np.array(np.ones((config.num_expected_pos_stage2, 1)) * i,
dtype=np.float16) for i in range(self.train_batch_size)]
self.roi_align_index_tensor_pos = Tensor(np.concatenate(roi_align_index_pos))
self.rcnn_loss_cls_weight = Tensor(np.array(config.rcnn_loss_cls_weight).astype(np.float16))
self.rcnn_loss_reg_weight = Tensor(np.array(config.rcnn_loss_reg_weight).astype(np.float16))
self.rcnn_loss_mask_fb_weight = Tensor(np.array(config.rcnn_loss_mask_fb_weight).astype(np.float16))
self.argmax_with_value = ops.ArgMaxWithValue(axis=1)
self.on_value = Tensor(1.0, mstype.float32)
self.off_value = Tensor(0.0, mstype.float32)
self.onehot = ops.OneHot()
self.reducesum = ops.ReduceSum()
self.sigmoid = ops.Sigmoid()
self.expand_dims = ops.ExpandDims()
self.test_mask_fb_zeros = Tensor(np.zeros((self.rpn_max_num, 28, 28)).astype(np.float16))
self.value = Tensor(1.0, mstype.float16)
def construct(self, img_data, img_metas, gt_bboxes, gt_labels, gt_valids, gt_masks):
"""Define MaskRcnn Network"""
x = self.backbone(img_data)
x = self.fpn_ncek(x)
rpn_loss, cls_score, bbox_pred, rpn_cls_loss, rpn_reg_loss, _ = self.rpn_with_loss(x,
img_metas,
self.anchor_list,
gt_bboxes,
self.gt_labels_stage1,
gt_valids)
if self.training:
proposal, proposal_mask = self.proposal_generator(cls_score, bbox_pred, self.anchor_list)
else:
proposal, proposal_mask = self.proposal_generator_test(cls_score, bbox_pred, self.anchor_list)
gt_labels = self.cast(gt_labels, mstype.int32)
gt_valids = self.cast(gt_valids, mstype.int32)
bboxes_tuple = ()
deltas_tuple = ()
labels_tuple = ()
mask_tuple = ()
pos_bboxes_tuple = ()
pos_mask_fb_tuple = ()
pos_labels_tuple = ()
pos_mask_tuple = ()
if self.training:
for i in range(self.train_batch_size):
gt_bboxes_i = self.squeeze(gt_bboxes[i:i + 1:1, ::])
gt_labels_i = self.squeeze(gt_labels[i:i + 1:1, ::])
gt_labels_i = self.cast(gt_labels_i, mstype.uint8)
gt_valids_i = self.squeeze(gt_valids[i:i + 1:1, ::])
gt_valids_i = self.cast(gt_valids_i, mstype.bool_)
gt_masks_i = self.squeeze(gt_masks[i:i + 1:1, ::])
gt_masks_i = self.cast(gt_masks_i, mstype.bool_)
bboxes, deltas, labels, mask, pos_bboxes, pos_mask_fb, pos_labels, pos_mask = \
self.bbox_assigner_sampler_for_rcnn(gt_bboxes_i,
gt_labels_i,
proposal_mask[i],
proposal[i][::, 0:4:1],
gt_valids_i,
gt_masks_i)
bboxes_tuple += (bboxes,)
deltas_tuple += (deltas,)
labels_tuple += (labels,)
mask_tuple += (mask,)
pos_bboxes_tuple += (pos_bboxes,)
pos_mask_fb_tuple += (pos_mask_fb,)
pos_labels_tuple += (pos_labels,)
pos_mask_tuple += (pos_mask,)
bbox_targets = self.concat(deltas_tuple)
rcnn_labels = self.concat(labels_tuple)
bbox_targets = ops.stop_gradient(bbox_targets)
rcnn_labels = ops.stop_gradient(rcnn_labels)
rcnn_labels = self.cast(rcnn_labels, mstype.int32)
rcnn_pos_masks_fb = self.concat(pos_mask_fb_tuple)
rcnn_pos_masks_fb = ops.stop_gradient(rcnn_pos_masks_fb)
rcnn_pos_labels = self.concat(pos_labels_tuple)
rcnn_pos_labels = ops.stop_gradient(rcnn_pos_labels)
rcnn_pos_labels = self.cast(rcnn_pos_labels, mstype.int32)
else:
mask_tuple += proposal_mask
bbox_targets = proposal_mask
rcnn_labels = proposal_mask
rcnn_pos_masks_fb = proposal_mask
rcnn_pos_labels = proposal_mask
for p_i in proposal:
bboxes_tuple += (p_i[::, 0:4:1],)
pos_rois = None
if self.training:
if self.train_batch_size > 1:
bboxes_all = self.concat(bboxes_tuple)
pos_bboxes_all = self.concat(pos_bboxes_tuple)
else:
bboxes_all = bboxes_tuple[0]
pos_bboxes_all = pos_bboxes_tuple[0]
rois = self.concat_1((self.roi_align_index_tensor, bboxes_all))
pos_rois = self.concat_1((self.roi_align_index_tensor_pos, pos_bboxes_all))
pos_rois = self.cast(pos_rois, mstype.float32)
pos_rois = ops.stop_gradient(pos_rois)
else:
if self.test_batch_size > 1:
bboxes_all = self.concat(bboxes_tuple)
else:
bboxes_all = bboxes_tuple[0]
rois = self.concat_1((self.roi_align_index_test_tensor, bboxes_all))
rois = self.cast(rois, mstype.float32)
rois = ops.stop_gradient(rois)
if self.training:
roi_feats = self.roi_align(rois,
self.cast(x[0], mstype.float32),
self.cast(x[1], mstype.float32),
self.cast(x[2], mstype.float32),
self.cast(x[3], mstype.float32))
else:
roi_feats = self.roi_align_test(rois,
self.cast(x[0], mstype.float32),
self.cast(x[1], mstype.float32),
self.cast(x[2], mstype.float32),
self.cast(x[3], mstype.float32))
roi_feats = self.cast(roi_feats, mstype.float16)
rcnn_masks = self.concat(mask_tuple)
rcnn_masks = ops.stop_gradient(rcnn_masks)
rcnn_mask_squeeze = self.squeeze(self.cast(rcnn_masks, mstype.bool_))
rcnn_pos_masks = self.concat(pos_mask_tuple)
rcnn_pos_masks = ops.stop_gradient(rcnn_pos_masks)
rcnn_pos_mask_squeeze = self.squeeze(self.cast(rcnn_pos_masks, mstype.bool_))
rcnn_cls_loss, rcnn_reg_loss = self.rcnn_cls(roi_feats,
bbox_targets,
rcnn_labels,
rcnn_mask_squeeze)
output = ()
if self.training:
roi_feats_mask = self.roi_align_mask(pos_rois,
self.cast(x[0], mstype.float32),
self.cast(x[1], mstype.float32),
self.cast(x[2], mstype.float32),
self.cast(x[3], mstype.float32))
roi_feats_mask = self.cast(roi_feats_mask, mstype.float16)
rcnn_mask_fb_loss = self.rcnn_mask(roi_feats_mask,
rcnn_pos_labels,
rcnn_pos_mask_squeeze,
rcnn_pos_masks_fb)
rcnn_loss = self.rcnn_loss_cls_weight * rcnn_cls_loss + self.rcnn_loss_reg_weight * rcnn_reg_loss + \
self.rcnn_loss_mask_fb_weight * rcnn_mask_fb_loss
output += (rpn_loss, rcnn_loss, rpn_cls_loss, rpn_reg_loss, rcnn_cls_loss, rcnn_reg_loss, rcnn_mask_fb_loss)
else:
mask_fb_pred_all = self.rcnn_mask_test(x, bboxes_all, rcnn_cls_loss, rcnn_reg_loss)
output = self.get_det_bboxes(rcnn_cls_loss, rcnn_reg_loss, rcnn_masks, bboxes_all,
img_metas, mask_fb_pred_all)
return output
def get_det_bboxes(self, cls_logits, reg_logits, mask_logits, rois, img_metas, mask_fb_pred_all):
"""Get the actual detection box."""
scores = self.softmax(cls_logits / self.value)
mask_fb_logits = self.sigmoid(mask_fb_pred_all)
boxes_all = ()
for i in range(self.num_classes):
k = i * 4
reg_logits_i = self.squeeze(reg_logits[::, k:k+4:1])
out_boxes_i = self.decode(rois, reg_logits_i)
boxes_all += (out_boxes_i,)
img_metas_all = self.split(img_metas)
scores_all = self.split(scores)
mask_all = self.split(self.cast(mask_logits, mstype.int32))
mask_fb_all = self.split(mask_fb_logits)
boxes_all_with_batchsize = ()
for i in range(self.test_batch_size):
scale = self.split_shape(self.squeeze(img_metas_all[i]))
scale_h = scale[2]
scale_w = scale[3]
boxes_tuple = ()
for j in range(self.num_classes):
boxes_tmp = self.split(boxes_all[j])
out_boxes_h = boxes_tmp[i] / scale_h
out_boxes_w = boxes_tmp[i] / scale_w
boxes_tuple += (self.select(self.bbox_mask, out_boxes_w, out_boxes_h),)
boxes_all_with_batchsize += (boxes_tuple,)
output = self.multiclass_nms(boxes_all_with_batchsize, scores_all, mask_all, mask_fb_all)
return output
def multiclass_nms(self, boxes_all, scores_all, mask_all, mask_fb_all):
"""Multiscale postprocessing."""
all_bboxes = ()
all_labels = ()
all_masks = ()
all_masks_fb = ()
for i in range(self.test_batch_size):
bboxes = boxes_all[i]
scores = scores_all[i]
masks = self.cast(mask_all[i], mstype.bool_)
masks_fb = mask_fb_all[i]
_mask_fb_all = self.split_fb_mask(masks_fb)
res_boxes_tuple = ()
res_labels_tuple = ()
res_masks_tuple = ()
res_masks_fb_tuple = ()
for j in range(self.num_classes - 1):
k = j + 1
_cls_scores = scores[::, k:k + 1:1]
_bboxes = self.squeeze(bboxes[k])
_mask_o = self.reshape(masks, (self.rpn_max_num, 1))
_masks_fb = self.squeeze(_mask_fb_all[k])
cls_mask = self.greater(_cls_scores, self.test_score_thresh)
_mask = self.logicand(_mask_o, cls_mask)
_reg_mask = self.cast(self.tile(self.cast(_mask, mstype.int32), (1, 4)), mstype.bool_)
_bboxes = self.select(_reg_mask, _bboxes, self.test_box_zeros)
_fb_mask = self.expand_dims(_mask, -1)
_mask_fb_mask = self.cast(self.tile(self.cast(_fb_mask, mstype.int32), (1, 28, 28)), mstype.bool_)
_masks_fb = self.select(_mask_fb_mask, _masks_fb, self.test_mask_fb_zeros)
_cls_scores = self.select(_mask, _cls_scores, self.test_score_zeros)
__cls_scores = self.squeeze(_cls_scores)
scores_sorted, topk_inds = self.test_topk(__cls_scores, self.rpn_max_num)
topk_inds = self.reshape(topk_inds, (self.rpn_max_num, 1))
scores_sorted = self.reshape(scores_sorted, (self.rpn_max_num, 1))
_bboxes_sorted = self.gather(_bboxes, topk_inds)
_mask_fb_sorted = self.gather(_masks_fb, topk_inds)
_mask_sorted = self.gather(_mask, topk_inds)
scores_sorted = self.tile(scores_sorted, (1, 4))
cls_dets = self.concat_1((_bboxes_sorted, scores_sorted))
cls_dets = ops.Slice()(cls_dets, (0, 0), (self.rpn_max_num, 5))
cls_dets, _index, _mask_nms = self.nms_test(cls_dets)
_index = self.reshape(_index, (self.rpn_max_num, 1))
_mask_nms = self.reshape(_mask_nms, (self.rpn_max_num, 1))
_mask_n = self.gather(_mask_sorted, _index)
_mask_n = self.logicand(_mask_n, _mask_nms)
_mask_fb = self.gather(_mask_fb_sorted, _index)
cls_labels = self.oneslike(_index) * j
res_boxes_tuple += (cls_dets,)
res_labels_tuple += (cls_labels,)
res_masks_tuple += (_mask_n,)
res_masks_fb_tuple += (_mask_fb,)
# res_boxes_start = self.concat(res_boxes_tuple[:self.concat_start])
# res_labels_start = self.concat(res_labels_tuple[:self.concat_start])
# res_masks_start = self.concat(res_masks_tuple[:self.concat_start])
# res_masks_fb_start = self.concat(res_masks_fb_tuple[:self.concat_start])
#
# res_boxes_end = self.concat(res_boxes_tuple[self.concat_start:self.concat_end])
# res_labels_end = self.concat(res_labels_tuple[self.concat_start:self.concat_end])
# res_masks_end = self.concat(res_masks_tuple[self.concat_start:self.concat_end])
# res_masks_fb_end = self.concat(res_masks_fb_tuple[self.concat_start:self.concat_end])
#
# res_boxes = self.concat((res_boxes_start, res_boxes_end))
# res_labels = self.concat((res_labels_start, res_labels_end))
# res_masks = self.concat((res_masks_start, res_masks_end))
# res_masks_fb = self.concat((res_masks_fb_start, res_masks_fb_end))
#
res_boxes = self.concat((res_boxes_tuple[self.concat_start:self.concat_end]))
res_labels = self.concat((res_labels_tuple[self.concat_start:self.concat_end]))
res_masks = self.concat((res_masks_tuple[self.concat_start:self.concat_end]))
res_masks_fb = self.concat((res_masks_fb_tuple[self.concat_start:self.concat_end]))
reshape_size = (self.num_classes - 1) * self.rpn_max_num
res_boxes = self.reshape(res_boxes, (1, reshape_size, 5))
res_labels = self.reshape(res_labels, (1, reshape_size, 1))
res_masks = self.reshape(res_masks, (1, reshape_size, 1))
res_masks_fb = self.reshape(res_masks_fb, (1, reshape_size, 28, 28))
all_bboxes += (res_boxes,)
all_labels += (res_labels,)
all_masks += (res_masks,)
all_masks_fb += (res_masks_fb,)
all_bboxes = self.concat(all_bboxes)
all_labels = self.concat(all_labels)
all_masks = self.concat(all_masks)
all_masks_fb = self.concat(all_masks_fb)
return all_bboxes, all_labels, all_masks, all_masks_fb
def get_anchors(self, featmap_sizes):
"""Get anchors according to feature map sizes.
Args:
featmap_sizes (list[tuple]): Multi-level feature map sizes.
Returns:
tuple: multi-level anchors, one Tensor per feature map level
"""
num_levels = len(featmap_sizes)
# since feature map sizes of all images are the same, we only compute
# anchors for one time
multi_level_anchors = ()
for i in range(num_levels):
anchors = self.anchor_generators[i].grid_anchors(
featmap_sizes[i], self.anchor_strides[i])
multi_level_anchors += (Tensor(anchors.astype(np.float16)),)
return multi_level_anchors
def rcnn_mask_test(self, x, rois, cls_pred, reg_pred):
"""Prediction masks in an images by the bounding boxes
"""
cls_scores = self.softmax(cls_pred / self.value)
cls_scores_all = self.split(cls_scores)
reg_pred = self.reshape(reg_pred, (-1, self.num_classes, 4))
reg_pred_all = self.split(reg_pred)
rois_all = self.split(rois)
boxes_tuple = ()
for i in range(self.test_batch_size):
cls_score_max_index, _ = self.argmax_with_value(cls_scores_all[i])
cls_score_max_index = self.cast(self.onehot(cls_score_max_index, self.num_classes,
self.on_value, self.off_value), mstype.float16)
cls_score_max_index = self.expand_dims(cls_score_max_index, -1)
cls_score_max_index = self.tile(cls_score_max_index, (1, 1, 4))
reg_pred_max = reg_pred_all[i] * cls_score_max_index
reg_pred_max = self.reducesum(reg_pred_max, 1)
out_boxes_i = self.decode(rois_all[i], reg_pred_max)
boxes_tuple += (out_boxes_i,)
boxes_all = self.concat(boxes_tuple)
boxes_rois = self.concat_1((self.roi_align_index_test_tensor, boxes_all))
boxes_rois = self.cast(boxes_rois, mstype.float32)
roi_feats_mask_test = self.roi_align_mask_test(boxes_rois,
self.cast(x[0], mstype.float32),
self.cast(x[1], mstype.float32),
self.cast(x[2], mstype.float32),
self.cast(x[3], mstype.float32))
roi_feats_mask_test = self.cast(roi_feats_mask_test, mstype.float16)
mask_fb_pred_all = self.rcnn_mask(roi_feats_mask_test)
return mask_fb_pred_all
class MaskRcnn_Infer(nn.Cell):
def __init__(self, config):
super(MaskRcnn_Infer, self).__init__()
self.network = Mask_Rcnn_Resnet50(config)
self.network.set_train(False)
def construct(self, img_data, img_metas):
output = self.network(img_data, img_metas, None, None, None, None)
return output
|
the-stack_0_25032
|
# -*- coding: utf-8 -*-
# MinIO Python Library for Amazon S3 Compatible Cloud Storage,
# (C) 2020 MinIO, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from minio import Minio
from minio.credentials import AssumeRoleProvider
# The STS endpoint usually points to the MinIO server.
sts_endpoint = "https://play.min.io/"
# Access key to fetch credentials from STS endpoint.
access_key = "miniotraining"
# Secret key to fetch credentials from STS endpoint.
secret_key = "miniotraining123"
provider = AssumeRoleProvider(
sts_endpoint,
access_key,
secret_key,
)
#print(provider.retrieve())
client = Minio("play.min.io", credentials=provider)
# Get information of an object.
stat = client.stat_object("training", "cities.csv")
print(stat)
|
the-stack_0_25033
|
"""
Unit test to verify streamer package
"""
import unittest
import unittest.mock
from streamer import split, split_gen
class TestStreamer(unittest.TestCase):
"""
Test split utilities from streamer package
"""
def setUp(self):
"""
Prepare input for splitting
"""
self.records = [
# Length: 84
'I Would Have Thought The Hardest Part Was Serving That Pressing Need Of Yours To ...',
# Length: 64
'Ive Got A Charge In My Head, Im Going To Die Unless You Kill Me!',
# Length: 42
'Just Stay Alive! Im Not Going To Lose You!',
# Length: 55
'If I Let You Know Where Im Going, I Wont Be On Holiday.',
# Length: 40
'Whats Done Is Done When We Say Its Done!',
# Length: 21
'Mission Accomplished!',
# Length: 85
'We just rolled up a snowball and tossed it into hell. Now let’s see what chance it...',
# Length: 42
'Kittridge, Youve Never Seen Me Very Upset!',
# Length: 29
'The Countdown Is Not Helping.',
# Length: 21
'Thats The Wrong Door!',
# Length: 40
'Well Burn That Bridge When We Get To It.',
# Length: 23
'Red Light! Green Light!'
]
def test_split_on_batch_size(self):
"""
Test if batches are being made correctly on reaching size threshold
"""
batches = split(records = self.records,
max_record_size = 70,
max_batch_size = 200,
max_batch_len = 20)
self.assertEqual(len(batches), 3)
self.assertIsInstance(batches, list)
# First record (record 0) is ignored as its size > 70, Batch size: 161
self.assertEqual(batches[0], [self.records[1], self.records[2], self.records[3]])
# Record 6 is ignored as its size > 70, Batch size: 193
self.assertEqual(batches[1], [self.records[4], self.records[5], self.records[7],
self.records[8], self.records[9], self.records[10]])
# Last batch, batch size: 23
self.assertEqual(batches[2], [self.records[11]])
def test_split_on_batch_len(self):
"""
Test if batches are being made correctly on reaching limit on length
"""
batches = split(records = self.records,
max_record_size = 70,
max_batch_size = 400,
max_batch_len = 5)
self.assertEqual(len(batches), 2)
self.assertIsInstance(batches, list)
# First record (record 0) is ignored as its size > 70, Batch length: 5
self.assertEqual(batches[0], [self.records[1], self.records[2], self.records[3],
self.records[4], self.records[5]])
# Record 6 is ignored as its size > 70, Batch length: 5
self.assertEqual(batches[1], [self.records[7], self.records[8], self.records[9],
self.records[10], self.records[11]])
def test_split_gen(self):
"""
Test if generator is creating batches correctly
"""
batches = split_gen(records = self.records,
max_record_size = 70,
max_batch_size = 200,
max_batch_len = 4)
# Generator returned an iterator
self.assertTrue(hasattr(batches, '__iter__'))
# First record (record 0) is ignored as its size > 70
# Batch size: 161, Length: 3, Break on batch size
batch = next(batches)
self.assertEqual(batch, [self.records[1], self.records[2], self.records[3]])
# Record 6 is ignored as its size > 70
# Batch size: 153, Length: 4, Break on batch length
batch = next(batches)
self.assertEqual(batch, [self.records[4], self.records[5], self.records[7],
self.records[8]])
# Last batch, batch size: 84, Length: 3
batch = next(batches)
self.assertEqual(batch, [self.records[9], self.records[10], self.records[11]])
# No more batch
with self.assertRaises(StopIteration):
next(batches)
def test_invalid_config_batch_len(self):
"""
Test if the correct error is thrown when batch length is configured incorrectly
"""
with self.assertRaises(ValueError) as err_cntx:
split(records = self.records,
max_record_size = 70,
max_batch_size = 200,
max_batch_len = 0)
self.assertEqual('Max batch length must be greater than zero', str(err_cntx.exception))
def test_invalid_config_record_size(self):
"""
Test if the correct error is thrown when record size is configured incorrectly
"""
with self.assertRaises(ValueError) as err_cntx:
split(records = self.records,
max_record_size = 0,
max_batch_size = 200,
max_batch_len = 5)
self.assertEqual('Max record size must be greater than zero', str(err_cntx.exception))
def test_invalid_config_batch_size(self):
"""
Test if the correct error is thrown when batch size is configured incorrectly
"""
with self.assertRaises(ValueError) as err_cntx:
split(records = self.records,
max_record_size = 200,
max_batch_size = 100,
max_batch_len = 5)
self.assertEqual('Max batch size must be greater than or equal to max record size',
str(err_cntx.exception))
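# For reference, a minimal sketch of a split()/split_gen() pair consistent with the
# behaviour exercised above (oversized records skipped, batches emitted on reaching
# max_batch_size or max_batch_len). This is an illustrative assumption, not the actual
# implementation shipped in the streamer package; names are chosen here for illustration.
def _reference_split_gen(records, max_record_size, max_batch_size, max_batch_len):
    if max_batch_len <= 0:
        raise ValueError('Max batch length must be greater than zero')
    if max_record_size <= 0:
        raise ValueError('Max record size must be greater than zero')
    if max_batch_size < max_record_size:
        raise ValueError('Max batch size must be greater than or equal to max record size')
    batch, size = [], 0
    for record in records:
        if len(record) > max_record_size:
            continue  # oversized records are dropped
        if batch and size + len(record) > max_batch_size:
            yield batch  # adding this record would exceed the size budget
            batch, size = [], 0
        batch.append(record)
        size += len(record)
        if len(batch) == max_batch_len:
            yield batch  # length limit reached
            batch, size = [], 0
    if batch:
        yield batch
def _reference_split(records, max_record_size, max_batch_size, max_batch_len):
    return list(_reference_split_gen(records, max_record_size, max_batch_size, max_batch_len))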
if __name__ == '__main__':
unittest.main()
|
the-stack_0_25035
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.paging import Paged
class ReservationOrderResponsePaged(Paged):
"""
A paging container for iterating over a list of :class:`ReservationOrderResponse <azure.mgmt.reservations.models.ReservationOrderResponse>` objects.
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[ReservationOrderResponse]'}
}
def __init__(self, *args, **kwargs):
super(ReservationOrderResponsePaged, self).__init__(*args, **kwargs)
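# Typical usage (the client call below is hypothetical and shown only for illustration):
# iterating a Paged instance transparently fetches subsequent pages via 'nextLink', e.g.
#     for order in client.reservation_order.list():
#         print(order)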
|
the-stack_0_25036
|
from copy import copy
import pytest
from tests.unittest_tools import assertFailure_fast
from theano import change_flags
from theano.gof import destroyhandler, graph
from theano.gof.fg import FunctionGraph, InconsistencyError
from theano.gof.graph import Apply, Variable
from theano.gof.op import Op
from theano.gof.opt import (
NavigatorOptimizer,
OpKeyOptimizer,
OpSub,
PatternSub,
TopoOptimizer,
)
from theano.gof.toolbox import ReplaceValidate
from theano.gof.type import Type
def PatternOptimizer(p1, p2, ign=True):
return OpKeyOptimizer(PatternSub(p1, p2), ignore_newtrees=ign)
def OpSubOptimizer(op1, op2, fail=NavigatorOptimizer.warn_ignore, ign=True):
return TopoOptimizer(OpSub(op1, op2), ignore_newtrees=ign, failure_callback=fail)
def as_variable(x):
assert isinstance(x, Variable)
return x
class MyType(Type):
def filter(self, data):
return data
def __eq__(self, other):
return isinstance(other, MyType)
def MyVariable(name):
return Variable(MyType(), None, None, name=name)
def MyConstant(data):
return graph.Constant(MyType(), data=data)
class MyOp(Op):
def __init__(
self,
nin,
name,
vmap=None,
dmap=None,
nout=1,
destroyhandler_tolerate_same=None,
destroyhandler_tolerate_aliased=None,
):
if vmap is None:
vmap = {}
if dmap is None:
dmap = {}
if destroyhandler_tolerate_same is None:
destroyhandler_tolerate_same = []
if destroyhandler_tolerate_aliased is None:
destroyhandler_tolerate_aliased = []
self.nin = nin
self.nout = nout
self.name = name
self.destroy_map = dmap
self.view_map = vmap
self.destroyhandler_tolerate_same = destroyhandler_tolerate_same
self.destroyhandler_tolerate_aliased = destroyhandler_tolerate_aliased
def make_node(self, *inputs):
assert len(inputs) == self.nin
inputs = list(map(as_variable, inputs))
for input in inputs:
if not isinstance(input.type, MyType):
raise Exception("Error 1")
outputs = [MyVariable(self.name + "_R") for i in range(self.nout)]
return Apply(self, inputs, outputs)
def __str__(self):
return self.name
sigmoid = MyOp(1, "Sigmoid")
transpose_view = MyOp(1, "TransposeView", vmap={0: [0]})
add = MyOp(2, "Add")
add_in_place = MyOp(2, "AddInPlace", dmap={0: [0]})
add_in_place_2 = MyOp(
2, "AddInPlace", dmap={0: [0]}, destroyhandler_tolerate_same=[(0, 1)]
)
add_in_place_3 = MyOp(
2, "AddInPlace", dmap={0: [0]}, destroyhandler_tolerate_aliased=[(0, 1)]
)
dot = MyOp(2, "Dot")
multiple = MyOp(2, "Multiple", nout=2)
multiple_in_place_0 = MyOp(2, "MultipleInPlace0", nout=2, dmap={0: [0]})
multiple_in_place_1 = MyOp(2, "MultipleInPlace1", nout=2, dmap={1: [1]})
multiple_in_place_0_1 = MyOp(2, "MultipleInPlace01", nout=2, dmap={0: [0], 1: [1]})
def inputs():
x = MyVariable("x")
y = MyVariable("y")
z = MyVariable("z")
return x, y, z
def Env(inputs, outputs, validate=True):
e = FunctionGraph(inputs, outputs, clone=False)
e.attach_feature(destroyhandler.DestroyHandler())
e.attach_feature(ReplaceValidate())
if validate:
e.validate()
return e
class FailureWatch:
# when passed to OpSubOptimizer or PatternOptimizer, counts the
# number of failures
def __init__(self):
self.failures = 0
def __call__(self, exc, nav, pairs, lopt, node):
assert isinstance(exc, InconsistencyError)
self.failures += 1
#################
# Test protocol #
#################
def test_misc():
x, y, z = inputs()
e = transpose_view(transpose_view(transpose_view(transpose_view(x))))
g = Env([x, y, z], [e])
assert g.consistent()
PatternOptimizer((transpose_view, (transpose_view, "x")), "x").optimize(g)
assert str(g) == "FunctionGraph(x)"
new_e = add(x, y)
g.replace_validate(x, new_e)
assert str(g) == "FunctionGraph(Add(x, y))"
g.replace(new_e, dot(add_in_place(x, y), transpose_view(x)))
assert str(g) == "FunctionGraph(Dot(AddInPlace(x, y), TransposeView(x)))"
assert not g.consistent()
######################
# Test protocol skip #
######################
@assertFailure_fast
def test_aliased_inputs_replacement():
x, y, z = inputs()
tv = transpose_view(x)
tvv = transpose_view(tv)
sx = sigmoid(x)
e = add_in_place(x, tv)
g = Env([x, y], [e], False)
assert not g.consistent()
g.replace(tv, sx)
assert g.consistent()
g.replace(sx, tv)
assert not g.consistent()
g.replace(tv, tvv)
assert not g.consistent()
g.replace(tv, sx)
assert g.consistent()
def test_indestructible():
x, y, z = inputs()
x.tag.indestructible = True
x = copy(x)
# checking if indestructible survives the copy!
assert x.tag.indestructible
e = add_in_place(x, y)
g = Env([x, y, z], [e], False)
assert not g.consistent()
g.replace_validate(e, add(x, y))
assert g.consistent()
@assertFailure_fast
def test_usage_loop_through_views_2():
x, y, z = inputs()
e0 = transpose_view(transpose_view(sigmoid(x)))
e = dot(add_in_place(x, y), transpose_view(e0))
g = Env([x, y, z], [e])
assert g.consistent() # because sigmoid can do the copy
g.replace(e0, x)
assert not g.consistent() # we cut off the path to the sigmoid
@assertFailure_fast
def test_destroyers_loop():
# AddInPlace(x, y) and AddInPlace(y, x) should not coexist
x, y, z = inputs()
e1 = add(x, y)
e2 = add(y, x)
g = Env([x, y, z], [e1, e2])
assert g.consistent()
g.replace_validate(e1, add_in_place(x, y))
assert g.consistent()
with pytest.raises(InconsistencyError):
g.replace_validate(e2, add_in_place(y, x))
assert g.consistent()
x, y, z = inputs()
e1 = add(x, y)
e2 = add(y, x)
g = Env([x, y, z], [e1, e2])
assert g.consistent()
g.replace_validate(e2, add_in_place(y, x))
assert g.consistent()
with pytest.raises(InconsistencyError):
g.replace_validate(e1, add_in_place(x, y))
assert g.consistent()
########
# Misc #
########
def test_aliased_inputs():
x, y, z = inputs()
e = add_in_place(x, x)
g = Env([x], [e], False)
assert not g.consistent()
def test_aliased_inputs2():
x, y, z = inputs()
e = add_in_place(x, transpose_view(x))
g = Env([x], [e], False)
assert not g.consistent()
@assertFailure_fast
def test_aliased_inputs_tolerate():
x, y, z = inputs()
e = add_in_place_2(x, x)
g = Env([x], [e], False)
assert g.consistent()
def test_aliased_inputs_tolerate2():
x, y, z = inputs()
e = add_in_place_2(x, transpose_view(x))
g = Env([x], [e], False)
assert not g.consistent()
@assertFailure_fast
def test_same_aliased_inputs_ignored():
x, y, z = inputs()
e = add_in_place_3(x, x)
g = Env([x], [e], False)
assert g.consistent()
@assertFailure_fast
def test_different_aliased_inputs_ignored():
x, y, z = inputs()
e = add_in_place_3(x, transpose_view(x))
g = Env([x], [e], False)
assert g.consistent()
# warning - don't run this because it would produce the wrong answer
# add_in_place_3 is actually not correct when aliasing of inputs
# is ignored.
def test_indestructible_through_views():
x, y, z = inputs()
x.tag.indestructible = True
tv = transpose_view(x)
e = add_in_place(tv, y)
g = Env([x, y, z], [e], False)
assert not g.consistent()
g.replace_validate(tv, sigmoid(x))
assert g.consistent()
def test_indirect():
x, y, z = inputs()
e0 = add_in_place(x, y)
e = dot(sigmoid(e0), transpose_view(x))
g = Env([x, y, z], [e], False)
assert not g.consistent()
new_e0 = add(x, y)
g.replace(e0, new_e0)
assert g.consistent()
g.replace(new_e0, add_in_place(x, y))
assert not g.consistent()
@assertFailure_fast
def test_indirect_2():
x, y, z = inputs()
e0 = transpose_view(x)
e = dot(sigmoid(add_in_place(x, y)), e0)
g = Env([x, y, z], [e], False)
assert not g.consistent()
new_e0 = add(e0, y)
g.replace(e0, new_e0)
assert g.consistent()
@assertFailure_fast
def test_long_destroyers_loop():
x, y, z = inputs()
e = dot(dot(add_in_place(x, y), add_in_place(y, z)), add(z, x))
g = Env([x, y, z], [e])
assert g.consistent()
OpSubOptimizer(add, add_in_place).optimize(g)
assert g.consistent()
# we don't want to see that!
assert (
str(g)
!= "FunctionGraph(Dot(Dot(AddInPlace(x, y), AddInPlace(y, z)), AddInPlace(z, x)))"
)
e2 = dot(dot(add_in_place(x, y), add_in_place(y, z)), add_in_place(z, x))
with pytest.raises(InconsistencyError):
Env(*graph.clone([x, y, z], [e2]))
def test_misc_2():
x, y, z = inputs()
tv = transpose_view(x)
e = add_in_place(x, tv)
g = Env([x, y], [e], False)
assert not g.consistent()
g.replace(tv, x)
assert not g.consistent()
def test_multi_destroyers():
x, y, z = inputs()
e = add(add_in_place(x, y), add_in_place(x, y))
with pytest.raises(InconsistencyError):
Env([x, y, z], [e])
@assertFailure_fast
def test_multi_destroyers_through_views():
x, y, z = inputs()
e = dot(add(transpose_view(z), y), add(z, x))
g = Env([x, y, z], [e])
assert g.consistent()
fail = FailureWatch()
OpSubOptimizer(add, add_in_place, fail).optimize(g)
assert g.consistent()
assert fail.failures == 1 # should have succeeded once and failed once
def test_repair_destroy_path():
x, y, z = inputs()
e1 = transpose_view(transpose_view(x))
e2 = transpose_view(transpose_view(e1))
e3 = add_in_place(e2, y)
e4 = add_in_place(e1, z)
g = Env([x, y, z], [e3, e4], False)
assert not g.consistent()
g.replace(e2, transpose_view(x))
assert not g.consistent()
def test_usage_loop():
x, y, z = inputs()
g = Env([x, y, z], [dot(add_in_place(x, z), x)], False)
assert not g.consistent()
# replace add_in_place with add
OpSubOptimizer(add_in_place, add).optimize(g)
assert g.consistent()
def test_usage_loop_through_views():
x, y, z = inputs()
aip = add_in_place(x, y)
e = dot(aip, transpose_view(x))
g = Env([x, y, z], [e], False)
assert not g.consistent()
g.replace_validate(aip, add(x, z))
assert g.consistent()
@assertFailure_fast
def test_usage_loop_insert_views():
x, y, z = inputs()
e = dot(add_in_place(x, add(y, z)), sigmoid(sigmoid(sigmoid(sigmoid(sigmoid(x))))))
g = Env([x, y, z], [e])
assert g.consistent()
fail = FailureWatch()
OpSubOptimizer(sigmoid, transpose_view, fail).optimize(g)
assert g.consistent()
# it must keep one sigmoid in the long sigmoid chain
assert fail.failures == 1
def test_value_repl():
x, y, z = inputs()
sy = sigmoid(y)
e = add_in_place(x, sy)
g = Env([x, y], [e], False)
assert g.consistent()
g.replace(sy, MyConstant("abc"))
assert g.consistent()
@change_flags(compute_test_value="off")
def test_value_repl_2():
x, y, z = inputs()
sy = sigmoid(y)
e = add_in_place(x, sy)
g = Env([x, y], [e], False)
assert g.consistent()
g.replace(sy, transpose_view(MyConstant("abc")))
assert g.consistent()
@assertFailure_fast
def test_multiple_inplace():
# this tests issue #5223
# there were some problems with Ops that have more than
# one in-place input.
x, y, z = inputs()
# we will try to replace this op with an in-place version
m = multiple(x, y)
# this makes it impossible to run in-place on x
e_1 = dot(m[0], x)
# try to confuse the DestroyHandler: this dot Op can run
# before multiple and then multiple can still run in-place on y
e_2 = dot(y, y)
g = Env([x, y], [e_1, e_2], False)
assert g.consistent()
# try to work in-place on x/0 and y/1 (this should fail)
fail = FailureWatch()
OpSubOptimizer(multiple, multiple_in_place_0_1, fail).optimize(g)
assert g.consistent()
assert fail.failures == 1
# try to work in-place on x/0 (this should fail)
fail = FailureWatch()
OpSubOptimizer(multiple, multiple_in_place_0, fail).optimize(g)
assert g.consistent()
assert fail.failures == 1
# try to work in-place on y/1 (this should succeed)
fail = FailureWatch()
OpSubOptimizer(multiple, multiple_in_place_1, fail).optimize(g)
assert g.consistent()
assert fail.failures == 0
# try to work in-place on x/0 and y/1 (this should still fail)
fail = FailureWatch()
OpSubOptimizer(multiple_in_place_1, multiple_in_place_0_1, fail).optimize(g)
assert g.consistent()
assert fail.failures == 1
|
the-stack_0_25037
|
#!/usr/bin/env python3
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Executable and reusable sample for archiving a detection rule."""
import argparse
from google.auth.transport import requests
from common import chronicle_auth
CHRONICLE_API_BASE_URL = "https://backstory.googleapis.com"
def archive_rule(http_session: requests.AuthorizedSession, version_id: str):
"""Archives a detection rule.
Archiving a rule will fail if:
- The provided version is not the latest rule version
- The rule is enabled as live
- The rule has retrohunts in progress
If alerting is enabled for a rule, archiving the rule will automatically
disable alerting for the rule.
Args:
http_session: Authorized session for HTTP requests.
version_id: Unique ID of the detection rule to archive ("ru_<UUID>" or
"ru_<UUID>@v_<seconds>_<nanoseconds>"). If a version suffix isn't
specified we use the rule's latest version.
Raises:
requests.exceptions.HTTPError: HTTP request resulted in an error
(response.status_code >= 400).
"""
url = f"{CHRONICLE_API_BASE_URL}/v2/detect/rules/{version_id}:archive"
response = http_session.request("POST", url)
# Expected server response:
# {}
if response.status_code >= 400:
print(response.text)
response.raise_for_status()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
chronicle_auth.add_argument_credentials_file(parser)
parser.add_argument(
"-vi",
"--version_id",
type=str,
required=True,
help="version ID ('ru_<UUID>[@v_<seconds>_<nanoseconds>]')")
args = parser.parse_args()
session = chronicle_auth.init_session(
chronicle_auth.init_credentials(args.credentials_file))
archive_rule(session, args.version_id)
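# Illustrative invocation (the script filename and the credentials flag are assumptions;
# the flag is whatever chronicle_auth.add_argument_credentials_file registers, and the
# rule ID below is a placeholder):
#
#   python archive_rule.py --credentials_file=creds.json \
#       -vi="ru_12345678-1234-1234-1234-123456789012"
#
# On success the API returns an empty JSON object ({}), as noted in archive_rule.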
|
the-stack_0_25039
|
from . import logging as logg
from .preprocessing.utils import set_initial_size
import os, re
import numpy as np
import pandas as pd
from pandas.api.types import is_categorical_dtype
from urllib.request import urlretrieve
from pathlib import Path
from scipy.sparse import issparse
from anndata import AnnData
from scanpy import read, read_loom
def load(filename, backup_url=None, header="infer", index_col="infer", **kwargs):
"""Load a csv, txt, tsv or npy file."""
numpy_ext = {"npy", "npz"}
pandas_ext = {"csv", "txt", "tsv"}
if not os.path.exists(filename) and backup_url is None:
raise FileNotFoundError(f"Did not find file {filename}.")
elif not os.path.exists(filename):
d = os.path.dirname(filename)
if not os.path.exists(d):
os.makedirs(d)
urlretrieve(backup_url, filename)
ext = Path(filename).suffixes[-1][1:]
if ext in numpy_ext:
return np.load(filename, **kwargs)
elif ext in pandas_ext:
df = pd.read_csv(
filename,
header=header,
index_col=None if index_col == "infer" else index_col,
**kwargs,
)
if index_col == "infer" and len(df.columns) > 1:
is_int_index = all(np.arange(0, len(df)) == df.iloc[:, 0])
is_str_index = isinstance(df.iloc[0, 0], str) and all(
[not isinstance(d, str) for d in df.iloc[0, 1:]]
)
if is_int_index or is_str_index:
df.set_index(df.columns[0], inplace=True)
return df
else:
raise ValueError(
f"'{filename}' does not end on a valid extension.\n"
f"Please, provide one of the available extensions.\n{numpy_ext | pandas_ext}\n"
)
read_csv = load
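# Minimal usage sketch for `load` (file names below are hypothetical): dispatch is
# purely by file extension, so numpy and pandas formats go through the same call.
#
#   arr = load("data/velocity_graph.npz")    # -> handed to np.load
#   df = load("data/obs_table.csv")          # -> pandas DataFrame, index inferred
#   df = load("data/obs_table.csv", backup_url="https://example.com/obs_table.csv")
#   # downloads the file first if it is not present locally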
def clean_obs_names(data, base="[AGTCBDHKMNRSVWY]", ID_length=12, copy=False):
"""Clean up the obs_names.
For example an obs_name 'sample1_AGTCdate' is changed to 'AGTC' of the sample
'sample1_date'. The sample name is then saved in obs['sample_batch'].
    The genetic codes are identified according to
https://www.neb.com/tools-and-resources/usage-guidelines/the-genetic-code.
Arguments
---------
    data: :class:`~anndata.AnnData`
Annotated data matrix.
base: `str` (default: `[AGTCBDHKMNRSVWY]`)
Genetic code letters to be identified.
ID_length: `int` (default: 12)
Length of the Genetic Codes in the samples.
copy: `bool` (default: `False`)
Return a copy instead of writing to adata.
Returns
-------
Returns or updates `adata` with the attributes
obs_names: list
updated names of the observations
sample_batch: `.obs`
names of the identified sample batches
"""
def get_base_list(name, base):
base_list = base
while re.search(base_list + base, name) is not None:
base_list += base
if len(base_list) == 0:
raise ValueError("Encountered an invalid ID in obs_names: ", name)
return base_list
adata = data.copy() if copy else data
names = adata.obs_names
base_list = get_base_list(names[0], base)
if len(np.unique([len(name) for name in adata.obs_names])) == 1:
start, end = re.search(base_list, names[0]).span()
newIDs = [name[start:end] for name in names]
start, end = 0, len(newIDs[0])
for i in range(end - ID_length):
if np.any([ID[i] not in base for ID in newIDs]):
start += 1
if np.any([ID[::-1][i] not in base for ID in newIDs]):
end -= 1
newIDs = [ID[start:end] for ID in newIDs]
prefixes = [names[i].replace(newIDs[i], "") for i in range(len(names))]
else:
prefixes, newIDs = [], []
for name in names:
match = re.search(base_list, name)
newID = (
re.search(get_base_list(name, base), name).group()
if match is None
else match.group()
)
newIDs.append(newID)
prefixes.append(name.replace(newID, ""))
adata.obs_names = newIDs
if len(prefixes[0]) > 0 and len(np.unique(prefixes)) > 1:
adata.obs["sample_batch"] = (
pd.Categorical(prefixes)
if len(np.unique(prefixes)) < adata.n_obs
else prefixes
)
adata.obs_names_make_unique()
return adata if copy else None
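# Example of the renaming performed above (barcodes are hypothetical): an obs_name
# such as 'sample1_AGTCAGTCAGTC' becomes 'AGTCAGTCAGTC', and the stripped prefix
# 'sample1_' is stored in adata.obs['sample_batch'] once several distinct prefixes
# are present.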
def merge(adata, ldata, copy=True):
"""Merges two annotated data matrices.
Arguments
---------
adata: :class:`~anndata.AnnData`
Annotated data matrix (reference data set).
ldata: :class:`~anndata.AnnData`
Annotated data matrix (to be merged into adata).
Returns
-------
Returns a :class:`~anndata.AnnData` object
"""
adata.var_names_make_unique()
ldata.var_names_make_unique()
if (
"spliced" in ldata.layers.keys()
and "initial_size_spliced" not in ldata.obs.keys()
):
set_initial_size(ldata)
elif (
"spliced" in adata.layers.keys()
and "initial_size_spliced" not in adata.obs.keys()
):
set_initial_size(adata)
common_obs = pd.unique(adata.obs_names.intersection(ldata.obs_names))
common_vars = pd.unique(adata.var_names.intersection(ldata.var_names))
if len(common_obs) == 0:
clean_obs_names(adata)
clean_obs_names(ldata)
common_obs = adata.obs_names.intersection(ldata.obs_names)
if copy:
_adata = adata[common_obs].copy()
_ldata = ldata[common_obs].copy()
else:
adata._inplace_subset_obs(common_obs)
_adata, _ldata = adata, ldata[common_obs].copy()
_adata.var_names_make_unique()
_ldata.var_names_make_unique()
same_vars = len(_adata.var_names) == len(_ldata.var_names) and np.all(
_adata.var_names == _ldata.var_names
)
join_vars = len(common_vars) > 0
if join_vars and not same_vars:
_adata._inplace_subset_var(common_vars)
_ldata._inplace_subset_var(common_vars)
for attr in _ldata.obs.keys():
if attr not in _adata.obs.keys():
_adata.obs[attr] = _ldata.obs[attr]
for attr in _ldata.obsm.keys():
if attr not in _adata.obsm.keys():
_adata.obsm[attr] = _ldata.obsm[attr]
for attr in _ldata.uns.keys():
if attr not in _adata.uns.keys():
_adata.uns[attr] = _ldata.uns[attr]
if join_vars:
for attr in _ldata.layers.keys():
if attr not in _adata.layers.keys():
_adata.layers[attr] = _ldata.layers[attr]
if _adata.shape[1] == _ldata.shape[1]:
same_vars = len(_adata.var_names) == len(_ldata.var_names) and np.all(
_adata.var_names == _ldata.var_names
)
if same_vars:
for attr in _ldata.var.keys():
if attr not in _adata.var.keys():
_adata.var[attr] = _ldata.var[attr]
for attr in _ldata.varm.keys():
if attr not in _adata.varm.keys():
_adata.varm[attr] = _ldata.varm[attr]
else:
raise ValueError("Variable names are not identical.")
return _adata if copy else None
def obs_df(adata, keys, layer=None):
lookup_keys = [k for k in keys if k in adata.var_names]
if len(lookup_keys) < len(keys):
logg.warn(
f"Keys {[k for k in keys if k not in adata.var_names]} "
f"were not found in `adata.var_names`."
)
df = pd.DataFrame(index=adata.obs_names)
for l in lookup_keys:
df[l] = adata.obs_vector(l, layer=layer)
return df
def var_df(adata, keys, layer=None):
lookup_keys = [k for k in keys if k in adata.obs_names]
if len(lookup_keys) < len(keys):
logg.warn(
f"Keys {[k for k in keys if k not in adata.obs_names]} "
f"were not found in `adata.obs_names`."
)
df = pd.DataFrame(index=adata.var_names)
for l in lookup_keys:
df[l] = adata.var_vector(l, layer=layer)
return df
def get_df(
data,
keys=None,
layer=None,
index=None,
columns=None,
sort_values=None,
dropna="all",
precision=None,
):
"""Get dataframe for a specified adata key.
Return values for specified key
(in obs, var, obsm, varm, obsp, varp, uns, or layers) as a dataframe.
Arguments
    ---------
    data
        AnnData object or a numpy array to get values from.
keys
Keys from `.var_names`, `.obs_names`, `.var`, `.obs`,
`.obsm`, `.varm`, `.obsp`, `.varp`, `.uns`, or `.layers`.
layer
Layer of `adata` to use as expression values.
index
List to set as index.
columns
List to set as columns names.
sort_values
        Whether to sort values by the first column (sort_values=True) or a specified column.
dropna
Drop columns/rows that contain NaNs in all ('all') or in any entry ('any').
precision
Set precision for pandas dataframe.
Returns
-------
A dataframe.
"""
if precision is not None:
pd.set_option("precision", precision)
if isinstance(data, AnnData):
keys, keys_split = (
keys.split("*") if isinstance(keys, str) and "*" in keys else (keys, None)
)
keys, key_add = (
keys.split("/") if isinstance(keys, str) and "/" in keys else (keys, None)
)
keys = [keys] if isinstance(keys, str) else keys
key = keys[0]
s_keys = ["obs", "var", "obsm", "varm", "uns", "layers"]
d_keys = [
data.obs.keys(),
data.var.keys(),
data.obsm.keys(),
data.varm.keys(),
data.uns.keys(),
data.layers.keys(),
]
if hasattr(data, "obsp") and hasattr(data, "varp"):
s_keys.extend(["obsp", "varp"])
d_keys.extend([data.obsp.keys(), data.varp.keys()])
if keys is None:
df = data.to_df()
elif key in data.var_names:
df = obs_df(data, keys, layer=layer)
elif key in data.obs_names:
df = var_df(data, keys, layer=layer)
else:
if keys_split is not None:
keys = [
k
for k in list(data.obs.keys()) + list(data.var.keys())
if key in k and keys_split in k
]
key = keys[0]
s_key = [s for (s, d_key) in zip(s_keys, d_keys) if key in d_key]
if len(s_key) == 0:
raise ValueError(f"'{key}' not found in any of {', '.join(s_keys)}.")
if len(s_key) > 1:
logg.warn(f"'{key}' found multiple times in {', '.join(s_key)}.")
s_key = s_key[-1]
df = getattr(data, s_key)[keys if len(keys) > 1 else key]
if key_add is not None:
df = df[key_add]
if index is None:
index = (
data.var_names
if s_key == "varm"
else data.obs_names
if s_key in {"obsm", "layers"}
else None
)
if index is None and s_key == "uns" and hasattr(df, "shape"):
key_cats = np.array(
[
key
for key in data.obs.keys()
if is_categorical_dtype(data.obs[key])
]
)
num_cats = [
len(data.obs[key].cat.categories) == df.shape[0]
for key in key_cats
]
if np.sum(num_cats) == 1:
index = data.obs[key_cats[num_cats][0]].cat.categories
if (
columns is None
and len(df.shape) > 1
and df.shape[0] == df.shape[1]
):
columns = index
elif isinstance(index, str) and index in data.obs.keys():
index = pd.Categorical(data.obs[index]).categories
if columns is None and s_key == "layers":
columns = data.var_names
elif isinstance(columns, str) and columns in data.obs.keys():
columns = pd.Categorical(data.obs[columns]).categories
elif isinstance(data, pd.DataFrame):
if isinstance(keys, str) and "*" in keys:
keys, keys_split = keys.split("*")
keys = [k for k in data.columns if keys in k and keys_split in k]
df = data[keys] if keys is not None else data
else:
df = data
if issparse(df):
df = np.array(df.A)
if columns is None and hasattr(df, "names"):
columns = df.names
df = pd.DataFrame(df, index=index, columns=columns)
if dropna:
df.replace("", np.nan, inplace=True)
how = dropna if isinstance(dropna, str) else "any" if dropna is True else "all"
df.dropna(how=how, axis=0, inplace=True)
df.dropna(how=how, axis=1, inplace=True)
if sort_values:
sort_by = (
sort_values
if isinstance(sort_values, str) and sort_values in df.columns
else df.columns[0]
)
df = df.sort_values(by=sort_by, ascending=False)
if hasattr(data, "var_names"):
if df.index[0] in data.var_names:
df.var_names = df.index
elif df.columns[0] in data.var_names:
df.var_names = df.columns
if hasattr(data, "obs_names"):
if df.index[0] in data.obs_names:
df.obs_names = df.index
elif df.columns[0] in data.obs_names:
df.obs_names = df.columns
return df
DataFrame = get_df
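# Usage sketch for get_df (keys below are hypothetical): values may come from
# var_names (per-cell expression), .obs/.var columns, layers, or nested .uns
# entries addressed with '/', while '*' matches obs/var keys by substring.
#
#   get_df(adata, keys=["Actb", "Gapdh"], layer="Ms")   # expression of two genes per cell
#   get_df(adata, "fit*", dropna=True)                  # all obs/var columns whose name contains 'fit'
#   get_df(adata, "paga/connectivities")                # nested entry inside adata.uns['paga']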
def load_biomart():
# human genes from https://biomart.genenames.org/martform
# mouse genes from http://www.ensembl.org/biomart/martview
# antibodies from https://www.biolegend.com/en-us/totalseq
nb_url = "https://github.com/theislab/scvelo_notebooks/raw/master/"
filename = "data/biomart/mart_export_human.txt"
df = load(filename, sep="\t", backup_url=f"{nb_url}{filename}")
df.columns = ["ensembl", "gene name"]
df.index = df.pop("ensembl")
filename = "data/biomart/mart_export_mouse.txt"
df2 = load(filename, sep="\t", backup_url=f"{nb_url}{filename}")
df2.columns = ["ensembl", "gene name"]
df2.index = df2.pop("ensembl")
df = pd.concat([df, df2])
return df
def convert_to_gene_names(ensembl_names=None):
df = load_biomart()
if ensembl_names is not None:
if isinstance(ensembl_names, str):
            ensembl_names = [ensembl_names]
valid_names = [name for name in ensembl_names if name in df.index]
if len(valid_names) > 0:
df = df.loc[valid_names]
gene_names = np.array(ensembl_names)
idx = pd.DataFrame(ensembl_names).isin(df.index).values.flatten()
gene_names[idx] = df["gene name"].values
df = pd.DataFrame([ensembl_names, gene_names]).T
df.columns = ["ensembl", "gene name"]
df.index = df.pop("ensembl")
return df
def gene_info(name, fields="name,symbol,refseq,generif,ensembl"):
try:
from biothings_client import get_client
except ImportError:
raise ImportError(
"Please install Biothings first via `pip install biothings_client`."
)
class MyGeneInfo(get_client("gene", instance=False)):
def __init__(self):
super(MyGeneInfo, self).__init__()
if not name.startswith("ENS"):
df = convert_to_gene_names()
df.reset_index(inplace=True)
df.set_index("gene name", inplace=True)
if name in df.index:
name = df.loc[name][0]
info = MyGeneInfo().getgene(name, fields)
return info
|
the-stack_0_25041
|
# -*- coding: utf-8 -*-
import os
import re
import time
import unittest
import sys
import requests
from base_test_class import BaseTestCase
class Login(BaseTestCase):
def get_api_key(self):
driver = self.login_page()
driver.get(self.base_url + "api/key")
time.sleep(3)
api_text = driver.find_element_by_tag_name("BODY").text
r_pattern = re.compile('Your current API key is (\\w+)')
r_match = r_pattern.search(api_text)
return r_match.group(1)
def test_engagement_status(self):
api_key = self.get_api_key()
api_url = self.base_url + "api/v1/engagements"
user = os.environ['DD_ADMIN_USER']
headers = {'content-type': 'application/json',
'Authorization': 'ApiKey %s:%s' % (user, api_key)}
r = requests.get(api_url, headers=headers, verify=False)
self.assertEqual(r.status_code, 200)
def test_finding_status(self):
api_key = self.get_api_key()
api_url = self.base_url + "api/v1/findings"
user = os.environ['DD_ADMIN_USER']
headers = {'content-type': 'application/json',
'Authorization': 'ApiKey %s:%s' % (user, api_key)}
r = requests.get(api_url, headers=headers, verify=False)
self.assertEqual(r.status_code, 200)
def test_product_status(self):
api_key = self.get_api_key()
api_url = self.base_url + "api/v1/products"
user = os.environ['DD_ADMIN_USER']
headers = {'content-type': 'application/json',
'Authorization': 'ApiKey %s:%s' % (user, api_key)}
r = requests.get(api_url, headers=headers, verify=False)
self.assertEqual(r.status_code, 200)
def test_t_status(self):
api_key = self.get_api_key()
api_url = self.base_url + "api/v1/tests"
user = os.environ['DD_ADMIN_USER']
headers = {'content-type': 'application/json',
'Authorization': 'ApiKey %s:%s' % (user, api_key)}
r = requests.get(api_url, headers=headers, verify=False)
self.assertEqual(r.status_code, 200)
def suite():
suite = unittest.TestSuite()
suite.addTest(Login('setUp'))
suite.addTest(Login('login_page'))
return suite
if __name__ == "__main__":
runner = unittest.TextTestRunner(descriptions=True, failfast=True, verbosity=2)
ret = not runner.run(suite()).wasSuccessful()
BaseTestCase.tearDownDriver()
sys.exit(ret)
|
the-stack_0_25042
|
# coding: utf-8
# In[209]:
import argparse
import os
import datetime as dt
import oauth2 as oauth
import json
import urllib.request as ur
import urllib.parse as par
from requests_oauthlib import OAuth1
import requests
API_KEY = "fIsG5ocnl8SVgbxD0aESYDkhz"
API_SECRET = "IKytIIG1oTbKwtdAuVUbJSzyXv4DnF8TTJkWHzgYPqG4hx1bQR"
ACCESS_TOKEN = "788621334687391744-T0qILt4jZgXUyOdmjs7lQWgKtBwXFTR"
ACCESS_TOKEN_SECRET = "zCH6Idk3DFzPUl7xBKP4cDVgNUOeu3vcBhTEp0DabKxKL"
url = 'https://api.twitter.com/1.1/account/verify_credentials.json'
auth = OAuth1(API_KEY, API_SECRET, ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
requests.get(url, auth=auth)
path = "C:\\Shamal\\NEU\\Python\\imdb\\imdbnew\\twitter\\FollowCount"
os.chdir(path)
# In[210]:
import csv
D = []
with open('links.csv', 'r') as f:
for line in f.readlines():
movieId,imdbId,tmdbId = line.strip().split(',')
#print (len(imdbId))
if(len(imdbId) == 4 and imdbId != 'imdbId'):
imdbIdNew = 'tt000' + imdbId
if(len(imdbId) == 5 and imdbId != 'imdbId'):
imdbIdNew = 'tt00' + imdbId
if(len(imdbId) == 3 and imdbId != 'imdbId'):
imdbIdNew = 'tt0000' + imdbId
if(len(imdbId) == 2 and imdbId != 'imdbId'):
imdbIdNew = 'tt00000' + imdbId
if(len(imdbId) == 1 and imdbId != 'imdbId'):
imdbIdNew = 'tt000000' + imdbId
if(len(imdbId) == 6 and imdbId != 'imdbId'):
imdbIdNew = 'tt0' + imdbId
#print (len(imdbId))
        if(len(imdbId) == 7 and imdbId != 'imdbId'):
            imdbIdNew = 'tt' + imdbId
            D.append(imdbIdNew)
# In[ ]:
searchTermnew = "imdb_" + dt.datetime.now().strftime('%Y-%m-%d') + ".json"
def createMovieFile(r, path,usrID):
file_loc_movie = os.path.join(path, searchTermnew)
with open('file_loc_movie', 'a',newline='') as f:
json.dump(r.json(), f)
f.write(os.linesep)
for l in D[:10000]:
url1 = "http://www.omdbapi.com/?i=" + str(l) + "&plot=short&r=json"
r = requests.get(url1)
createMovieFile(r, path,l)
print (r.json())
# In[212]:
my_list =[]
with open('file_loc_movie') as f:
my_list = [json.loads(line) for line in f]
print (len(my_list))
# In[213]:
import csv
with open('output6.csv', 'w',newline='') as myfile:
wr = csv.writer(myfile,delimiter=',', quoting=csv.QUOTE_ALL)
wr.writerow(['Year','Runtime','imdbRating','Genre','imdbID',
'Metascore','Title','imdbVotes','Type','Language','Director','Awards','Actors','Plot',
'Country','Rated','Writer','Released'])
for itm in my_list:
wr.writerow([itm['Year'],itm['Runtime'], itm['imdbRating'],itm['Genre'],itm['imdbID'],
itm['Metascore'],itm['Title'], itm['imdbVotes'],itm['Type'],itm['Language'], itm['Director'],
itm['Awards'],itm['Actors'], itm['Plot'],itm['Country'], itm['Rated'],itm['Writer'],
itm['Released']])
# In[231]:
S= []
with open('output6.csv','r') as csvinput:
reader = csv.reader(csvinput)
row = next(reader)
for row in reader:
spaceTitleinput = row[6].replace(" ", "").lower()
S.append(spaceTitleinput)
print (spaceTitleinput)
# In[97]:
#S=[]
#for l in D:
# for i in range(10):
# url1 = "http://www.omdbapi.com/?i=" + str(l) + "&plot=short&r=json"
# r = requests.get(url1)
# keyTitle = r.json()['Title'].replace(" ", "")
# S.append(keyTitle)
# break
# In[233]:
import re
for i in S:
#url1 = "https://api.twitter.com/1.1/friends/ids.json?screen_name=%23vineebhole"
i = re.sub('[^A-Za-z0-9]+', '', i)
url1 = "https://api.twitter.com/1.1/users/show.json?screen_name=" + i
#print (i)
r = requests.get(url1,auth=auth)
if (r.json().get('screen_name')):
#print (r.json())
createjsonFile(r, path)
# In[120]:
#searchTermnew = "adhm" +"_" + dt.datetime.now().strftime('%Y-%m-%d') + ".json"
#def createFile(r, path):
# file_loc = os.path.join(path, searchTermnew)
# try:
# fp = open(file_loc)
# except IOError:
# with open(file_loc, 'w') as f:
# json.dump(r.json(), f)
# f.write(os.linesep)
# In[230]:
searchTermnew = "imdb_" + dt.datetime.now().strftime('%Y-%m-%d') + ".json"
def createjsonFile(r, path):
file_loc = os.path.join(path, searchTermnew)
with open('file_loc', 'a',newline='') as f:
json.dump(r.json(), f)
f.write(os.linesep)
# In[234]:
my_listMovie =[]
with open('file_loc') as f:
my_listMovie = [json.loads(line) for line in f]
print (len(my_listMovie))
# In[235]:
for itm in my_listMovie:
print (itm['screen_name'])
# In[199]:
for itm in my_listMovie:
if(itm['screen_name'] != "heat"):
print ("true")
# In[236]:
with open('output6.csv','r') as csvinput:
with open('output6New.csv', 'w') as csvoutput:
writer = csv.writer(csvoutput, lineterminator='\n')
reader = csv.reader(csvinput)
#print (reader)
all = []
row = next(reader)
row.append('Latest Follower Count')
all.append(row)
#print (all)
for row in reader:
flag = 'False'
for itm in my_listMovie:
spaceTitleinput = row[6].replace(" ", "").lower()
spacetitleListlower = itm.get('screen_name').lower()
#print (spacetitleListlower)
#print (spaceTitleinput)
if(spacetitleListlower == spaceTitleinput):
flag = 'True'
row.append(itm.get('followers_count'))
all.append(row)
if(flag == 'False'):
row.append("0")
all.append(row)
writer.writerows(all)
# In[ ]:
|
the-stack_0_25045
|
"""Support for Verisure devices."""
from __future__ import annotations
import asyncio
import os
from typing import Any
import voluptuous as vol
from homeassistant.components.alarm_control_panel import (
DOMAIN as ALARM_CONTROL_PANEL_DOMAIN,
)
from homeassistant.components.binary_sensor import DOMAIN as BINARY_SENSOR_DOMAIN
from homeassistant.components.camera import DOMAIN as CAMERA_DOMAIN
from homeassistant.components.lock import DOMAIN as LOCK_DOMAIN
from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN
from homeassistant.components.switch import DOMAIN as SWITCH_DOMAIN
from homeassistant.config_entries import SOURCE_IMPORT, SOURCE_REAUTH, ConfigEntry
from homeassistant.const import (
CONF_EMAIL,
CONF_PASSWORD,
CONF_USERNAME,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.storage import STORAGE_DIR
from .const import (
CONF_CODE_DIGITS,
CONF_DEFAULT_LOCK_CODE,
CONF_GIID,
CONF_LOCK_CODE_DIGITS,
CONF_LOCK_DEFAULT_CODE,
DEFAULT_LOCK_CODE_DIGITS,
DOMAIN,
)
from .coordinator import VerisureDataUpdateCoordinator
PLATFORMS = [
ALARM_CONTROL_PANEL_DOMAIN,
BINARY_SENSOR_DOMAIN,
CAMERA_DOMAIN,
LOCK_DOMAIN,
SENSOR_DOMAIN,
SWITCH_DOMAIN,
]
CONFIG_SCHEMA = vol.Schema(
vol.All(
cv.deprecated(DOMAIN),
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Optional(CONF_CODE_DIGITS): cv.positive_int,
vol.Optional(CONF_GIID): cv.string,
vol.Optional(CONF_DEFAULT_LOCK_CODE): cv.string,
},
extra=vol.ALLOW_EXTRA,
)
},
),
extra=vol.ALLOW_EXTRA,
)
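# Illustrative configuration.yaml snippet accepted by the (deprecated) schema above;
# the key names assume the usual string values behind CONF_GIID, CONF_CODE_DIGITS and
# CONF_DEFAULT_LOCK_CODE in .const, and every value is a placeholder:
#
# verisure:
#   username: user@example.com
#   password: !secret verisure_password
#   giid: "123456789000"
#   code_digits: 6
#   default_lock_code: "1234"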
async def async_setup(hass: HomeAssistant, config: dict[str, Any]) -> bool:
"""Set up the Verisure integration."""
if DOMAIN in config:
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data={
CONF_EMAIL: config[DOMAIN][CONF_USERNAME],
CONF_PASSWORD: config[DOMAIN][CONF_PASSWORD],
CONF_GIID: config[DOMAIN].get(CONF_GIID),
CONF_LOCK_CODE_DIGITS: config[DOMAIN].get(CONF_CODE_DIGITS),
CONF_LOCK_DEFAULT_CODE: config[DOMAIN].get(CONF_LOCK_DEFAULT_CODE),
},
)
)
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up Verisure from a config entry."""
# Migrate old YAML settings (hidden in the config entry),
# to config entry options. Can be removed after YAML support is gone.
if CONF_LOCK_CODE_DIGITS in entry.data or CONF_DEFAULT_LOCK_CODE in entry.data:
options = entry.options.copy()
if (
CONF_LOCK_CODE_DIGITS in entry.data
and CONF_LOCK_CODE_DIGITS not in entry.options
and entry.data[CONF_LOCK_CODE_DIGITS] != DEFAULT_LOCK_CODE_DIGITS
):
options.update(
{
CONF_LOCK_CODE_DIGITS: entry.data[CONF_LOCK_CODE_DIGITS],
}
)
if (
CONF_DEFAULT_LOCK_CODE in entry.data
and CONF_DEFAULT_LOCK_CODE not in entry.options
):
options.update(
{
CONF_DEFAULT_LOCK_CODE: entry.data[CONF_DEFAULT_LOCK_CODE],
}
)
data = entry.data.copy()
data.pop(CONF_LOCK_CODE_DIGITS, None)
data.pop(CONF_DEFAULT_LOCK_CODE, None)
hass.config_entries.async_update_entry(entry, data=data, options=options)
# Continue as normal...
coordinator = VerisureDataUpdateCoordinator(hass, entry=entry)
if not await coordinator.async_login():
await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_REAUTH},
data={"entry": entry},
)
return False
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, coordinator.async_logout)
await coordinator.async_refresh()
if not coordinator.last_update_success:
raise ConfigEntryNotReady
hass.data.setdefault(DOMAIN, {})
hass.data[DOMAIN][entry.entry_id] = coordinator
# Set up all platforms for this device/entry.
for platform in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, platform)
)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload Verisure config entry."""
unload_ok = all(
await asyncio.gather(
*(
hass.config_entries.async_forward_entry_unload(entry, platform)
for platform in PLATFORMS
)
)
)
if not unload_ok:
return False
cookie_file = hass.config.path(STORAGE_DIR, f"verisure_{entry.entry_id}")
try:
await hass.async_add_executor_job(os.unlink, cookie_file)
except FileNotFoundError:
pass
del hass.data[DOMAIN][entry.entry_id]
if not hass.data[DOMAIN]:
del hass.data[DOMAIN]
return True
|
the-stack_0_25046
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from fate_arch import storage
from fate_arch.metastore.db_utils import StorageConnector
from fate_arch.session import Session
from fate_arch.storage import StorageTableMeta, StorageTableOrigin
from fate_flow.entity import RunParameters
from fate_flow.manager.data_manager import DataTableTracker, TableStorage
from fate_flow.operation.job_saver import JobSaver
from fate_flow.operation.job_tracker import Tracker
from fate_flow.worker.task_executor import TaskExecutor
from fate_flow.utils.api_utils import get_json_result, error_response
from fate_flow.utils import job_utils, schedule_utils
from flask import request
from fate_flow.utils.detect_utils import validate_request
@manager.route('/connector/create', methods=['POST'])
def create_storage_connector():
request_data = request.json
address = StorageTableMeta.create_address(request_data.get("engine"), request_data.get("connector_info"))
connector = StorageConnector(connector_name=request_data.get("connector_name"), engine=request_data.get("engine"),
connector_info=address.connector)
connector.create_or_update()
return get_json_result(retcode=0, retmsg='success')
@manager.route('/connector/query', methods=['POST'])
def query_storage_connector():
request_data = request.json
connector = StorageConnector(connector_name=request_data.get("connector_name"))
return get_json_result(retcode=0, retmsg='success', data=connector.get_info())
@manager.route('/add', methods=['post'])
@manager.route('/bind', methods=['post'])
@validate_request("engine", "address", "namespace", "name")
def table_bind():
request_data = request.json
address_dict = request_data.get('address')
engine = request_data.get('engine')
name = request_data.get('name')
namespace = request_data.get('namespace')
address = storage.StorageTableMeta.create_address(storage_engine=engine, address_dict=address_dict)
in_serialized = request_data.get("in_serialized", 1 if engine in {storage.StorageEngine.STANDALONE, storage.StorageEngine.EGGROLL,
storage.StorageEngine.MYSQL, storage.StorageEngine.PATH} else 0)
destroy = (int(request_data.get("drop", 0)) == 1)
data_table_meta = storage.StorageTableMeta(name=name, namespace=namespace)
if data_table_meta:
if destroy:
data_table_meta.destroy_metas()
else:
return get_json_result(retcode=100,
                                   retmsg='The data table already exists. '
'If you still want to continue uploading, please add the parameter --drop')
id_column = request_data.get("id_column") or request_data.get("id_name")
feature_column = request_data.get("feature_column") or request_data.get("feature_name")
schema = None
if id_column and feature_column:
schema = {'header': feature_column, 'sid': id_column}
elif id_column:
schema = {'sid': id_column, 'header': ''}
sess = Session()
storage_session = sess.storage(storage_engine=engine, options=request_data.get("options"))
table = storage_session.create_table(address=address, name=name, namespace=namespace,
partitions=request_data.get('partitions', None),
hava_head=request_data.get("head"), schema=schema,
id_delimiter=request_data.get("id_delimiter"), in_serialized=in_serialized,
origin=request_data.get("origin", StorageTableOrigin.TABLE_BIND))
response = get_json_result(data={"table_name": name, "namespace": namespace})
if not table.check_address():
response = get_json_result(retcode=100, retmsg=f'engine {engine} address {address_dict} check failed')
else:
DataTableTracker.create_table_tracker(
table_name=name,
table_namespace=namespace,
entity_info={"have_parent": False},
)
sess.destroy_all_sessions()
return response
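# Illustrative JSON body for the bind endpoint above (field names are the ones read
# by the handler; values and the address layout are placeholders that depend on the
# chosen storage engine):
#
# {
#     "engine": "MYSQL",
#     "name": "breast_hetero_guest",
#     "namespace": "experiment",
#     "address": {"...": "engine-specific connection fields"},
#     "head": 1,
#     "id_delimiter": ",",
#     "partitions": 16,
#     "drop": 1
# }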
@manager.route('/download', methods=['get'])
def table_download():
request_data = request.json
from fate_flow.component_env_utils.env_utils import import_component_output_depend
import_component_output_depend()
data_table_meta = storage.StorageTableMeta(name=request_data.get("name"), namespace=request_data.get("namespace"))
if not data_table_meta:
        return error_response(response_code=210, retmsg=f'table not found: {request_data.get("namespace")}, {request_data.get("name")}')
tar_file_name = 'table_{}_{}.tar.gz'.format(request_data.get("namespace"), request_data.get("name"))
return TableStorage.send_table(
output_tables_meta={"table": data_table_meta},
tar_file_name=tar_file_name,
need_head=request_data.get("head", True)
)
@manager.route('/delete', methods=['post'])
def table_delete():
request_data = request.json
table_name = request_data.get('table_name')
namespace = request_data.get('namespace')
data = None
sess = Session()
table = sess.get_table(name=table_name, namespace=namespace, ignore_disable=True)
if table:
table.destroy()
data = {'table_name': table_name, 'namespace': namespace}
sess.destroy_all_sessions()
if data:
return get_json_result(data=data)
    return get_json_result(retcode=101, retmsg='table not found')
@manager.route('/disable', methods=['post'])
@manager.route('/enable', methods=['post'])
def table_disable():
request_data = request.json
adapter_request_data(request_data)
disable = True if request.url.endswith("disable") else False
tables_meta = storage.StorageTableMeta.query_table_meta(filter_fields=dict(**request_data))
data = []
if tables_meta:
for table_meta in tables_meta:
storage.StorageTableMeta(name=table_meta.f_name,
namespace=table_meta.f_namespace
).update_metas(disable=disable)
data.append({'table_name': table_meta.f_name, 'namespace': table_meta.f_namespace})
return get_json_result(data=data)
    return get_json_result(retcode=101, retmsg='table not found')
@manager.route('/disable/delete', methods=['post'])
def table_delete_disable():
request_data = request.json
adapter_request_data(request_data)
tables_meta = storage.StorageTableMeta.query_table_meta(filter_fields={"disable": True})
data = []
sess = Session()
for table_meta in tables_meta:
table = sess.get_table(name=table_meta.f_name, namespace=table_meta.f_namespace, ignore_disable=True)
if table:
table.destroy()
data.append({'table_name': table_meta.f_name, 'namespace': table_meta.f_namespace})
sess.destroy_all_sessions()
if data:
return get_json_result(data=data)
    return get_json_result(retcode=101, retmsg='table not found')
@manager.route('/list', methods=['post'])
@validate_request('job_id', 'role', 'party_id')
def get_job_table_list():
jobs = JobSaver.query_job(**request.json)
if jobs:
job = jobs[0]
tables = get_job_all_table(job)
return get_json_result(data=tables)
else:
        return get_json_result(retcode=101, retmsg='job not found')
@manager.route('/<table_func>', methods=['post'])
def table_api(table_func):
config = request.json
if table_func == 'table_info':
table_key_count = 0
table_partition = None
table_schema = None
table_name, namespace = config.get("name") or config.get("table_name"), config.get("namespace")
table_meta = storage.StorageTableMeta(name=table_name, namespace=namespace)
address = None
enable = True
origin = None
if table_meta:
table_key_count = table_meta.get_count()
table_partition = table_meta.get_partitions()
table_schema = table_meta.get_schema()
address = table_meta.get_address().__dict__
enable = not table_meta.get_disable()
origin = table_meta.get_origin()
exist = 1
else:
exist = 0
return get_json_result(data={"table_name": table_name,
"namespace": namespace,
"exist": exist,
"count": table_key_count,
"partition": table_partition,
"schema": table_schema,
"enable": enable,
"origin": origin,
"address": address,
})
else:
return get_json_result()
@manager.route('/tracking/source', methods=['post'])
@validate_request("table_name", "namespace")
def table_tracking():
request_info = request.json
data = DataTableTracker.get_parent_table(request_info.get("table_name"), request_info.get("namespace"))
return get_json_result(data=data)
@manager.route('/tracking/job', methods=['post'])
@validate_request("table_name", "namespace")
def table_tracking_job():
request_info = request.json
data = DataTableTracker.track_job(request_info.get("table_name"), request_info.get("namespace"), display=True)
return get_json_result(data=data)
def get_job_all_table(job):
dsl_parser = schedule_utils.get_job_dsl_parser(dsl=job.f_dsl,
runtime_conf=job.f_runtime_conf,
train_runtime_conf=job.f_train_runtime_conf
)
_, hierarchical_structure = dsl_parser.get_dsl_hierarchical_structure()
component_table = {}
try:
component_output_tables = Tracker.query_output_data_infos(job_id=job.f_job_id, role=job.f_role,
party_id=job.f_party_id)
except:
component_output_tables = []
for component_name_list in hierarchical_structure:
for component_name in component_name_list:
component_table[component_name] = {}
component_input_table = get_component_input_table(dsl_parser, job, component_name)
component_table[component_name]['input'] = component_input_table
component_table[component_name]['output'] = {}
for output_table in component_output_tables:
if output_table.f_component_name == component_name:
component_table[component_name]['output'][output_table.f_data_name] = \
{'name': output_table.f_table_name, 'namespace': output_table.f_table_namespace}
return component_table
def get_component_input_table(dsl_parser, job, component_name):
component = dsl_parser.get_component_info(component_name=component_name)
module_name = get_component_module(component_name, job.f_dsl)
if 'reader' in module_name.lower():
return job.f_runtime_conf.get("component_parameters", {}).get("role", {}).get(job.f_role, {}).get(str(job.f_roles.get(job.f_role).index(int(job.f_party_id)))).get(component_name)
task_input_dsl = component.get_input()
job_args_on_party = TaskExecutor.get_job_args_on_party(dsl_parser=dsl_parser,
job_runtime_conf=job.f_runtime_conf, role=job.f_role,
party_id=job.f_party_id)
config = job_utils.get_job_parameters(job.f_job_id, job.f_role, job.f_party_id)
task_parameters = RunParameters(**config)
job_parameters = task_parameters
component_input_table = TaskExecutor.get_task_run_args(job_id=job.f_job_id, role=job.f_role,
party_id=job.f_party_id,
task_id=None,
task_version=None,
job_args=job_args_on_party,
job_parameters=job_parameters,
task_parameters=task_parameters,
input_dsl=task_input_dsl,
get_input_table=True
)
return component_input_table
def get_component_module(component_name, job_dsl):
return job_dsl["components"][component_name]["module"].lower()
def adapter_request_data(request_data):
if request_data.get("table_name"):
request_data["name"] = request_data.get("table_name")
|
the-stack_0_25047
|
"""
Types used on a per-build basis.
"""
from __future__ import absolute_import, division, print_function
from mybuild._compat import *
import logging
from collections import deque
from functools import partial
from itertools import product, starmap
from mybuild.core import InstanceError
from mybuild.req.pgraph import And, AtMostOne, Atom, Pgraph
from mybuild.req.solver import solve
from mybuild.util.itertools import pop_iter
__author__ = "Eldar Abusalimov"
__date__ = "2012-11-09"
__all__ = [
"Context",
"resolve",
]
logger = logging.getLogger(__name__)
class Context(object):
"""docstring for Context"""
def __init__(self):
super(Context, self).__init__()
self._domains = dict() # {module: domain}, domain is optuple of sets
self._providers = dict() # {module: provider}
self._instantiation_queue = deque()
self.pgraph = ContextPgraph(self)
self.instance_nodes = list()
def domain_for(self, module):
try:
domain = self._domains[module]
except KeyError:
domain = self._domains[module] = \
module._opmake(set(optype._values)
for optype in module._optypes)
self.post_product(domain)
return domain
def post(self, optuple, origin=None):
logger.debug("add %s (posted by %s)", optuple, origin)
self._instantiation_queue.append((optuple, origin))
def post_product(self, iterables_optuple, origin=None):
for optuple in map(iterables_optuple._make,
product(*iterables_optuple)):
self.post(optuple, origin)
def post_discover(self, optuple, origin=None):
domain = self.domain_for(optuple._module)
logger.debug("discover %s (posted by %s)", optuple, origin)
for value, domain_to_extend in optuple._zipwith(domain):
if value in domain_to_extend:
continue
domain_to_extend.add(value)
self.post_product(optuple._make(option_domain
if option_domain is not domain_to_extend else (value,)
for option_domain in domain), origin)
def init_module_providers(self, module):
if module not in self._providers:
self._providers[module] = set()
def init_instance_providers(self, instance):
self.init_module_providers(type(instance))
for module in instance.provides:
# Just in case it is not discovered yet.
self.init_module_providers(module)
self._providers[module].add(instance)
def instantiate(self, optuple, origin=None):
g = self.pgraph
node = g.node_for(optuple)
logger.debug("new %s (posted by %s)", optuple, origin)
try:
instance = optuple._instantiate_module()
except InstanceError as error:
logger.debug(" %s inviable: %s", optuple, error)
node.error = error
g.new_const(False, node,
why=why_inviable_instance_is_disabled)
else:
instance._post_init()
node.instance = instance
for constraint, condition in instance._constraints:
self.post_discover(constraint, instance)
if condition:
node.implies(g.node_for(constraint),
why=why_instance_implies_its_constraints)
self.init_instance_providers(instance)
self.instance_nodes.append(node)
return node
def discover_all(self, initial_optuple):
self.post_discover(initial_optuple)
for optuple, origin in pop_iter(self._instantiation_queue,
pop_meth='popleft'):
self.instantiate(optuple, origin)
def init_pgraph_domains(self):
g = self.pgraph
for module, domain in iteritems(self._domains):
atom_for_module = partial(g.atom_for, module)
module_atom = atom_for_module()
for option, values in domain._iterpairs():
atom_for_option = partial(atom_for_module, option)
option_node = AtMostOne(g, map(atom_for_option, values),
why_one_operand_zero_implies_others_identity=
why_option_can_have_at_most_one_value,
why_identity_implies_all_operands_identity=
why_disabled_option_cannot_have_a_value,
why_all_operands_identity_implies_identity=
why_option_with_no_value_must_be_disabled)
module_atom.equivalent(option_node,
why_becauseof=why_option_implies_module,
why_therefore=why_module_implies_option)
def init_pgraph_providers(self):
g = self.pgraph
for module, providers in iteritems(self._providers):
module_atom = g.atom_for(module)
providers_node = AtMostOne(g,
(g.node_for(instance._optuple) for instance in providers),
why_one_operand_zero_implies_others_identity=
why_module_can_have_at_most_one_provider,
why_identity_implies_all_operands_identity=
why_not_included_module_cannot_have_a_provider,
why_all_operands_identity_implies_identity=
why_module_with_no_provider_must_not_be_included)
module_atom.equivalent(providers_node,
why_becauseof=why_another_module_provides_this,
why_therefore=why_module_must_be_provided_by_anything)
def resolve(self, initial_module):
optuple = initial_module()
self.discover_all(optuple)
self.init_pgraph_domains()
self.init_pgraph_providers()
solution = solve(self.pgraph, {self.pgraph.node_for(optuple): True})
instances = [node.instance
for node in self.instance_nodes if solution[node]]
instance_map = dict((type(instance), instance)
for instance in instances)
return instance_map
class ContextPgraph(Pgraph):
def __init__(self, context):
super(ContextPgraph, self).__init__()
self.context = context
def atom_for(self, module, option=None, value=Ellipsis):
if option is not None:
return self.new_node(OptionValueAtom, module, option, value)
else:
return self.new_node(ModuleAtom, module)
def node_for(self, mslice):
# TODO should accept arbitrary expr as well.
return self.new_node(OptupleNode, mslice())
@ContextPgraph.node_type
class ModuleAtom(Atom):
def __init__(self, module):
super(ModuleAtom, self).__init__()
self.module = module
        # First, try to build a default provider since it might not be
        # included explicitly.
is_default = any(module == interface.default_provider
for interface in module.provides)
if is_default:
self[True].level = 0
self[False].level = 1 # then, try not to build a module
def __repr__(self):
return repr(self.module)
@ContextPgraph.node_type
class OptionValueAtom(Atom):
def __init__(self, module, option, value):
super(OptionValueAtom, self).__init__()
self.module = module
self.option = option
self.value = value
is_default = (value == module._optype(option).default)
if is_default:
# Whenever possible prefer default option value,
# but do it after a stage of disabling modules.
self[True].level = 2
def __repr__(self):
return repr(self.module(**{self.option: self.value}))
@ContextPgraph.node_type
class OptupleNode(And):
_optimize_new = True
@classmethod
def _new(cls, optuple):
new_atom = partial(cls.pgraph.atom_for, optuple._module)
option_atoms = tuple(starmap(new_atom, optuple._iterpairs()))
if not option_atoms:
return cls.pgraph.atom_for(optuple._module)
else:
return super(OptupleNode, cls)._new(option_atoms, optuple)
def __init__(self, option_atoms, optuple):
super(OptupleNode, self).__init__(option_atoms,
why_identity_implies_all_operands_identity=None, # TODO
why_all_operands_identity_implies_identity=None) # TODO
self.optuple = optuple
def __repr__(self):
return repr(self.optuple)
def why_option_can_have_at_most_one_value(outcome, *causes):
return 'option can have at most one value: %s: %s' % (outcome, causes)
def why_disabled_option_cannot_have_a_value(outcome, *causes):
return 'disabled option cannot have a value: %s: %s' % (outcome, causes)
def why_option_with_no_value_must_be_disabled(outcome, *causes):
return 'option with no value must be disabled: %s: %s' % (outcome, causes)
def why_option_implies_module(outcome, *causes):
return 'option implies module: %s: %s' % (outcome, causes)
def why_module_implies_option(outcome, *causes):
return 'module implies option: %s: %s' % (outcome, causes)
def why_module_can_have_at_most_one_provider(outcome, *causes):
return 'module can have at most one provider: %s: %s' % (outcome, causes)
def why_not_included_module_cannot_have_a_provider(outcome, *causes):
return 'not included module {0} cannot have a provider'.format(outcome)
def why_module_with_no_provider_must_not_be_included(outcome, *causes):
return 'module {0} has no provider and cannot be included'.format(outcome)
def why_another_module_provides_this(outcome, cause):
return 'module %s provided by %s' % (cause, outcome)
def why_module_must_be_provided_by_anything(outcome, cause):
node, value = outcome
if value and not node._operands:
return 'Nothing provides {module}'.format(module=cause)
return 'module {module} must be provided by anything'.format(module=cause)
def why_instance_implies_its_constraints(outcome, cause):
node, value = outcome
if value:
fmt = 'required by {cause.node}'
else:
fmt = '{node} disabled as a dependent of {cause.node}'
return fmt.format(**locals())
def why_inviable_instance_is_disabled(outcome, *_):
node, value = outcome
assert not value
fmt = '{node} is disabled because of an error: {node.error}'
return fmt.format(**locals())
def resolve(initial_module):
return Context().resolve(initial_module)
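# Usage sketch (``my_app`` is a hypothetical Mybuild module): resolve() discovers all
# transitively constrained optuples, builds the pgraph, solves it, and returns a
# mapping from module to the instance chosen for the final configuration.
#
#   instance_map = resolve(my_app)
#   app_instance = instance_map[my_app]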
|
the-stack_0_25050
|
import pandas as pd
import torch
from project.feature_extraction import ImageFeatureExtractor
from pytorch_lightning import seed_everything
from torch import nn
seed_everything(42)
# Run the tests on the CPU regardless of CUDA availability.
device = "cpu"
df = pd.DataFrame(
{
"paths": ["path1", "path2", "path3"],
0: [
"the quick brown fox jumps over the lazy dog",
"now is the time for all good folks",
"everything is fine",
],
1: [
"what do you mean",
"im sorry dave im afraid i cant do that",
"yes this is a test",
],
2: [
"on the internet no one knows youre a dog",
"i cant think of any more memes",
"this one describes a picture",
],
}
)
image_batch = torch.randn((16, 3, 224, 224), device=device)
caption_batch = torch.randint(1000, (16, 5, 30), device=device)
def test_image_extractor():
imgs = image_batch.clone()
ife_mobilenet = ImageFeatureExtractor().to(device)
for param in ife_mobilenet.encoder.parameters():
assert not param.requires_grad
imgs = ife_mobilenet.encoder(imgs)
assert imgs.shape == (16, 1280, 7, 7)
assert isinstance(ife_mobilenet.pooling, nn.AdaptiveAvgPool2d)
imgs = ife_mobilenet.pooling(imgs)
assert imgs.shape == (16, 1280, 1, 1)
assert isinstance(ife_mobilenet.projector, nn.Sequential)
assert isinstance(list(ife_mobilenet.projector.children())[0], nn.Linear)
imgs = imgs.view(16, -1)
imgs = ife_mobilenet.projector(imgs)
assert imgs.shape == (16, 128)
assert not ife_mobilenet.convolution
def test_image_extractor_conv():
imgs = image_batch.clone()
ife_mobilenet = ImageFeatureExtractor(
pooling=False, convolution_in="infer", projection_in=False,
)
for param in ife_mobilenet.encoder.parameters():
assert not param.requires_grad
imgs = ife_mobilenet.encoder(imgs)
assert imgs.shape == (16, 1280, 7, 7)
assert isinstance(ife_mobilenet.convolution, nn.Conv2d)
imgs = ife_mobilenet.convolution(imgs)
assert imgs.shape == (16, 128, 7, 7)
assert not ife_mobilenet.pooling
assert not ife_mobilenet.projector
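def test_image_extractor_end_to_end():
    # A minimal end-to-end sketch assembled from the steps exercised above: with the
    # default constructor, encoder -> pooling -> flatten -> projector maps a batch of
    # images to 128-dimensional feature vectors.
    imgs = image_batch.clone()
    ife_mobilenet = ImageFeatureExtractor().to(device)
    feats = ife_mobilenet.projector(
        ife_mobilenet.pooling(ife_mobilenet.encoder(imgs)).view(16, -1)
    )
    assert feats.shape == (16, 128)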
|
the-stack_0_25051
|
import os
from tqdm import tqdm
os.environ['ET_ROOT'] = '/home/ubuntu/bak/ET'
os.environ['ET_DATA'] = os.path.join(os.environ['ET_ROOT'], 'data')
os.environ['ET_LOGS'] = os.path.join(os.environ['ET_ROOT'], 'logs')
import torch
import random
import numpy as np
from alfred.gen import constants
from alfred.model.learned import LearnedModel
from alfred.utils import data_util, helper_util, model_util
from alfred.model.train import prepare, create_model, load_data, wrap_datasets, process_vocabs
from alfred.utils.data_util import tensorize_and_pad
from alfred.utils import helper_util
from dataset_parser import Dataset
args = helper_util.AttrDict({
'seed': 1,
'resume': True,
'profile': False,
'batch': 8,
'epochs': 30,
'optimizer': 'adamw',
'weight_decay': 0.33,
'lr': {'init': 0.0001, 'profile': 'linear', 'decay_epoch': 10, 'decay_scale': 0.1, 'final': 1e-05,
'cycle_epoch_up': 0, 'cycle_epoch_down': 0, 'warmup_epoch': 0, 'warmup_scale': 1},
'action_loss_wt': 1.0,
'object_loss_wt': 1.0,
'subgoal_aux_loss_wt': 0,
'progress_aux_loss_wt': 0,
'entropy_wt': 0.0,
'demb': 768,
'encoder_heads': 12,
'encoder_layers': 2,
'num_input_actions': 1,
'encoder_lang': {'shared': True, 'layers': 2, 'pos_enc': True, 'instr_enc': False},
'decoder_lang': {'layers': 2, 'heads': 12, 'demb': 768, 'dropout': 0.1, 'pos_enc': True},
'detach_lang_emb': False,
'dropout': {'lang': 0.0, 'vis': 0.3, 'emb': 0.0,
'transformer': {'encoder': 0.1, 'action': 0.0}},
'enc': {'pos': True, 'pos_learn': False, 'token': False, 'dataset': False},
'name': 'try',
'model': 'transformer',
'device': 'cuda',
'num_workers': 10,
'pretrained_path': None,
'fast_epoch': False,
'data': {'train': ['lmdb_human_all'], 'valid': [], 'length': 30000, 'ann_type': ['lang']},
'dout': os.path.join(os.environ['ET_LOGS'], 'parser_v3'),
})
train_data_folder = args.data['train'][0]
dataset = Dataset(f"/home/ubuntu/bak/ET/data/{train_data_folder}", train_data_folder, args)
dataset.name = train_data_folder
loader_args = {
'num_workers': args.num_workers,
'batch_size': args.batch,
'drop_last': True,
'shuffle': True,
'collate_fn': helper_util.identity,
}
loader = torch.utils.data.DataLoader(dataset, **loader_args)
# assign vocabs to datasets and check their sizes for nn.Embeding inits
embs_ann, vocab_out = process_vocabs([dataset], args)
# create the model
model, optimizer, prev_train_info = create_model(args, embs_ann, vocab_out)
# optimizer
optimizer, schedulers = model_util.create_optimizer_and_schedulers(
prev_train_info['progress'], model.args, model.parameters(), optimizer)
print('Training stats will be saved at', model.args.dout)
if not os.path.exists(model.args.dout):
os.mkdir(model.args.dout)
for ep in range(args.epochs):
model.train()
losses_train_list = []
for batches in tqdm(loader):
traj_data, input_dict, gt_dict = tensorize_and_pad(
batches, model.args.device, model.pad)
model_out = model.model.forward(
dataset.vocab_in, action=gt_dict['action'], **input_dict)
losses_train = model.model.compute_batch_loss(model_out, gt_dict)
# do the gradient step
optimizer.zero_grad()
sum_loss = sum([v for v in losses_train.values()])
sum_loss.backward()
losses_train_list.append(sum_loss.item())
optimizer.step()
print("Epoch: {} train loss {:.3f}".format(ep, np.mean(losses_train_list)))
model_util.save_model(
model, f'model_{str(ep).zfill(2)}.pth', {}, optimizer=optimizer)
|
the-stack_0_25052
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayBossProdAlcagmprodAgreementUnsignModel(object):
def __init__(self):
self._agreement_id_list = None
self._cancel_date = None
self._out_sign_no = None
self._product_cd = None
self._request_from = None
self._request_no = None
self._user_id = None
@property
def agreement_id_list(self):
return self._agreement_id_list
@agreement_id_list.setter
def agreement_id_list(self, value):
if isinstance(value, list):
self._agreement_id_list = list()
for i in value:
self._agreement_id_list.append(i)
@property
def cancel_date(self):
return self._cancel_date
@cancel_date.setter
def cancel_date(self, value):
self._cancel_date = value
@property
def out_sign_no(self):
return self._out_sign_no
@out_sign_no.setter
def out_sign_no(self, value):
self._out_sign_no = value
@property
def product_cd(self):
return self._product_cd
@product_cd.setter
def product_cd(self, value):
self._product_cd = value
@property
def request_from(self):
return self._request_from
@request_from.setter
def request_from(self, value):
self._request_from = value
@property
def request_no(self):
return self._request_no
@request_no.setter
def request_no(self, value):
self._request_no = value
@property
def user_id(self):
return self._user_id
@user_id.setter
def user_id(self, value):
self._user_id = value
def to_alipay_dict(self):
params = dict()
if self.agreement_id_list:
if isinstance(self.agreement_id_list, list):
for i in range(0, len(self.agreement_id_list)):
element = self.agreement_id_list[i]
if hasattr(element, 'to_alipay_dict'):
self.agreement_id_list[i] = element.to_alipay_dict()
if hasattr(self.agreement_id_list, 'to_alipay_dict'):
params['agreement_id_list'] = self.agreement_id_list.to_alipay_dict()
else:
params['agreement_id_list'] = self.agreement_id_list
if self.cancel_date:
if hasattr(self.cancel_date, 'to_alipay_dict'):
params['cancel_date'] = self.cancel_date.to_alipay_dict()
else:
params['cancel_date'] = self.cancel_date
if self.out_sign_no:
if hasattr(self.out_sign_no, 'to_alipay_dict'):
params['out_sign_no'] = self.out_sign_no.to_alipay_dict()
else:
params['out_sign_no'] = self.out_sign_no
if self.product_cd:
if hasattr(self.product_cd, 'to_alipay_dict'):
params['product_cd'] = self.product_cd.to_alipay_dict()
else:
params['product_cd'] = self.product_cd
if self.request_from:
if hasattr(self.request_from, 'to_alipay_dict'):
params['request_from'] = self.request_from.to_alipay_dict()
else:
params['request_from'] = self.request_from
if self.request_no:
if hasattr(self.request_no, 'to_alipay_dict'):
params['request_no'] = self.request_no.to_alipay_dict()
else:
params['request_no'] = self.request_no
if self.user_id:
if hasattr(self.user_id, 'to_alipay_dict'):
params['user_id'] = self.user_id.to_alipay_dict()
else:
params['user_id'] = self.user_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayBossProdAlcagmprodAgreementUnsignModel()
if 'agreement_id_list' in d:
o.agreement_id_list = d['agreement_id_list']
if 'cancel_date' in d:
o.cancel_date = d['cancel_date']
if 'out_sign_no' in d:
o.out_sign_no = d['out_sign_no']
if 'product_cd' in d:
o.product_cd = d['product_cd']
if 'request_from' in d:
o.request_from = d['request_from']
if 'request_no' in d:
o.request_no = d['request_no']
if 'user_id' in d:
o.user_id = d['user_id']
return o
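# A minimal round-trip sketch (hypothetical values, not part of the generated
# SDK file): populate the model, serialize it with to_alipay_dict, and rebuild
# it with from_alipay_dict.
if __name__ == "__main__":
    model = AlipayBossProdAlcagmprodAgreementUnsignModel()
    model.agreement_id_list = ["20170322450983769228"]
    model.product_cd = "GENERAL_WITHHOLDING"
    model.request_no = "REQ-0001"
    params = model.to_alipay_dict()
    print(json.dumps(params))
    restored = AlipayBossProdAlcagmprodAgreementUnsignModel.from_alipay_dict(params)
    print(restored.agreement_id_list)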
|
the-stack_0_25053
|
"""
Copyright 2017-present, Airbnb Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from stream_alert_cli.manage_lambda.package import ClassifierPackage
from stream_alert_cli.terraform.lambda_module import generate_lambda
def generate_classifier(cluster_name, cluster_dict, config):
"""Add this cluster's classifier module to the Terraform cluster dict.
Args:
cluster_name (str): The name of the currently generating cluster
cluster_dict (defaultdict): The dict containing all Terraform config for a given cluster.
config (dict): The loaded config from the 'conf/' directory
JSON Input from the config:
"stream_alert": {
"classifier_config": {
"log_level": "info",
"log_retention_days": 14,
"memory": 128,
"metric_alarms": {
"errors": {
"enabled": true,
"evaluation_periods": 1,
"period_secs": 120,
"threshold": 0
},
"throttles": {
"enabled": true,
"evaluation_periods": 1,
"period_secs": 120,
"threshold": 0
}
},
"timeout": 60,
"vpc_config": {
"security_group_ids": [],
"subnet_ids": []
}
}
}
"""
classifier_config = (
config['clusters'][cluster_name]['modules']['stream_alert']['classifier_config']
)
tf_module_prefix = 'classifier_{}'.format(cluster_name)
iam_module = '{}_iam'.format(tf_module_prefix)
    # Set variables for the classifier's IAM permissions
cluster_dict['module'][iam_module] = {
'source': 'modules/tf_classifier',
'account_id': config['global']['account']['aws_account_id'],
'region': config['global']['account']['region'],
'function_role_id': '${{module.{}_lambda.role_id}}'.format(tf_module_prefix),
'function_alias_arn': '${{module.{}_lambda.function_alias_arn}}'.format(tf_module_prefix),
'function_name': '${{module.{}_lambda.function_name}}'.format(tf_module_prefix),
'classifier_sqs_queue_arn': '${module.globals.classifier_sqs_queue_arn}',
'classifier_sqs_queue_url': '${module.globals.classifier_sqs_queue_url}',
'classifier_sqs_sse_kms_key_arn': '${module.globals.classifier_sqs_sse_kms_key_arn}',
}
# Add Classifier input config from the loaded cluster file
input_config = classifier_config.get('inputs')
if input_config:
input_mapping = {
'input_sns_topics': 'aws-sns'
}
for tf_key, input_key in input_mapping.items():
if input_key in input_config:
cluster_dict['module'][iam_module][tf_key] = input_config[input_key]
# Set variables for the Lambda module
cluster_dict['module']['{}_lambda'.format(tf_module_prefix)] = generate_lambda(
'{}_streamalert_classifier_{}'.format(config['global']['account']['prefix'], cluster_name),
ClassifierPackage.package_name + '.zip',
ClassifierPackage.lambda_handler,
classifier_config,
config,
environment={
'CLUSTER': cluster_name,
'SQS_QUEUE_URL': '${module.globals.classifier_sqs_queue_url}',
},
tags={
'Cluster': cluster_name
},
)
|
the-stack_0_25055
|
import cv2
name = 'Matias' #replace with your name
cam = cv2.VideoCapture(0)
cv2.namedWindow("press space to take a photo", cv2.WINDOW_NORMAL)
cv2.resizeWindow("press space to take a photo", 500, 300)
img_counter = 0
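# Note (not in the original script): cv2.imwrite fails silently when the target
# directory is missing, so make sure dataset/<name> exists before capturing.
import os
os.makedirs(os.path.join("dataset", name), exist_ok=True)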
while True:
ret, frame = cam.read()
if not ret:
print("failed to grab frame")
break
cv2.imshow("press space to take a photo", frame)
k = cv2.waitKey(1)
if k%256 == 27:
# ESC pressed
print("Escape hit, closing...")
break
elif k%256 == 32:
# SPACE pressed
img_name = "dataset/"+ name +"/image_{}.jpg".format(img_counter)
cv2.imwrite(img_name, frame)
print("{} written!".format(img_name))
img_counter += 1
cam.release()
cv2.destroyAllWindows()
|
the-stack_0_25058
|
import textwrap
import disnake
from disnake.ext.commands import BucketType, cooldown
from bot.bot import command, Context
from cogs.cog import Cog
from utils.utilities import wait_for_words
class Privacy(Cog):
def __init__(self, bot):
super().__init__(bot)
@command()
@cooldown(1, 5, BucketType.guild)
async def privacy(self, ctx):
can_embed = ctx.channel.permissions_for(ctx.guild.get_member(self.bot.user.id)).embed_links
d = '''
**Data**
This bot needs to collect data that the discord api provides
in order to function properly. This data consist mainly of your user id combined with other ids such as
server and role ids. Non moderation uses include the first date a user joined a server,
stats with the mute_roll command, commands you have used,
the date you last interacted in a server (e.g. by sending a message)
and image urls for image manipulation commands (only the url and channel are stored here).
Moderation uses include timeouts, reasons for the timeouts and mutes,
temproles, command permissions, saving roles and botbans.
        Message ids are also saved in a handful of servers for message purging reasons.
The bot also saves the latest image links sent in a channel for up to 1 day.
This is to make the image commands more convenient to use.
Message content is saved in memory for a short amount of time.
This is used for message delete and edit tracking for servers that have it enabled.
**Agreement**
By having this bot in your guild you agree to inform users that
this bot collects data that the discord api provides in order to function.
The creator of this bot is not responsible for any damage this bot causes
including but not limited to failure of a service this bot provides
        You can get the support server invite with the command `support`, or use the command `delete_data` if you want
        your data removed (not including moderation data). The command `do_not_track` will prevent the bot from saving data about you (does not include data used for moderation).
'''
d = textwrap.dedent(d).strip('\n')
if can_embed:
embed = disnake.Embed(title='Privacy statement', description=d)
await ctx.send(embed=embed)
else:
await ctx.send(d)
@command()
@cooldown(1, 10, BucketType.user)
async def delete_data(self, ctx: Context):
"""
Deletes your user data saved to the bot and prevents further data from being saved.
Moderation related data such as timeouts are not affected.
Server owners need to manually delete those.
        Also does not remove your do_not_track status if it has been turned on.
"""
        await ctx.send('You are about to delete your user data in the bot. This is irreversible. '
'Will not prevent further storage of data (see do_not_track command). Type confirm to continue.')
if not await wait_for_words(ctx, ['confirm'], timeout=30):
ctx.command.undo_use(ctx)
return
try:
await self.bot.dbutil.delete_user_data(ctx.author.id)
except:
await ctx.send('Failed to delete user data. Try again later.')
raise
await ctx.send('User data deleted')
@command()
@cooldown(1, 5, BucketType.user)
async def do_not_track(self, ctx, status: bool = None):
"""
        Prevents the bot from passively saving some data about you in non-moderation cases.
        If you use the bot, some data such as used commands and mute_roll statistics will still be saved.
"""
is_not_tracked = await self.bot.dbutil.do_not_track_is_on(ctx.author.id)
on_off = 'on' if is_not_tracked else 'off'
if status is None:
await ctx.send(f'Do not track is currently set {on_off}')
return
if status is is_not_tracked:
await ctx.send(f'Do not track is already set to {on_off}')
return
on_off = 'on' if status else 'off'
if await self.bot.dbutil.set_do_not_track(ctx.author.id, status):
await ctx.send(f'Do not track set to {on_off}. It should take into effect shortly.')
else:
await ctx.send(f'Failed to set do not track to {on_off}.')
def setup(bot):
bot.add_cog(Privacy(bot))
|
the-stack_0_25059
|
from datetime import timedelta
import os
import shutil
import string
import tempfile
import warnings
from django.conf import settings
from django.contrib.sessions.backends.db import SessionStore as DatabaseSession
from django.contrib.sessions.backends.cache import SessionStore as CacheSession
from django.contrib.sessions.backends.cached_db import SessionStore as CacheDBSession
from django.contrib.sessions.backends.file import SessionStore as FileSession
from django.contrib.sessions.backends.signed_cookies import SessionStore as CookieSession
from django.contrib.sessions.models import Session
from django.contrib.sessions.middleware import SessionMiddleware
from django.core.cache import get_cache
from django.core import management
from django.core.exceptions import ImproperlyConfigured, SuspiciousOperation
from django.http import HttpResponse
from django.test import TestCase, RequestFactory
from django.test.utils import override_settings
from django.utils import six
from django.utils import timezone
from django.utils import unittest
class SessionTestsMixin(object):
# This does not inherit from TestCase to avoid any tests being run with this
# class, which wouldn't work, and to allow different TestCase subclasses to
# be used.
backend = None # subclasses must specify
def setUp(self):
self.session = self.backend()
def tearDown(self):
# NB: be careful to delete any sessions created; stale sessions fill up
# the /tmp (with some backends) and eventually overwhelm it after lots
# of runs (think buildbots)
self.session.delete()
def test_new_session(self):
self.assertFalse(self.session.modified)
self.assertFalse(self.session.accessed)
def test_get_empty(self):
self.assertEqual(self.session.get('cat'), None)
def test_store(self):
self.session['cat'] = "dog"
self.assertTrue(self.session.modified)
self.assertEqual(self.session.pop('cat'), 'dog')
def test_pop(self):
self.session['some key'] = 'exists'
# Need to reset these to pretend we haven't accessed it:
self.accessed = False
self.modified = False
self.assertEqual(self.session.pop('some key'), 'exists')
self.assertTrue(self.session.accessed)
self.assertTrue(self.session.modified)
self.assertEqual(self.session.get('some key'), None)
def test_pop_default(self):
self.assertEqual(self.session.pop('some key', 'does not exist'),
'does not exist')
self.assertTrue(self.session.accessed)
self.assertFalse(self.session.modified)
def test_setdefault(self):
self.assertEqual(self.session.setdefault('foo', 'bar'), 'bar')
self.assertEqual(self.session.setdefault('foo', 'baz'), 'bar')
self.assertTrue(self.session.accessed)
self.assertTrue(self.session.modified)
def test_update(self):
self.session.update({'update key': 1})
self.assertTrue(self.session.accessed)
self.assertTrue(self.session.modified)
self.assertEqual(self.session.get('update key', None), 1)
def test_has_key(self):
self.session['some key'] = 1
self.session.modified = False
self.session.accessed = False
self.assertIn('some key', self.session)
self.assertTrue(self.session.accessed)
self.assertFalse(self.session.modified)
def test_values(self):
self.assertEqual(list(self.session.values()), [])
self.assertTrue(self.session.accessed)
self.session['some key'] = 1
self.assertEqual(list(self.session.values()), [1])
def test_iterkeys(self):
self.session['x'] = 1
self.session.modified = False
self.session.accessed = False
i = six.iterkeys(self.session)
self.assertTrue(hasattr(i, '__iter__'))
self.assertTrue(self.session.accessed)
self.assertFalse(self.session.modified)
self.assertEqual(list(i), ['x'])
def test_itervalues(self):
self.session['x'] = 1
self.session.modified = False
self.session.accessed = False
i = six.itervalues(self.session)
self.assertTrue(hasattr(i, '__iter__'))
self.assertTrue(self.session.accessed)
self.assertFalse(self.session.modified)
self.assertEqual(list(i), [1])
def test_iteritems(self):
self.session['x'] = 1
self.session.modified = False
self.session.accessed = False
i = six.iteritems(self.session)
self.assertTrue(hasattr(i, '__iter__'))
self.assertTrue(self.session.accessed)
self.assertFalse(self.session.modified)
self.assertEqual(list(i), [('x', 1)])
def test_clear(self):
self.session['x'] = 1
self.session.modified = False
self.session.accessed = False
self.assertEqual(list(self.session.items()), [('x', 1)])
self.session.clear()
self.assertEqual(list(self.session.items()), [])
self.assertTrue(self.session.accessed)
self.assertTrue(self.session.modified)
def test_save(self):
        if (hasattr(self.session, '_cache') and 'DummyCache' in
settings.CACHES[settings.SESSION_CACHE_ALIAS]['BACKEND']):
raise unittest.SkipTest("Session saving tests require a real cache backend")
self.session.save()
self.assertTrue(self.session.exists(self.session.session_key))
def test_delete(self):
self.session.save()
self.session.delete(self.session.session_key)
self.assertFalse(self.session.exists(self.session.session_key))
def test_flush(self):
self.session['foo'] = 'bar'
self.session.save()
prev_key = self.session.session_key
self.session.flush()
self.assertFalse(self.session.exists(prev_key))
self.assertNotEqual(self.session.session_key, prev_key)
self.assertTrue(self.session.modified)
self.assertTrue(self.session.accessed)
def test_cycle(self):
self.session['a'], self.session['b'] = 'c', 'd'
self.session.save()
prev_key = self.session.session_key
prev_data = list(self.session.items())
self.session.cycle_key()
self.assertNotEqual(self.session.session_key, prev_key)
self.assertEqual(list(self.session.items()), prev_data)
def test_invalid_key(self):
# Submitting an invalid session key (either by guessing, or if the db has
# removed the key) results in a new key being generated.
try:
session = self.backend('1')
try:
session.save()
except AttributeError:
self.fail("The session object did not save properly. Middleware may be saving cache items without namespaces.")
self.assertNotEqual(session.session_key, '1')
self.assertEqual(session.get('cat'), None)
session.delete()
finally:
# Some backends leave a stale cache entry for the invalid
# session key; make sure that entry is manually deleted
session.delete('1')
def test_session_key_is_read_only(self):
def set_session_key(session):
session.session_key = session._get_new_session_key()
self.assertRaises(AttributeError, set_session_key, self.session)
# Custom session expiry
def test_default_expiry(self):
# A normal session has a max age equal to settings
self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE)
# So does a custom session with an idle expiration time of 0 (but it'll
# expire at browser close)
self.session.set_expiry(0)
self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE)
def test_custom_expiry_seconds(self):
modification = timezone.now()
self.session.set_expiry(10)
date = self.session.get_expiry_date(modification=modification)
self.assertEqual(date, modification + timedelta(seconds=10))
age = self.session.get_expiry_age(modification=modification)
self.assertEqual(age, 10)
def test_custom_expiry_timedelta(self):
modification = timezone.now()
# Mock timezone.now, because set_expiry calls it on this code path.
original_now = timezone.now
try:
timezone.now = lambda: modification
self.session.set_expiry(timedelta(seconds=10))
finally:
timezone.now = original_now
date = self.session.get_expiry_date(modification=modification)
self.assertEqual(date, modification + timedelta(seconds=10))
age = self.session.get_expiry_age(modification=modification)
self.assertEqual(age, 10)
def test_custom_expiry_datetime(self):
modification = timezone.now()
self.session.set_expiry(modification + timedelta(seconds=10))
date = self.session.get_expiry_date(modification=modification)
self.assertEqual(date, modification + timedelta(seconds=10))
age = self.session.get_expiry_age(modification=modification)
self.assertEqual(age, 10)
def test_custom_expiry_reset(self):
self.session.set_expiry(None)
self.session.set_expiry(10)
self.session.set_expiry(None)
self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE)
def test_get_expire_at_browser_close(self):
# Tests get_expire_at_browser_close with different settings and different
# set_expiry calls
with override_settings(SESSION_EXPIRE_AT_BROWSER_CLOSE=False):
self.session.set_expiry(10)
self.assertFalse(self.session.get_expire_at_browser_close())
self.session.set_expiry(0)
self.assertTrue(self.session.get_expire_at_browser_close())
self.session.set_expiry(None)
self.assertFalse(self.session.get_expire_at_browser_close())
with override_settings(SESSION_EXPIRE_AT_BROWSER_CLOSE=True):
self.session.set_expiry(10)
self.assertFalse(self.session.get_expire_at_browser_close())
self.session.set_expiry(0)
self.assertTrue(self.session.get_expire_at_browser_close())
self.session.set_expiry(None)
self.assertTrue(self.session.get_expire_at_browser_close())
def test_decode(self):
# Ensure we can decode what we encode
data = {'a test key': 'a test value'}
encoded = self.session.encode(data)
self.assertEqual(self.session.decode(encoded), data)
def test_actual_expiry(self):
# Regression test for #19200
old_session_key = None
new_session_key = None
try:
self.session['foo'] = 'bar'
self.session.set_expiry(-timedelta(seconds=10))
self.session.save()
old_session_key = self.session.session_key
# With an expiry date in the past, the session expires instantly.
new_session = self.backend(self.session.session_key)
new_session_key = new_session.session_key
self.assertNotIn('foo', new_session)
finally:
self.session.delete(old_session_key)
self.session.delete(new_session_key)
class DatabaseSessionTests(SessionTestsMixin, TestCase):
backend = DatabaseSession
def test_session_get_decoded(self):
"""
Test we can use Session.get_decoded to retrieve data stored
in normal way
"""
self.session['x'] = 1
self.session.save()
s = Session.objects.get(session_key=self.session.session_key)
self.assertEqual(s.get_decoded(), {'x': 1})
def test_sessionmanager_save(self):
"""
Test SessionManager.save method
"""
# Create a session
self.session['y'] = 1
self.session.save()
s = Session.objects.get(session_key=self.session.session_key)
# Change it
Session.objects.save(s.session_key, {'y': 2}, s.expire_date)
# Clear cache, so that it will be retrieved from DB
del self.session._session_cache
self.assertEqual(self.session['y'], 2)
@override_settings(SESSION_ENGINE="django.contrib.sessions.backends.db")
def test_clearsessions_command(self):
"""
Test clearsessions command for clearing expired sessions.
"""
self.assertEqual(0, Session.objects.count())
# One object in the future
self.session['foo'] = 'bar'
self.session.set_expiry(3600)
self.session.save()
# One object in the past
other_session = self.backend()
other_session['foo'] = 'bar'
other_session.set_expiry(-3600)
other_session.save()
# Two sessions are in the database before clearsessions...
self.assertEqual(2, Session.objects.count())
management.call_command('clearsessions')
# ... and one is deleted.
self.assertEqual(1, Session.objects.count())
@override_settings(USE_TZ=True)
class DatabaseSessionWithTimeZoneTests(DatabaseSessionTests):
pass
class CacheDBSessionTests(SessionTestsMixin, TestCase):
backend = CacheDBSession
@unittest.skipIf('DummyCache' in
settings.CACHES[settings.SESSION_CACHE_ALIAS]['BACKEND'],
"Session saving tests require a real cache backend")
def test_exists_searches_cache_first(self):
self.session.save()
with self.assertNumQueries(0):
self.assertTrue(self.session.exists(self.session.session_key))
def test_load_overlong_key(self):
# Some backends might issue a warning
with warnings.catch_warnings():
warnings.simplefilter("ignore")
self.session._session_key = (string.ascii_letters + string.digits) * 20
self.assertEqual(self.session.load(), {})
@override_settings(USE_TZ=True)
class CacheDBSessionWithTimeZoneTests(CacheDBSessionTests):
pass
# Don't need DB flushing for these tests, so can use unittest.TestCase as base class
class FileSessionTests(SessionTestsMixin, unittest.TestCase):
backend = FileSession
def setUp(self):
# Do file session tests in an isolated directory, and kill it after we're done.
self.original_session_file_path = settings.SESSION_FILE_PATH
self.temp_session_store = settings.SESSION_FILE_PATH = tempfile.mkdtemp()
# Reset the file session backend's internal caches
if hasattr(self.backend, '_storage_path'):
del self.backend._storage_path
super(FileSessionTests, self).setUp()
def tearDown(self):
super(FileSessionTests, self).tearDown()
settings.SESSION_FILE_PATH = self.original_session_file_path
shutil.rmtree(self.temp_session_store)
@override_settings(
SESSION_FILE_PATH="/if/this/directory/exists/you/have/a/weird/computer")
def test_configuration_check(self):
del self.backend._storage_path
# Make sure the file backend checks for a good storage dir
self.assertRaises(ImproperlyConfigured, self.backend)
def test_invalid_key_backslash(self):
# This key should be refused and a new session should be created
self.assertTrue(self.backend("a\\b\\c").load())
def test_invalid_key_backslash(self):
# Ensure we don't allow directory-traversal.
# This is tested directly on _key_to_file, as load() will swallow
# a SuspiciousOperation in the same way as an IOError - by creating
# a new session, making it unclear whether the slashes were detected.
self.assertRaises(SuspiciousOperation,
self.backend()._key_to_file, "a\\b\\c")
def test_invalid_key_forwardslash(self):
# Ensure we don't allow directory-traversal
self.assertRaises(SuspiciousOperation,
self.backend()._key_to_file, "a/b/c")
@override_settings(SESSION_ENGINE="django.contrib.sessions.backends.file")
def test_clearsessions_command(self):
"""
Test clearsessions command for clearing expired sessions.
"""
storage_path = self.backend._get_storage_path()
file_prefix = settings.SESSION_COOKIE_NAME
def count_sessions():
return len([session_file for session_file in os.listdir(storage_path)
if session_file.startswith(file_prefix)])
self.assertEqual(0, count_sessions())
# One object in the future
self.session['foo'] = 'bar'
self.session.set_expiry(3600)
self.session.save()
# One object in the past
other_session = self.backend()
other_session['foo'] = 'bar'
other_session.set_expiry(-3600)
other_session.save()
# Two sessions are in the filesystem before clearsessions...
self.assertEqual(2, count_sessions())
management.call_command('clearsessions')
# ... and one is deleted.
self.assertEqual(1, count_sessions())
class CacheSessionTests(SessionTestsMixin, unittest.TestCase):
backend = CacheSession
def test_load_overlong_key(self):
# Some backends might issue a warning
with warnings.catch_warnings():
warnings.simplefilter("ignore")
self.session._session_key = (string.ascii_letters + string.digits) * 20
self.assertEqual(self.session.load(), {})
def test_default_cache(self):
self.session.save()
self.assertNotEqual(get_cache('default').get(self.session.cache_key), None)
@override_settings(CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
},
'sessions': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
}, SESSION_CACHE_ALIAS='sessions')
def test_non_default_cache(self):
        # Re-initialize the session backend to make use of overridden settings.
self.session = self.backend()
self.session.save()
self.assertEqual(get_cache('default').get(self.session.cache_key), None)
self.assertNotEqual(get_cache('sessions').get(self.session.cache_key), None)
class SessionMiddlewareTests(unittest.TestCase):
@override_settings(SESSION_COOKIE_SECURE=True)
def test_secure_session_cookie(self):
request = RequestFactory().get('/')
response = HttpResponse('Session test')
middleware = SessionMiddleware()
        # Simulate a request that modifies the session
middleware.process_request(request)
request.session['hello'] = 'world'
# Handle the response through the middleware
response = middleware.process_response(request, response)
self.assertTrue(
response.cookies[settings.SESSION_COOKIE_NAME]['secure'])
@override_settings(SESSION_COOKIE_HTTPONLY=True)
def test_httponly_session_cookie(self):
request = RequestFactory().get('/')
response = HttpResponse('Session test')
middleware = SessionMiddleware()
        # Simulate a request that modifies the session
middleware.process_request(request)
request.session['hello'] = 'world'
# Handle the response through the middleware
response = middleware.process_response(request, response)
self.assertTrue(
response.cookies[settings.SESSION_COOKIE_NAME]['httponly'])
self.assertIn('httponly',
str(response.cookies[settings.SESSION_COOKIE_NAME]))
@override_settings(SESSION_COOKIE_HTTPONLY=False)
def test_no_httponly_session_cookie(self):
request = RequestFactory().get('/')
response = HttpResponse('Session test')
middleware = SessionMiddleware()
        # Simulate a request that modifies the session
middleware.process_request(request)
request.session['hello'] = 'world'
# Handle the response through the middleware
response = middleware.process_response(request, response)
self.assertFalse(response.cookies[settings.SESSION_COOKIE_NAME]['httponly'])
self.assertNotIn('httponly',
str(response.cookies[settings.SESSION_COOKIE_NAME]))
def test_session_save_on_500(self):
request = RequestFactory().get('/')
response = HttpResponse('Horrible error')
response.status_code = 500
middleware = SessionMiddleware()
        # Simulate a request that modifies the session
middleware.process_request(request)
request.session['hello'] = 'world'
# Handle the response through the middleware
response = middleware.process_response(request, response)
# Check that the value wasn't saved above.
self.assertNotIn('hello', request.session.load())
class CookieSessionTests(SessionTestsMixin, TestCase):
backend = CookieSession
def test_save(self):
"""
This test tested exists() in the other session backends, but that
doesn't make sense for us.
"""
pass
def test_cycle(self):
"""
This test tested cycle_key() which would create a new session
key for the same session data. But we can't invalidate previously
signed cookies (other than letting them expire naturally) so
testing for this behavior is meaningless.
"""
pass
@unittest.expectedFailure
def test_actual_expiry(self):
# The cookie backend doesn't handle non-default expiry dates, see #19201
super(CookieSessionTests, self).test_actual_expiry()
|
the-stack_0_25060
|
# -*- coding: utf-8 -*-
import dask.array
from . import _utils
def _binary_op(func,
image,
structure=None,
iterations=1,
mask=None,
origin=0,
brute_force=False,
**kwargs):
image = (image != 0)
structure = _utils._get_structure(image, structure)
iterations = _utils._get_iterations(iterations)
mask = _utils._get_mask(image, mask)
origin = _utils._get_origin(structure.shape, origin)
brute_force = _utils._get_brute_force(brute_force)
depth = _utils._get_depth(structure.shape, origin)
depth, boundary = _utils._get_depth_boundary(structure.ndim, depth, "none")
result = image
for i in range(iterations):
iter_result = result.map_overlap(
func,
depth=depth,
boundary=boundary,
dtype=bool,
meta=image._meta,
structure=structure,
origin=origin,
**kwargs
)
result = dask.array.where(mask, iter_result, result)
return result
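# A minimal usage sketch (assumptions: scipy is installed and the private
# _utils helpers resolve default structure/mask/origin values, as in
# dask-image). It runs a chunked binary dilation by handing scipy's ndimage
# kernel to _binary_op; nothing here is part of the original module API.
if __name__ == "__main__":
    import numpy as np
    import scipy.ndimage

    x = dask.array.from_array(np.random.rand(64, 64) > 0.95, chunks=32)
    dilated = _binary_op(scipy.ndimage.binary_dilation, x, iterations=2)
    print(int(dilated.compute().sum()))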
|
the-stack_0_25061
|
from setuptools import setup, find_packages
import os
import sys
__version__ = '0.0.6'
__requirements__ = [
"boto3==1.2.3",
"click==6.2",
"pluginbase==0.3",
"PyYAML==3.11",
]
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
os.system('python setup.py bdist_wheel upload')
sys.exit()
setup(
name='bluecanary',
version=__version__,
packages=find_packages(),
include_package_data=True,
install_requires=__requirements__,
entry_points='''
[console_scripts]
bluecanary=bluecanary.scripts.bluecanary:cli
''',
)
|
the-stack_0_25062
|
import datetime
from unittest.mock import Mock, patch
import pytest
from dateutil.relativedelta import relativedelta
from sunpy.data.test import rootdir
from sunpy.net.scraper import Scraper, get_timerange_from_exdict
from sunpy.time import TimeRange, parse_time
PATTERN_EXAMPLES = [
('%b%y', relativedelta(months=1)),
('%m%y', relativedelta(months=1)),
('%H%d', relativedelta(hours=1)),
('%y%b', relativedelta(months=1)),
]
def testDirectoryDatePattern():
s = Scraper('%Y/%m/%d/%Y%m%d_%H%M%S_59.fit.gz')
testpath = '2014/03/05/20140305_013000_59.fit.gz'
d = parse_time((2014, 3, 5, 1, 30))
assert s.matches(testpath, d)
def testDirectoryDatePatternFalse():
s = Scraper('%Y/%m/%d/%Y%m%d_%H%M%S_59.fit.gz')
testpath = '2013/03/05/20140305_013000_59.fit.gz'
d = parse_time((2014, 3, 5, 1, 30))
assert not s.matches(testpath, d)
def testDirectoryObsPattern():
s = Scraper('%y%m%d/{observatory}_%Y%m%d.fits', observatory='SDO')
testpath = '140305/SDO_20140305.fits'
d = parse_time((2014, 3, 5))
assert s.matches(testpath, d)
def testDirectoryRange():
s = Scraper('%Y/%m/%d/%Y%m%d_%H.fit.gz')
directory_list = ['2009/12/30/', '2009/12/31/', '2010/01/01/',
'2010/01/02/', '2010/01/03/']
timerange = TimeRange('2009-12-30', '2010-01-03')
assert s.range(timerange) == directory_list
def testDirectoryRegex():
# Test for Windows where '\' is a path separator and not part of the regex
s = Scraper('scheme://a.url.with/a/few/forward/slashes/andbacklash\\inthename.ext', regex=True)
timerange = TimeRange('2019-02-01', '2019-02-03')
directory = s.range(timerange)
assert directory == ['scheme://a.url.with/a/few/forward/slashes/']
def testDirectoryRangeFalse():
s = Scraper('%Y%m%d/%Y%m%d_%H.fit.gz')
directory_list = ['20091230/', '20091231/', '20100101/',
'20090102/', '20090103/']
timerange = TimeRange('2009/12/30', '2010/01/03')
assert s.range(timerange) != directory_list
def testNoDateDirectory():
s = Scraper('mySpacecraft/myInstrument/xMinutes/aaa%y%b.ext')
directory_list = ['mySpacecraft/myInstrument/xMinutes/']
timerange = TimeRange('2009/11/20', '2010/01/03')
assert s.range(timerange) == directory_list
@pytest.mark.parametrize('pattern, mintime', PATTERN_EXAMPLES)
def test_smallerPattern(pattern, mintime):
assert mintime == Scraper('')._smallerPattern(pattern)
def testDirectoryRangeHours():
s = Scraper('%Y%m%d_%H/%H%M.csv')
timerange = TimeRange('2009-12-31T23:40:00', '2010-01-01T01:15:00')
assert len(s.range(timerange)) == 3 # 3 directories (1 per hour)
def testDirectoryRange_single():
s = Scraper('%Y%m%d/%H_%M.csv')
startdate = parse_time((2010, 10, 10, 5, 0))
enddate = parse_time((2010, 10, 10, 7, 0))
timerange = TimeRange(startdate, enddate)
assert len(s.range(timerange)) == 1
def testDirectoryRange_Month():
s = Scraper('%Y%m/%d/%j_%H.txt')
startdate = parse_time((2008, 2, 20, 10))
enddate = parse_time((2008, 3, 2, 5))
timerange = TimeRange(startdate, enddate)
assert len(s.range(timerange)) == 12
startdate = parse_time((2009, 2, 20, 10))
enddate = parse_time((2009, 3, 2, 5))
timerange = TimeRange(startdate, enddate)
assert len(s.range(timerange)) == 11
def testNoDirectory():
s = Scraper('files/%Y%m%d_%H%M.dat')
startdate = parse_time((2010, 1, 10, 20, 30))
enddate = parse_time((2010, 1, 20, 20, 30))
timerange = TimeRange(startdate, enddate)
assert len(s.range(timerange)) == 1
def testExtractDates_usingPattern():
# Standard pattern
s = Scraper('data/%Y/%m/%d/fits/swap/swap_00174_fd_%Y%m%d_%H%M%S.fts.gz')
testURL = 'data/2014/05/14/fits/swap/swap_00174_fd_20140514_200135.fts.gz'
timeURL = parse_time((2014, 5, 14, 20, 1, 35))
assert s._extractDateURL(testURL) == timeURL
# Not-full repeated pattern
s = Scraper('data/%Y/fits/swap/swap_00174_fd_%Y%m%d_%H%M%S.fts.gz')
testURL = 'data/2014/fits/swap/swap_00174_fd_20140514_200135.fts.gz'
timeURL = parse_time((2014, 5, 14, 20, 1, 35))
assert s._extractDateURL(testURL) == timeURL
def testExtractDates_notSeparators():
s = Scraper('data/%Y/%m/swap%m%d_%H%M%S')
testURL = 'data/2014/05/swap0514_200135'
timeURL = parse_time((2014, 5, 14, 20, 1, 35))
assert s._extractDateURL(testURL) == timeURL
def testExtractDates_notSeparators_andSimilar():
s = Scraper('data/%Y/Jun%b%d_%H%M%S')
testURL = 'data/2014/JunJun14_200135'
timeURL = parse_time((2014, 6, 14, 20, 1, 35))
assert s._extractDateURL(testURL) == timeURL
testURL = 'data/2014/JunMay14_200135'
timeURL = parse_time((2014, 5, 14, 20, 1, 35))
assert s._extractDateURL(testURL) == timeURL
# and testing with the month afterwards
s = Scraper('data/%Y/%dJun%b_%H%M%S')
testURL = 'data/2014/14JunJun_200135'
timeURL = parse_time((2014, 6, 14, 20, 1, 35))
assert s._extractDateURL(testURL) == timeURL
def testURL_pattern():
s = Scraper('fd_%Y%m%d_%H%M%S.fts')
assert s._URL_followsPattern('fd_20130410_231211.fts')
assert not s._URL_followsPattern('fd_20130410_231211.fts.gz')
assert not s._URL_followsPattern('fd_20130410_ar_231211.fts.gz')
def testURL_patternMillisecondsGeneric():
s = Scraper('fd_%Y%m%d_%H%M%S_%e.fts')
assert s._URL_followsPattern('fd_20130410_231211_119.fts')
assert not s._URL_followsPattern('fd_20130410_231211.fts.gz')
assert not s._URL_followsPattern('fd_20130410_ar_231211.fts.gz')
def testURL_patternMillisecondsZeroPadded():
# Asserts solution to ticket #1954.
# Milliseconds must be zero-padded in order to match URL lengths.
now_mock = Mock(return_value=datetime.datetime(2019, 4, 19, 0, 0, 0, 4009))
with patch('sunpy.net.scraper.datetime', now=now_mock):
s = Scraper('fd_%Y%m%d_%H%M%S_%e.fts')
now_mock.assert_called_once()
assert s.now == 'fd_20190419_000000_004.fts'
def testFilesRange_sameDirectory_local():
s = Scraper('/'.join(['file:/', str(rootdir),
'EIT_header', 'efz%Y%m%d.%H%M%S_s.header']))
startdate = parse_time((2004, 3, 1, 4, 0))
enddate = parse_time((2004, 3, 1, 6, 30))
assert len(s.filelist(TimeRange(startdate, enddate))) == 3
startdate = parse_time((2010, 1, 10, 20, 30))
enddate = parse_time((2010, 1, 20, 20, 30))
assert len(s.filelist(TimeRange(startdate, enddate))) == 0
@pytest.mark.remote_data
def testFilesRange_sameDirectory_remote():
pattern = ('http://solarmonitor.org/data/%Y/%m/%d/'
'fits/{instrument}/'
'{instrument}_00174_fd_%Y%m%d_%H%M%S.fts.gz')
s = Scraper(pattern, instrument='swap')
startdate = parse_time((2014, 5, 14, 0, 0))
enddate = parse_time((2014, 5, 14, 6, 30))
timerange = TimeRange(startdate, enddate)
assert len(s.filelist(timerange)) == 2
startdate = parse_time((2014, 5, 14, 21, 0))
enddate = parse_time((2014, 5, 14, 23, 30))
timerange = TimeRange(startdate, enddate)
assert len(s.filelist(timerange)) == 0
@pytest.mark.remote_data
def testFilesRange_sameDirectory_months_remote():
pattern = ('http://www.srl.caltech.edu/{spacecraft}/DATA/{instrument}/'
'Ahead/1minute/AeH%y%b.1m')
s = Scraper(pattern, spacecraft='STEREO', instrument='HET')
startdate = parse_time((2007, 8, 1))
enddate = parse_time((2007, 9, 10))
timerange = TimeRange(startdate, enddate)
assert len(s.filelist(timerange)) == 2
@pytest.mark.remote_data
def test_ftp():
pattern = 'ftp://solar-pub.nao.ac.jp/pub/nsro/norh/data/tcx/%Y/%m/tca%y%m%d'
s = Scraper(pattern)
timerange = TimeRange('2016/5/18 15:28:00', '2016/5/20 16:30:50')
urls = s.filelist(timerange)
assert urls[0] == ('ftp://solar-pub.nao.ac.jp'
'/pub/nsro/norh/data/tcx/2016/05/tca160519')
assert len(urls) == 2
@pytest.mark.remote_data
def test_filelist_url_missing_directory():
# Asserts solution to ticket #2684.
# Attempting to access data for the year 1960 results in a 404, so no files are returned.
pattern = 'http://lasp.colorado.edu/eve/data_access/evewebdataproducts/level2/%Y/%j/'
s = Scraper(pattern)
timerange = TimeRange('1960/01/01 00:00:00', '1960/01/02 00:00:00')
assert len(s.filelist(timerange)) == 0
@pytest.mark.remote_data
def test_filelist_relative_hrefs():
# the url opened by the scraper from below pattern contains some links which don't have hrefs
pattern = 'http://www.bbso.njit.edu/pub/archive/%Y/%m/%d/bbso_halph_fr_%Y%m%d_%H%M%S.fts'
s = Scraper(pattern)
timerange = TimeRange('2016/5/18 15:28:00', '2016/5/18 16:30:00')
assert s.domain == 'http://www.bbso.njit.edu/'
# hrefs are relative to domain here, not to the directory they are present in
# this checks that `scraper.filelist` returns fileurls relative to the domain
fileurls = s.filelist(timerange)
assert fileurls[1] == s.domain + 'pub/archive/2016/05/18/bbso_halph_fr_20160518_160033.fts'
@pytest.mark.parametrize('pattern, check_file', [
(r'MyFile_%Y_%M_%e\.(\D){2}\.fits', 'MyFile_2020_55_234.aa.fits'),
(r'(\d){5}_(\d){2}\.fts', '01122_25.fts'),
(r'_%Y%m%d__%ec(\d){5}_(\d){2}\s.fts', '_20201535__012c12345_33 .fts')])
def test_regex(pattern, check_file):
s = Scraper(pattern, regex=True)
assert s._URL_followsPattern(check_file)
@pytest.mark.remote_data
def test_regex_data():
prefix = r'https://gong2.nso.edu/oQR/zqs/'
pattern = prefix + r'%Y%m/mrzqs%y%m%d/mrzqs%y%m%dt%H%Mc(\d){4}_(\d){3}\.fits.gz'
s = Scraper(pattern, regex=True)
timerange = TimeRange('2020-01-05', '2020-01-06T16:00:00')
assert s._URL_followsPattern(prefix + '202001/mrzqs200106/mrzqs200106t1514c2226_297.fits.gz')
assert len(s.filelist(timerange)) == 37
@pytest.mark.remote_data
def test_extract_files_meta():
baseurl0 = r'ftp://solar-pub.nao.ac.jp/pub/nsro/norh/data/tcx/%Y/%m/(\w){3}%y%m%d'
extractpattern0 = '{}/tcx/{year:4d}/{month:2d}/{wave}{:4d}{day:2d}'
s0 = Scraper(baseurl0, regex=True)
timerange0 = TimeRange('2020/1/1 4:00', '2020/1/2')
matchdict = {'wave': ['tca', 'tcz']}
metalist0 = s0._extract_files_meta(timerange0, extractpattern0, matcher=matchdict)
assert metalist0[0]['wave'] == 'tca'
assert metalist0[3]['wave'] == 'tcz'
assert metalist0[1]['day'] == 2
prefix = r'https://gong2.nso.edu/oQR/zqs/'
baseurl1 = prefix + r'%Y%m/mrzqs%y%m%d/mrzqs%y%m%dt%H%Mc(\d){4}_(\d){3}\.fits.gz'
extractpattern1 = ('{}/zqs/{year:4d}{month:2d}/mrzqs{:4d}{day:2d}/mrzqs{:6d}t'
'{hour:2d}{minute:2d}c{CAR_ROT:4d}_{:3d}.fits.gz')
s1 = Scraper(baseurl1, regex=True)
timerange1 = TimeRange('2020-01-05', '2020-01-05T16:00:00')
metalist1 = s1._extract_files_meta(timerange1, extractpattern1)
urls = s1.filelist(timerange1)
assert metalist1[3]['CAR_ROT'] == 2226
assert metalist1[-1]['url'] == urls[-1]
@pytest.mark.parametrize('exdict, start, end', [
({"year": 2000}, '2000-01-01 00:00:00', '2000-12-31 23:59:59.999000'),
({"year": 2016, "month": 2}, '2016-02-01 00:00:00', '2016-02-29 23:59:59.999000'),
({'year': 2019, 'month': 2, 'day': 28}, '2019-02-28 00:00:00', '2019-02-28 23:59:59.999000'),
({'year': 2020, 'month': 7, 'day': 31, 'hour': 23, 'minute': 59, 'second': 59},
'2020-07-31 23:59:59', '2020-07-31 23:59:59.999000')])
def test_get_timerange_with_extractor(exdict, start, end):
tr = TimeRange(start, end)
file_timerange = get_timerange_from_exdict(exdict)
assert file_timerange == tr
|
the-stack_0_25065
|
# Very Simple CLI example to get indicator details from Alienvault OTX
from OTXv2 import OTXv2
import IndicatorTypes
import argparse
# Your API key
API_KEY = ''
OTX_SERVER = 'https://otx.alienvault.com/'
otx = OTXv2(API_KEY, server=OTX_SERVER)
parser = argparse.ArgumentParser(description='OTX CLI Example')
parser.add_argument('-i', '--ip', help='IP eg; 4.4.4.4', required=False)
parser.add_argument(
'-d', '--domain', help='Domain eg; alienvault.com', required=False)
parser.add_argument('-ho', '--hostname',
help='Hostname eg; www.alienvault.com', required=False)
parser.add_argument(
'-u', '--url', help='URL eg; http://www.alienvault.com', required=False)
parser.add_argument(
'-m', '--md5', help='MD5 Hash of a file eg; 7b42b35832855ab4ff37ae9b8fa9e571', required=False)
parser.add_argument(
'-p', '--pulse', help='Search pulses for a string eg; Dridex', required=False)
parser.add_argument('-s', '--subscribed', help='Get pulses you are subscribed to',
required=False, action='store_true')
args = vars(parser.parse_args())
if args["ip"]:
print (str(otx.get_indicator_details_full(IndicatorTypes.IPv4, args["ip"])))
if args["domain"]:
print (str(otx.get_indicator_details_full(IndicatorTypes.DOMAIN, args["domain"])))
if args["hostname"]:
print (str(otx.get_indicator_details_full(IndicatorTypes.HOSTNAME, args["hostname"])))
if args["url"]:
print (str(otx.get_indicator_details_full(IndicatorTypes.URL, args["url"])))
if args["md5"]:
print (str(otx.get_indicator_details_full(IndicatorTypes.FILE_HASH_MD5, args["md5"])))
if args["pulse"]:
result = otx.search_pulses(args["pulse"])
print (str(result.get('results')))
if args["subscribed"]:
print (str(otx.getall(max_page=3, limit=5)))
|
the-stack_0_25070
|
# Lint as: python3
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sets up TFMA package for Jupyter notebook integration.
The widget is based on the template generated from jupyter-widget's
widget-cookiecutter.
"""
import os
import platform
import subprocess
import sys
from setuptools import Command
from setuptools import find_packages
from setuptools import setup
from setuptools.command.build_py import build_py as _build_py
from setuptools.command.develop import develop as _develop
from setuptools.command.egg_info import egg_info
from setuptools.command.sdist import sdist
# pylint: disable=g-bad-import-order
# It is recommended to import setuptools prior to importing distutils to avoid
# using legacy behavior from distutils.
# https://setuptools.readthedocs.io/en/latest/history.html#v48-0-0
from distutils import log
from distutils import spawn
# pylint: enable=g-bad-import-order
# Find the Protocol Compiler.
if 'PROTOC' in os.environ and os.path.exists(os.environ['PROTOC']):
protoc = os.environ['PROTOC']
elif os.path.exists('../src/protoc'):
protoc = '../src/protoc'
elif os.path.exists('../src/protoc.exe'):
protoc = '../src/protoc.exe'
elif os.path.exists('../vsprojects/Debug/protoc.exe'):
protoc = '../vsprojects/Debug/protoc.exe'
elif os.path.exists('../vsprojects/Release/protoc.exe'):
protoc = '../vsprojects/Release/protoc.exe'
else:
protoc = spawn.find_executable('protoc')
# Get version from version module.
with open('tensorflow_model_analysis/version.py') as fp:
globals_dict = {}
exec(fp.read(), globals_dict) # pylint: disable=exec-used
__version__ = globals_dict['VERSION']
here = os.path.dirname(os.path.abspath(__file__))
node_root = os.path.join(here, 'tensorflow_model_analysis', 'notebook',
'jupyter', 'js')
is_repo = os.path.exists(os.path.join(here, '.git'))
npm_path = os.pathsep.join([
os.path.join(node_root, 'node_modules', '.bin'),
os.environ.get('PATH', os.defpath),
])
# Set this to true if ipywidgets js should be built. This would require nodejs.
build_js = os.environ.get('BUILD_JS') is not None
log.set_verbosity(log.DEBUG)
log.info('setup.py entered')
log.info('$PATH=%s' % os.environ['PATH'])
def generate_proto(source, require=True):
"""Invokes the Protocol Compiler to generate a _pb2.py."""
# Does nothing if the output already exists and is newer than
# the input.
if not require and not os.path.exists(source):
return
output = source.replace('.proto', '_pb2.py').replace('../src/', '')
if (not os.path.exists(output) or
(os.path.exists(source) and
os.path.getmtime(source) > os.path.getmtime(output))):
print('Generating %s...' % output)
if not os.path.exists(source):
sys.stderr.write("Can't find required file: %s\n" % source)
sys.exit(-1)
if protoc is None:
sys.stderr.write(
'protoc is not installed nor found in ../src. Please compile it '
'or install the binary package.\n')
sys.exit(-1)
protoc_command = [protoc, '-I../src', '-I.', '--python_out=.', source]
if subprocess.call(protoc_command) != 0:
sys.exit(-1)
def generate_tfma_protos():
"""Generate necessary .proto file if it doesn't exist."""
generate_proto('tensorflow_model_analysis/proto/config.proto', False)
generate_proto('tensorflow_model_analysis/proto/metrics_for_slice.proto',
False)
generate_proto('tensorflow_model_analysis/proto/validation_result.proto',
False)
class build_py(_build_py): # pylint: disable=invalid-name
"""Build necessary dependencies."""
def run(self):
generate_tfma_protos()
# _build_py is an old-style class, so super() doesn't work.
_build_py.run(self)
class develop(_develop): # pylint: disable=invalid-name
"""Build necessary dependencies in develop mode."""
def run(self):
generate_tfma_protos()
_develop.run(self)
def js_prerelease(command, strict=False):
"""Decorator for building minified js/css prior to another command."""
class DecoratedCommand(command):
"""Decorated command."""
def run(self):
jsdeps = self.distribution.get_command_obj('jsdeps')
if not is_repo and all(os.path.exists(t) for t in jsdeps.targets):
# sdist, nothing to do
command.run(self)
return
try:
self.distribution.run_command('jsdeps')
except Exception as e: # pylint: disable=broad-except
missing = [t for t in jsdeps.targets if not os.path.exists(t)]
if strict or missing:
log.warn('rebuilding js and css failed')
if missing:
log.error('missing files: %s' % missing)
raise e
else:
log.warn('rebuilding js and css failed (not a problem)')
log.warn(str(e))
command.run(self)
update_package_data(self.distribution)
return DecoratedCommand
def update_package_data(distribution):
"""update package_data to catch changes during setup."""
build_py_cmd = distribution.get_command_obj('build_py')
# distribution.package_data = find_package_data()
# re-init build_py options which load package_data
build_py_cmd.finalize_options()
class NPM(Command):
"""NPM builder.
Builds the js and css using npm.
"""
description = 'install package.json dependencies using npm'
user_options = []
node_modules = os.path.join(node_root, 'node_modules')
targets = [
os.path.join(here, 'tensorflow_model_analysis', 'static', 'extension.js'),
os.path.join(here, 'tensorflow_model_analysis', 'static', 'index.js'),
os.path.join(here, 'tensorflow_model_analysis', 'static',
'vulcanized_tfma.js'),
]
def initialize_options(self):
pass
def finalize_options(self):
pass
def get_npm_name(self):
npm_name = 'npm'
if platform.system() == 'Windows':
npm_name = 'npm.cmd'
return npm_name
def has_npm(self):
npm_name = self.get_npm_name()
try:
subprocess.check_call([npm_name, '--version'])
return True
except: # pylint: disable=bare-except
return False
def should_run_npm_install(self):
return self.has_npm()
def run(self):
if not build_js:
return
has_npm = self.has_npm()
if not has_npm:
log.error(
"`npm` unavailable. If you're running this command using sudo, make"
' sure `npm` is available to sudo')
env = os.environ.copy()
env['PATH'] = npm_path
if self.should_run_npm_install():
log.info(
'Installing build dependencies with npm. This may take a while...')
npm_name = self.get_npm_name()
subprocess.check_call([npm_name, 'install'],
cwd=node_root,
stdout=sys.stdout,
stderr=sys.stderr)
os.utime(self.node_modules, None)
for t in self.targets:
if not os.path.exists(t):
msg = 'Missing file: %s' % t
if not has_npm:
msg += ('\nnpm is required to build a development version of a widget'
' extension')
raise ValueError(msg)
# update package data in case this created new files
update_package_data(self.distribution)
def _make_extra_packages_tfjs():
# Packages needed for tfjs.
return [
'tensorflowjs>=3.6.0,<4',
]
def select_constraint(default, nightly=None, git_master=None):
"""Select dependency constraint based on TFX_DEPENDENCY_SELECTOR env var."""
selector = os.environ.get('TFX_DEPENDENCY_SELECTOR')
if selector == 'UNCONSTRAINED':
return ''
elif selector == 'NIGHTLY' and nightly is not None:
return nightly
elif selector == 'GIT_MASTER' and git_master is not None:
return git_master
else:
return default
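# Illustrative example (not part of the original setup.py): with
# TFX_DEPENDENCY_SELECTOR=NIGHTLY set in the environment,
#   select_constraint(default='>=1.2.0,<1.3.0', nightly='>=1.3.0.dev')
# evaluates to '>=1.3.0.dev'; with the variable unset it falls back to the default.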
# Get the long description from the README file.
with open('README.md') as fp:
_LONG_DESCRIPTION = fp.read()
setup_args = {
'name': 'tensorflow_model_analysis',
'version': __version__,
'description': 'A library for analyzing TensorFlow models',
'long_description': _LONG_DESCRIPTION,
'long_description_content_type': 'text/markdown',
'include_package_data': True,
'data_files': [('share/jupyter/nbextensions/tensorflow_model_analysis', [
'tensorflow_model_analysis/static/extension.js',
'tensorflow_model_analysis/static/index.js',
'tensorflow_model_analysis/static/index.js.map',
'tensorflow_model_analysis/static/vulcanized_tfma.js',
]),],
# Make sure to sync the versions of common dependencies (numpy, six, and
# protobuf) with TF.
'install_requires': [
# Sort alphabetically
'absl-py>=0.9,<0.13',
'apache-beam[gcp]>=2.32,<3',
'ipython>=7,<8',
'ipywidgets>=7,<8',
'numpy>=1.16,<1.20',
'pandas>=1.0,<2',
'protobuf>=3.13,<4',
'pyarrow>=1,<3',
'scipy>=1.4.1,<2',
'six>=1.12,<2',
'tensorflow>=1.15.2,!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,<3',
'tensorflow-metadata' + select_constraint(
default='>=1.2.0,<1.3.0',
nightly='>=1.3.0.dev',
git_master='@git+https://github.com/tensorflow/metadata@master'),
'tfx-bsl' + select_constraint(
default='>=1.3.0,<1.4.0',
nightly='>=1.4.0.dev',
git_master='@git+https://github.com/tensorflow/tfx-bsl@master'),
],
'extras_require': {
'all': _make_extra_packages_tfjs(),
},
'python_requires': '>=3.7,<4',
'packages': find_packages(),
'zip_safe': False,
'cmdclass': {
'build_py': js_prerelease(build_py),
'develop': js_prerelease(develop),
'egg_info': js_prerelease(egg_info),
'sdist': js_prerelease(sdist, strict=True),
'jsdeps': NPM,
},
'author': 'Google LLC',
'author_email': '[email protected]',
'license': 'Apache 2.0',
'classifiers': [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3 :: Only',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
'namespace_packages': [],
'requires': [],
'keywords': 'tensorflow model analysis tfx',
'url': 'https://www.tensorflow.org/tfx/model_analysis/get_started',
'download_url': 'https://github.com/tensorflow/model-analysis/tags',
}
setup(**setup_args)
|
the-stack_0_25071
|
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""add_l7_tables
Revision ID: 3543deab1547
Revises: 6aee0434f911
Create Date: 2015-02-05 10:50:15.606420
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '3543deab1547'
down_revision = '6aee0434f911'
l7rule_type = sa.Enum("HOST_NAME", "PATH", "FILE_TYPE", "HEADER", "COOKIE",
name="l7rule_typesv2")
l7rule_compare_type = sa.Enum("REGEX", "STARTS_WITH", "ENDS_WITH", "CONTAINS",
"EQUAL_TO", name="l7rule_compare_typesv2")
l7policy_action_type = sa.Enum("REJECT", "REDIRECT_TO_URL", "REDIRECT_TO_POOL",
name="l7policy_action_typesv2")
def upgrade():
op.create_table(
u'lbaas_l7policies',
sa.Column(u'tenant_id', sa.String(255), nullable=True),
sa.Column(u'id', sa.String(36), nullable=False),
sa.Column(u'name', sa.String(255), nullable=True),
sa.Column(u'description', sa.String(255), nullable=True),
sa.Column(u'listener_id', sa.String(36), nullable=False),
sa.Column(u'action', l7policy_action_type, nullable=False),
sa.Column(u'redirect_pool_id', sa.String(36), nullable=True),
sa.Column(u'redirect_url', sa.String(255), nullable=True),
sa.Column(u'position', sa.Integer, nullable=False),
sa.Column(u'provisioning_status', sa.String(16), nullable=False),
sa.Column(u'admin_state_up', sa.Boolean(), nullable=False),
sa.PrimaryKeyConstraint(u'id'),
sa.ForeignKeyConstraint([u'listener_id'],
[u'lbaas_listeners.id']),
sa.ForeignKeyConstraint([u'redirect_pool_id'],
[u'lbaas_pools.id'])
)
op.create_table(
u'lbaas_l7rules',
sa.Column(u'tenant_id', sa.String(255), nullable=True),
sa.Column(u'id', sa.String(36), nullable=False),
sa.Column(u'l7policy_id', sa.String(36), nullable=False),
sa.Column(u'type', l7rule_type, nullable=False),
sa.Column(u'compare_type', l7rule_compare_type, nullable=False),
sa.Column(u'invert', sa.Boolean(), nullable=False),
sa.Column(u'key', sa.String(255), nullable=True),
sa.Column(u'value', sa.String(255), nullable=False),
sa.Column(u'provisioning_status', sa.String(16), nullable=False),
sa.Column(u'admin_state_up', sa.Boolean(), nullable=False),
sa.PrimaryKeyConstraint(u'id'),
sa.ForeignKeyConstraint([u'l7policy_id'],
[u'lbaas_l7policies.id'])
)
|
the-stack_0_25072
|
#!/usr/bin/env python
"""
Pymodbus Asynchronous Client Examples
--------------------------------------------------------------------------
The following is an example of how to use the asynchronous modbus
client implementation from pymodbus with asyncio.
The example is only valid on Python 3.4 and above
"""
from pymodbus.compat import IS_PYTHON3, PYTHON_VERSION
if IS_PYTHON3 and PYTHON_VERSION >= (3, 4):
import asyncio
import logging
# ----------------------------------------------------------------------- #
# Import the required asynchronous client
# ----------------------------------------------------------------------- #
from pymodbus.client.asynchronous.udp import AsyncModbusUDPClient as ModbusClient
# from pymodbus.client.asynchronous.udp import (
# AsyncModbusUDPClient as ModbusClient)
from pymodbus.client.asynchronous import schedulers
else:
import sys
    sys.stderr.write("This example needs to be run only on python 3.4 and above\n")
sys.exit(1)
from threading import Thread
import time
# --------------------------------------------------------------------------- #
# configure the client logging
# --------------------------------------------------------------------------- #
logging.basicConfig()
log = logging.getLogger()
log.setLevel(logging.DEBUG)
# --------------------------------------------------------------------------- #
# specify slave to query
# --------------------------------------------------------------------------- #
# The slave to query is specified in an optional parameter for each
# individual request. This can be done by specifying the `unit` parameter
# which defaults to `0x00`
# --------------------------------------------------------------------------- #
UNIT = 0x01
async def start_async_test(client):
# ----------------------------------------------------------------------- #
# specify slave to query
# ----------------------------------------------------------------------- #
# The slave to query is specified in an optional parameter for each
# individual request. This can be done by specifying the `unit` parameter
# which defaults to `0x00`
# ----------------------------------------------------------------------- #
log.debug("Reading Coils")
rr = await client.read_coils(1, 1, unit=0x01)
# ----------------------------------------------------------------------- #
# example requests
# ----------------------------------------------------------------------- #
# simply call the methods that you would like to use. An example session
# is displayed below along with some assert checks. Note that some modbus
# implementations differentiate holding/input discrete/coils and as such
# you will not be able to write to these, therefore the starting values
# are not known to these tests. Furthermore, some use the same memory
# blocks for the two sets, so a change to one is a change to the other.
# Keep both of these cases in mind when testing as the following will
# _only_ pass with the supplied asynchronous modbus server (script supplied).
# ----------------------------------------------------------------------- #
log.debug("Write to a Coil and read back")
rq = await client.write_coil(0, True, unit=UNIT)
rr = await client.read_coils(0, 1, unit=UNIT)
assert(rq.function_code < 0x80) # test that we are not an error
assert(rr.bits[0] == True) # test the expected value
log.debug("Write to multiple coils and read back- test 1")
rq = await client.write_coils(1, [True]*8, unit=UNIT)
assert(rq.function_code < 0x80) # test that we are not an error
rr = await client.read_coils(1, 21, unit=UNIT)
assert(rr.function_code < 0x80) # test that we are not an error
resp = [True]*21
# If the returned output quantity is not a multiple of eight,
# the remaining bits in the final data byte will be padded with zeros
# (toward the high order end of the byte).
resp.extend([False]*3)
assert(rr.bits == resp) # test the expected value
log.debug("Write to multiple coils and read back - test 2")
rq = await client.write_coils(1, [False]*8, unit=UNIT)
rr = await client.read_coils(1, 8, unit=UNIT)
assert(rq.function_code < 0x80) # test that we are not an error
assert(rr.bits == [False]*8) # test the expected value
log.debug("Read discrete inputs")
rr = await client.read_discrete_inputs(0, 8, unit=UNIT)
assert(rq.function_code < 0x80) # test that we are not an error
log.debug("Write to a holding register and read back")
rq = await client.write_register(1, 10, unit=UNIT)
rr = await client.read_holding_registers(1, 1, unit=UNIT)
assert(rq.function_code < 0x80) # test that we are not an error
assert(rr.registers[0] == 10) # test the expected value
log.debug("Write to multiple holding registers and read back")
rq = await client.write_registers(1, [10]*8, unit=UNIT)
rr = await client.read_holding_registers(1, 8, unit=UNIT)
assert(rq.function_code < 0x80) # test that we are not an error
assert(rr.registers == [10]*8) # test the expected value
log.debug("Read input registers")
rr = await client.read_input_registers(1, 8, unit=UNIT)
assert(rq.function_code < 0x80) # test that we are not an error
arguments = {
'read_address': 1,
'read_count': 8,
'write_address': 1,
'write_registers': [20]*8,
}
log.debug("Read write registeres simulataneously")
rq = await client.readwrite_registers(unit=UNIT, **arguments)
rr = await client.read_holding_registers(1, 8, unit=UNIT)
assert(rq.function_code < 0x80) # test that we are not an error
assert(rq.registers == [20]*8) # test the expected value
assert(rr.registers == [20]*8) # test the expected value
await asyncio.sleep(1)
def run_with_no_loop():
"""
ModbusClient Factory creates a loop.
:return:
"""
log.debug("---------------------RUN_WITH_NO_LOOP-----------------")
loop, client = ModbusClient(schedulers.ASYNC_IO, host='10.0.0.1', port=5020)
loop.run_until_complete(start_async_test(client.protocol))
loop.close()
log.debug("--------DONE RUN_WITH_NO_LOOP-------------")
log.debug("")
if __name__ == '__main__':
# Run with No loop
log.debug("Running Async client")
log.debug("------------------------------------------------------")
run_with_no_loop()
log.debug("")
|
the-stack_0_25076
|
import os
import socket
CONFIG_PATH = '/var/opt/haproxy-local.cfg'
PID_PATH = '/var/run/haproxy-local.pid'
CONFIG_HEADER = '''
global
daemon
maxconn 1024
defaults
mode tcp
timeout connect 7s
timeout server 24d
timeout client 24d
'''
def _is_ip(hostname):
try:
socket.inet_aton(hostname)
return True
except socket.error:
return False
def generate_config_from_mapping(port_to_addresses):
result = CONFIG_HEADER
for port, addresses in port_to_addresses.items():
result += '\tlisten service_{port}\n'.format(**locals())
result += '\t\tbind :::{port} v4v6\n'.format(**locals())
if not addresses:
result += '\t\ttcp-request connection reject\n'
else:
result += _make_server_config(addresses)
result += '\n'
return result
def _make_server_config(addresses):
result = ""
for i, address in enumerate(addresses):
        protocol, host = address.split("://", 1) if "://" in address else ("", address)
result += '\t\tserver server_{i} {host} maxconn 128\n'.format(**locals())
hostname = host.split(':')[0]
if protocol == 'http':
result += '\t\thttp-request del-header Proxy\n'
if not _is_ip(hostname):
result += '\t\thttp-request set-header Host {}\n'.format(host)
result += '\t\tmode {}\n'.format(protocol)
return result
def put_config(config):
with open(CONFIG_PATH, 'w') as haproxy_config_file:
haproxy_config_file.write(config)
def restart():
try:
with open(PID_PATH, 'r') as pid_file:
pid = pid_file.read()
except IOError:
pid = None
command = 'haproxy -f {config_path} -p {pid_path}'.format(config_path=CONFIG_PATH, pid_path=PID_PATH)
if pid:
command += ' -sf {pid}'.format(pid=pid)
os.system(command)
def update_from_mapping(port_to_addresses):
new_config = generate_config_from_mapping(port_to_addresses)
put_config(new_config)
restart()
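# Minimal usage sketch (added, not part of the original module): generate and inspect a
# config without writing CONFIG_PATH or restarting haproxy. The port/backend mapping below
# is purely illustrative.
if __name__ == '__main__':
    example_mapping = {8080: ['http://10.0.0.5:3000'], 9000: []}
    print(generate_config_from_mapping(example_mapping))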
|
the-stack_0_25077
|
import importlib
import os
from functools import partial
from qtpy.QtCore import QSize, Qt, QThread
from qtpy.QtGui import QIcon
from qtpy.QtWidgets import QGridLayout, QMainWindow, QMessageBox, QProgressBar, QToolButton, QWidget
from PartSeg import MASK_NAME, SEGMENTATION_NAME
from PartSeg.common_backend.base_settings import BaseSettings
from PartSeg.common_backend.load_backup import import_config
from PartSeg.common_gui.main_window import BaseMainWindow
from PartSegData import icons_dir
from PartSegImage import TiffImageReader
class Prepare(QThread):
def __init__(self, module):
super().__init__()
self.module = module
self.result = None
self.errors = []
def run(self):
if self.module != "":
from .. import plugins
plugins.register()
main_window_module = importlib.import_module(self.module)
main_window: BaseMainWindow = main_window_module.MainWindow
settings: BaseSettings = main_window.get_setting_class()(main_window_module.CONFIG_FOLDER)
self.errors = settings.load()
reader = TiffImageReader()
im = reader.read(main_window.initial_image_path)
im.file_path = ""
self.result = partial(main_window, settings=settings, initial_image=im)
class MainWindow(QMainWindow):
def __init__(self, title):
super().__init__()
self.setWindowTitle(title)
self.lib_path = ""
self.final_title = ""
analysis_icon = QIcon(os.path.join(icons_dir, "icon.png"))
stack_icon = QIcon(os.path.join(icons_dir, "icon_stack.png"))
self.analysis_button = QToolButton(self)
self.analysis_button.setToolButtonStyle(Qt.ToolButtonTextUnderIcon)
self.analysis_button.setIcon(analysis_icon)
# TODO use more general solution for text wrapping
self.analysis_button.setText(SEGMENTATION_NAME.replace(" ", "\n"))
self.analysis_button.setIconSize(QSize(100, 100))
self.mask_button = QToolButton(self)
self.mask_button.setToolButtonStyle(Qt.ToolButtonTextUnderIcon)
self.mask_button.setIcon(stack_icon)
self.mask_button.setText(MASK_NAME.replace(" ", "\n"))
self.mask_button.setIconSize(QSize(100, 100))
self.analysis_button.clicked.connect(self.launch_analysis)
self.mask_button.clicked.connect(self.launch_mask)
self.progress = QProgressBar()
self.progress.setHidden(True)
layout = QGridLayout()
layout.addWidget(self.progress, 0, 0, 1, 2)
layout.addWidget(self.analysis_button, 1, 1)
layout.addWidget(self.mask_button, 1, 0)
widget = QWidget()
widget.setLayout(layout)
self.setCentralWidget(widget)
self.setWindowIcon(analysis_icon)
self.prepare = None
self.wind = None
def _launch_begin(self):
self.progress.setVisible(True)
self.progress.setRange(0, 0)
self.analysis_button.setDisabled(True)
self.mask_button.setDisabled(True)
import_config()
def launch_analysis(self):
self._launch_begin()
self.lib_path = "PartSeg.segmentation_analysis.main_window"
self.final_title = "PartSeg Segmentation Analysis"
self.prepare = Prepare(self.lib_path)
self.prepare.finished.connect(self.launch)
self.prepare.start()
def launch_mask(self):
self._launch_begin()
self.lib_path = "PartSeg.segmentation_mask.main_window"
self.final_title = "PartSeg Mask Segmentation"
self.prepare = Prepare(self.lib_path)
self.prepare.finished.connect(self.launch)
self.prepare.start()
def window_shown(self):
self.close()
def launch(self):
if self.prepare.result is None:
self.close()
return
if self.prepare.errors:
errors_message = QMessageBox()
            errors_message.setText("There were errors during startup")
errors_message.setInformativeText(
"During load saved state some of data could not be load properly\n"
"The files has prepared backup copies in state directory (Help > State directory)"
)
errors_message.setStandardButtons(QMessageBox.Ok)
text = "\n".join(["File: " + x[0] + "\n" + str(x[1]) for x in self.prepare.errors])
errors_message.setDetailedText(text)
errors_message.exec()
wind = self.prepare.result(title=self.final_title, signal_fun=self.window_shown)
wind.show()
self.wind = wind
|
the-stack_0_25078
|
# Required modules and packages
import cv2 as cv
import mediapipe as mp
import time
import utils, math
import numpy as np
import pygame
from pygame import mixer
# variables
frame_counter =0
CEF_COUNTER =0
TOTAL_BLINKS =0
start_voice= False
counter_right=0
counter_left =0
counter_center =0
# constants
CLOSED_EYES_FRAME =3
FONTS = cv.FONT_HERSHEY_COMPLEX
# initialize mixer
mixer.init()
voice_center = mixer.Sound('center.wav')
# face boundaries indices
FACE_OVAL=[ 10, 338, 297, 332, 284, 251, 389, 356, 454, 323, 361, 288, 397, 365, 379, 378, 400, 377, 152, 148, 176, 149, 150, 136, 172, 58, 132, 93, 234, 127, 162, 21, 54, 103,67, 109]
# lips indices for Landmarks
LIPS=[ 61, 146, 91, 181, 84, 17, 314, 405, 321, 375,291, 308, 324, 318, 402, 317, 14, 87, 178, 88, 95,185, 40, 39, 37,0 ,267 ,269 ,270 ,409, 415, 310, 311, 312, 13, 82, 81, 42, 183, 78 ]
LOWER_LIPS =[61, 146, 91, 181, 84, 17, 314, 405, 321, 375, 291, 308, 324, 318, 402, 317, 14, 87, 178, 88, 95]
UPPER_LIPS=[ 185, 40, 39, 37,0 ,267 ,269 ,270 ,409, 415, 310, 311, 312, 13, 82, 81, 42, 183, 78]
# Left eyes indices
LEFT_EYE =[ 362, 382, 381, 380, 374, 373, 390, 249, 263, 466, 388, 387, 386, 385,384, 398 ]
LEFT_EYEBROW =[ 336, 296, 334, 293, 300, 276, 283, 282, 295, 285 ]
# right eyes indices
RIGHT_EYE=[ 33, 7, 163, 144, 145, 153, 154, 155, 133, 173, 157, 158, 159, 160, 161 , 246 ]
RIGHT_EYEBROW=[ 70, 63, 105, 66, 107, 55, 65, 52, 53, 46 ]
map_face_mesh = mp.solutions.face_mesh
# camera object
camera = cv.VideoCapture(0)
_, frame = camera.read()
img = cv.resize(frame, None, fx=1.5, fy=1.5, interpolation=cv.INTER_CUBIC)
img_height, img_width = img.shape[:2]
print(img_height, img_width)
# video Recording setup
fourcc = cv.VideoWriter_fourcc(*'XVID')
out = cv.VideoWriter('outputgaze.mp4', fourcc, 30.0, (img_width, img_height))
# landmark detection function
def landmarksDetection(img, results, draw=False):
img_height, img_width= img.shape[:2]
# list[(x,y), (x,y)....]
mesh_coord = [(int(point.x * img_width), int(point.y * img_height)) for point in results.multi_face_landmarks[0].landmark]
if draw :
[cv.circle(img, p, 2, (0,255,0), -1) for p in mesh_coord]
# returning the list of tuples for each landmarks
return mesh_coord
# Euclidean distance
def euclaideanDistance(point, point1):
x, y = point
x1, y1 = point1
distance = math.sqrt((x1 - x)**2 + (y1 - y)**2)
return distance
# Blinking Ratio
def blinkRatio(img, landmarks, right_indices, left_indices):
# Right eyes
# horizontal line
rh_right = landmarks[right_indices[0]]
rh_left = landmarks[right_indices[8]]
# vertical line
rv_top = landmarks[right_indices[12]]
rv_bottom = landmarks[right_indices[4]]
# LEFT_EYE
# horizontal line
lh_right = landmarks[left_indices[0]]
lh_left = landmarks[left_indices[8]]
# vertical line
lv_top = landmarks[left_indices[12]]
lv_bottom = landmarks[left_indices[4]]
rhDistance = euclaideanDistance(rh_right, rh_left)
rvDistance = euclaideanDistance(rv_top, rv_bottom)
lvDistance = euclaideanDistance(lv_top, lv_bottom)
lhDistance = euclaideanDistance(lh_right, lh_left)
reRatio = rhDistance/rvDistance
leRatio = lhDistance/lvDistance
ratio = (reRatio+leRatio)/2
return ratio
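# Added note: the ratio is the horizontal/vertical eye extent averaged over both eyes, so it
# grows as the eyelids close; the main loop below treats ratio > 5.5 as a closed-eye frame and
# counts a blink once CLOSED_EYES_FRAME consecutive closed frames have been seen.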
# Eyes Extractor function,
def eyesExtractor(img, right_eye_coords, left_eye_coords):
    # converting the color image to a gray scale image
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
# getting the dimension of image
dim = gray.shape
# creating mask from gray scale dim
mask = np.zeros(dim, dtype=np.uint8)
# drawing Eyes Shape on mask with white color
cv.fillPoly(mask, [np.array(right_eye_coords, dtype=np.int32)], 255)
cv.fillPoly(mask, [np.array(left_eye_coords, dtype=np.int32)], 255)
# showing the mask
# cv.imshow('mask', mask)
# draw eyes image on mask, where white shape is
eyes = cv.bitwise_and(gray, gray, mask=mask)
    # set everything outside the eye mask to gray
# cv.imshow('eyes draw', eyes)
eyes[mask==0] = 155
    # getting minimum and maximum x and y for right and left eyes
# For Right Eye
r_max_x = (max(right_eye_coords, key=lambda item: item[0]))[0]
r_min_x = (min(right_eye_coords, key=lambda item: item[0]))[0]
r_max_y = (max(right_eye_coords, key=lambda item : item[1]))[1]
r_min_y = (min(right_eye_coords, key=lambda item: item[1]))[1]
# For LEFT Eye
l_max_x = (max(left_eye_coords, key=lambda item: item[0]))[0]
l_min_x = (min(left_eye_coords, key=lambda item: item[0]))[0]
l_max_y = (max(left_eye_coords, key=lambda item : item[1]))[1]
l_min_y = (min(left_eye_coords, key=lambda item: item[1]))[1]
    # cropping the eyes from the mask
cropped_right = eyes[r_min_y: r_max_y, r_min_x: r_max_x]
cropped_left = eyes[l_min_y: l_max_y, l_min_x: l_max_x]
# returning the cropped eyes
return cropped_right, cropped_left
# Eyes Position Estimator
def positionEstimator(cropped_eye):
# getting height and width of eye
h, w = cropped_eye.shape
# remove the noise from images
gaussain_blur = cv.GaussianBlur(cropped_eye, (9,9),0)
median_blur = cv.medianBlur(gaussain_blur, 3)
    # applying thresholding to convert to a binary image
    ret, threshed_eye = cv.threshold(median_blur, 130, 255, cv.THRESH_BINARY)
    # split the eye width into three fixed-size pieces
piece = int(w/3)
# slicing the eyes into three parts
right_piece = threshed_eye[0:h, 0:piece]
center_piece = threshed_eye[0:h, piece: piece+piece]
left_piece = threshed_eye[0:h, piece +piece:w]
# calling pixel counter function
eye_position, color = pixelCounter(right_piece, center_piece, left_piece)
return eye_position, color
# creating pixel counter function
def pixelCounter(first_piece, second_piece, third_piece):
# counting black pixel in each part
right_part = np.sum(first_piece==0)
center_part = np.sum(second_piece==0)
left_part = np.sum(third_piece==0)
# creating list of these values
eye_parts = [right_part, center_part, left_part]
# getting the index of max values in the list
max_index = eye_parts.index(max(eye_parts))
pos_eye =''
if max_index==0:
pos_eye="RIGHT"
color=[utils.BLACK, utils.GREEN]
elif max_index==1:
pos_eye = 'CENTER'
color = [utils.YELLOW, utils.PINK]
elif max_index ==2:
pos_eye = 'LEFT'
color = [utils.GRAY, utils.YELLOW]
else:
pos_eye="Closed"
color = [utils.GRAY, utils.YELLOW]
return pos_eye, color
with map_face_mesh.FaceMesh(min_detection_confidence =0.5, min_tracking_confidence=0.5) as face_mesh:
# starting time here
start_time = time.time()
# starting Video loop here.
while True:
frame_counter +=1 # frame counter
ret, frame = camera.read() # getting frame from camera
if not ret:
break # no more frames break
# resizing frame
frame = cv.resize(frame, None, fx=1.5, fy=1.5, interpolation=cv.INTER_CUBIC)
frame_height, frame_width= frame.shape[:2]
        rgb_frame = cv.cvtColor(frame, cv.COLOR_BGR2RGB)  # camera frames are BGR; MediaPipe expects RGB
results = face_mesh.process(rgb_frame)
if results.multi_face_landmarks:
mesh_coords = landmarksDetection(frame, results, False)
ratio = blinkRatio(frame, mesh_coords, RIGHT_EYE, LEFT_EYE)
# cv.putText(frame, f'ratio {ratio}', (100, 100), FONTS, 1.0, utils.GREEN, 2)
utils.colorBackgroundText(frame, f'Ratio : {round(ratio,2)}', FONTS, 0.7, (30,100),2, utils.PINK, utils.YELLOW)
if ratio >5.5:
CEF_COUNTER +=1
# cv.putText(frame, 'Blink', (200, 50), FONTS, 1.3, utils.PINK, 2)
utils.colorBackgroundText(frame, f'Blink', FONTS, 1.7, (int(frame_height/2), 100), 2, utils.YELLOW, pad_x=6, pad_y=6, )
else:
if CEF_COUNTER>CLOSED_EYES_FRAME:
TOTAL_BLINKS +=1
CEF_COUNTER =0
# cv.putText(frame, f'Total Blinks: {TOTAL_BLINKS}', (100, 150), FONTS, 0.6, utils.GREEN, 2)
utils.colorBackgroundText(frame, f'Total Blinks: {TOTAL_BLINKS}', FONTS, 0.7, (30,150),2)
cv.polylines(frame, [np.array([mesh_coords[p] for p in LEFT_EYE ], dtype=np.int32)], True, utils.GREEN, 1, cv.LINE_AA)
cv.polylines(frame, [np.array([mesh_coords[p] for p in RIGHT_EYE ], dtype=np.int32)], True, utils.GREEN, 1, cv.LINE_AA)
# Blink Detector Counter Completed
right_coords = [mesh_coords[p] for p in RIGHT_EYE]
left_coords = [mesh_coords[p] for p in LEFT_EYE]
crop_right, crop_left = eyesExtractor(frame, right_coords, left_coords)
# cv.imshow('right', crop_right)
# cv.imshow('left', crop_left)
eye_position_right, color = positionEstimator(crop_right)
utils.colorBackgroundText(frame, f'R: {eye_position_right}', FONTS, 1.0, (40, 220), 2, color[0], color[1], 8, 8)
eye_position_left, color = positionEstimator(crop_left)
utils.colorBackgroundText(frame, f'L: {eye_position_left}', FONTS, 1.0, (40, 320), 2, color[0], color[1], 8, 8)
# Starting Voice Indicator
if eye_position_right=="RIGHT" and pygame.mixer.get_busy()==0 and counter_right<2:
# starting counter
counter_right+=1
# resetting counters
counter_center=0
counter_left=0
# playing voice
voice_center.play()
if eye_position_right=="LEFT" and pygame.mixer.get_busy()==0 and counter_left<2:
counter_left +=1
# resetting counters
counter_center=0
counter_right=0
# playing Voice
voice_center.play()
# calculating frame per seconds FPS
end_time = time.time()-start_time
fps = frame_counter/end_time
# frame =utils.textWithBackground(frame,f'FPS: {round(fps,1)}',FONTS, 1.0, (30, 50), bgOpacity=0.9, textThickness=2)
# writing image for thumbnail drawing shape
# cv.imwrite(f'img/frame_{frame_counter}.png', frame)
        # writing the video for demo purposes
out.write(frame)
cv.imshow('frame', frame)
key = cv.waitKey(2)
if key==ord('q') or key ==ord('Q'):
break
cv.destroyAllWindows()
camera.release()
|
the-stack_0_25079
|
"""
predict house prices using linear regression with TensorFlow (Keras is only used to load the Boston housing data)
"""
import tensorflow as tf
import keras
from matplotlib import pyplot as plt
# load data
(x_train, y_train), (x_test, y_test) = keras.datasets.boston_housing.load_data()
#x_train.shape == (404, 13)
#y_train.shape == (404,)
# normalize data
x_train_mean = x_train.mean(axis=0)
y_train_mean = y_train.mean()
x_train_std = x_train.std(axis=0)
y_train_std = y_train.std()
x_train = (x_train - x_train_mean) / x_train_std
y_train = (y_train - y_train_mean) / y_train_std
x_test = (x_test - x_train_mean) / x_train_std
y_test = (y_test - y_train_mean) / y_train_std
x = tf.placeholder(tf.float32, (None, 13), name='x')
y = tf.placeholder(tf.float32, (None, 1), name='y')
w = tf.Variable(tf.random_normal((13, 1)))
pred = tf.matmul(x, w)
loss = tf.reduce_mean((y - pred)**2)
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
train_step = optimizer.minimize(loss)
max_epoch = 100
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for i in range(max_epoch):
train_loss, _ = sess.run([loss, train_step], feed_dict={
x: x_train, y: y_train.reshape((-1, 1))})
print('i: {}, train_loss: {}'.format(i, train_loss))
test_predict = sess.run(pred, feed_dict={x: x_test})
plt.plot(x_test[:, 5], test_predict, 'bo', label='predict')
plt.plot(x_test[:, 5], y_test, 'ro', label='truth')
plt.legend()
plt.show()
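# Optional sanity check (added sketch, not part of the original script): for a linear model
# without a bias term on the normalized data, gradient descent should approach the closed-form
# least-squares solution w = (X^T X)^{-1} X^T y.
import numpy as np  # assumption: numpy is available (it is a dependency of keras/tensorflow)
w_closed, *_ = np.linalg.lstsq(x_train, y_train.reshape(-1, 1), rcond=None)
print('closed-form weights:', w_closed.ravel())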
|
the-stack_0_25080
|
from .eNMR_Methods import _eNMR_Methods
import pandas as pd
import xml.etree.ElementTree as etree
import re
import numpy as np
class Flo(_eNMR_Methods):
'''
    This is a subclass of eNMR_Methods (and hence of Measurement()) specialised to process data obtained from the experimental Swedish set-up from Pavel
    the voltage list is calculated from the vd-values
path:
relative or absolute path to the measurements folder
expno:
the to the experiment number corresponding EXPNO
dependency:
'U': voltage dependent eNMR measurement
'G': fieldgradient dependent eNMR measurement
alias:
Here you can place an individual name relevant for plotting. If None, the path is taken instead.
lineb:
setting a standard-value for the linebroadening.
d:
electrode_distance
'''
def __init__(self, path, expno, dependency='U', alias=None, lineb=.3, d=2.2e-2, cell_resistance=None):
self.dependency = dependency
self.cell_resistance = cell_resistance
super().__init__(path, expno, lineb=lineb, alias=alias)
self._x_axis = {"G": 'g in T/m', "U": 'U / [V]', "I": "I / mA"}[dependency.upper()]
# import the diffusion parameters
diffpar = etree.parse(self.dateipfad+'/diff.xml')
root = diffpar.getroot()
self.Delta = float(root.findall('DELTA')[0].text)*1e-3
self.delta = float(root.findall('delta')[0].text)*1e-3 #in Seconds
print('The diffusion parameters were read from the respective .XML!')
if path[-1] == '/':
pulseprogram = open(path+str(expno)+'/pulseprogram')
else:
pulseprogram = open(path+'/'+str(expno)+'/pulseprogram')
self.pulseprogram = pulseprogram.read()
bitregex = r"(define list.*bit.*|define list.*pol.*)"
vlistregex = r".*Voltage\sset\sList.*= \n;.*]"
ilistregex = r".*Current\sset\sList.*= \n;.*]"
# list, reading all lines with bit lists in the pulse program
rawlist = re.findall(bitregex, self.pulseprogram)
rawvlist = re.findall(vlistregex, self.pulseprogram)
        # check for const U or I mode
if len(rawvlist) == 0:
rawvlist = re.findall(ilistregex, self.pulseprogram)
self.dependency = 'I'
self._x_axis = "I / mA"
print('dependency changed to "I" --> const current mode')
rawvlist = rawvlist[0].split('= \n;')
vlist = eval(rawvlist[1])
# array of integers generated from the rawlist. Indexing like [bit, voltagestep]
bitarray = np.array([[int(i) for i in re.findall('{.*}', rawlist[j])[0][1:-1].split(' ')] for j in range(len(rawlist))])
def byte_to_int(bitarray, row):
#converting the array into the correct string
bitstring = str(bitarray[:,row])[1:-1].replace(' ', '')
# check the last bit for the polarity
if bitstring[-1] == '1':
polarity = 1
elif bitstring[-1] == '0':
polarity = -1
# transformation of the bitstring minus polarity, which is the last bit, with a base of 2
intvar = int(bitstring[:-1][::-1], 2)
if self.dependency.upper() == 'U':
return round(polarity*intvar*200/255, 2)
if self.dependency.upper() == 'I':
return round(polarity*intvar*50/255, 2)
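        # Worked example (added comment, 'U' mode): a bit column [1 0 0 0 0 0 0 1] becomes the
        # string '10000001'; the last bit '1' gives polarity +1, the remaining bits reversed
        # ('0000001') decode to the integer 1, so the set voltage is round(1*200/255, 2) = 0.78 V.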
ulist = [byte_to_int(bitarray, i) for i in range(len(bitarray[0]))]
if ulist == vlist:
pass
else:
            raise ValueError('The decoded voltage list does not match the encoding voltage list! Revisit your pulse program!\n {} \n{}'.format(ulist, vlist))
self.eNMRraw = pd.DataFrame(ulist, columns=[self._x_axis])
try:
self.difflist = pd.read_csv(self.dateipfad+"/gradlist",
names=["g in T/m"])*0.01
except:
print('gradlist not found. difflist imported instead')
self.difflist = pd.read_csv(self.dateipfad+"/difflist",
names=["g in T/m"])*0.01
self.eNMRraw["g in T/m"] = self.difflist
self.d = d
self.g = self.eNMRraw["g in T/m"][0]
## converts the vd-List
#for i, n in enumerate(self.eNMRraw['vd']):
#self.eNMRraw.loc[i, 'vd_temp'] = float(n[:-1])
## calculates the applied Voltages
#if self.dependency.upper() == "U":
#self.eNMRraw[self._x_axis] = [
#0 if (self.eNMRraw.loc[i,'vd_temp'] <= 0.6)
#else
#n if i%2==0
#else
#n*-1
#for i, n in enumerate(self.eNMRraw['vd_temp']*5)]
#self.uInk = self.eNMRraw['U / [V]'][0] - self.eNMRraw['U / [V]'][1]
#if self.uInk == 0:
#self.uInk = self.eNMRraw['U / [V]'][0] - self.eNMRraw['U / [V]'][2]
#if self.uInk < 0:
#self.uInk *= -1
#elif self.dependency.upper() == "I":
#self.uInk = None
#self.eNMRraw[self._x_axis] = [
#0 if (self.eNMRraw.loc[i,'vd_temp'] <= 0.6)
#else
#n if i%2==0
#else
#n*-1
#for i, n in enumerate(self.eNMRraw['vd_temp'])
#]
#elif self.dependency.upper() == "RI":
#self.uInk = None
#self.eNMRraw[self._x_axis] = [
#0 if (self.eNMRraw.loc[i,'vd_temp'] <= 0.6)
#else
#n if i%2==0
#else
#n*-1
#for i, n in enumerate(self.eNMRraw['vd_temp'])
#]
#self.uInk = self.eNMRraw['RI / V'][0] - self.eNMRraw['RI / V'][1]
#if self.uInk == 0:
#self.uInk = self.eNMRraw['RI / V'][0] - self.eNMRraw['RI / V'][2]
#if self.uInk < 0:
#self.uInk *= -1
## calculation of the Voltage from cell resistance and Current /1000 because of mA
#self.eNMRraw[self._x_axis] *= self.cell_resistance/1000
def plot_spec(self, row, xlim=None, figsize=None, invert_xaxis=True, sharey=True):#, ppm=True):
from .Juergen1 import Juergen1 as eNMR_Measurement
return eNMR_Measurement.plot_spec(self, row, xlim, figsize, invert_xaxis, sharey)#, ppm=True):
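# Minimal usage sketch (added, not part of the original module): the path and EXPNO below are
# placeholders for a real Bruker measurement folder containing diff.xml, the pulseprogram and
# a grad-/difflist.
# m = Flo('/path/to/measurement/', 42, dependency='U', lineb=0.3, d=2.2e-2)
# m.plot_spec(0)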
|
the-stack_0_25084
|
import copy
import wrapt
from aws_xray_sdk.core import xray_recorder
class XRayTracedConn(wrapt.ObjectProxy):
_xray_meta = None
def __init__(self, conn, meta={}):
super(XRayTracedConn, self).__init__(conn)
self._xray_meta = meta
def cursor(self, *args, **kwargs):
cursor = self.__wrapped__.cursor(*args, **kwargs)
return XRayTracedCursor(cursor, self._xray_meta)
class XRayTracedCursor(wrapt.ObjectProxy):
_xray_meta = None
def __init__(self, cursor, meta={}):
super(XRayTracedCursor, self).__init__(cursor)
self._xray_meta = meta
# we preset database type if db is framework built-in
if not self._xray_meta.get('database_type'):
db_type = cursor.__class__.__module__.split('.')[0]
self._xray_meta['database_type'] = db_type
def __enter__(self):
value = self.__wrapped__.__enter__()
if value is not self.__wrapped__:
return value
return self
@xray_recorder.capture()
def execute(self, query, *args, **kwargs):
add_sql_meta(self._xray_meta)
subsegment = xray_recorder.current_subsegment()
if subsegment:
subsegment.put_metadata("SQL Statement", query, "memrise")
return self.__wrapped__.execute(query, *args, **kwargs)
@xray_recorder.capture()
def executemany(self, query, *args, **kwargs):
add_sql_meta(self._xray_meta)
return self.__wrapped__.executemany(query, *args, **kwargs)
@xray_recorder.capture()
def callproc(self, proc, args):
add_sql_meta(self._xray_meta)
return self.__wrapped__.callproc(proc, args)
def add_sql_meta(meta):
subsegment = xray_recorder.current_subsegment()
if not subsegment:
return
if meta.get('name', None):
subsegment.name = meta['name']
sql_meta = copy.copy(meta)
if sql_meta.get('name', None):
del sql_meta['name']
subsegment.set_sql(sql_meta)
subsegment.namespace = 'remote'
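# Minimal usage sketch (added, not part of the original module): wrap an existing DB-API
# connection so every execute()/executemany()/callproc() call is captured as an X-Ray
# subsegment. 'some_dbapi_module' and the connection parameters are placeholders.
#
# import some_dbapi_module
# raw_conn = some_dbapi_module.connect(host='db.example.com', user='app')
# conn = XRayTracedConn(raw_conn, meta={'name': 'app-db', 'database_type': 'postgresql'})
# cursor = conn.cursor()
# cursor.execute('SELECT 1')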
|
the-stack_0_25085
|
_base_ = [
'../_base_/datasets/dotav1.py', '../_base_/schedules/schedule_1x.py',
'../_base_/default_runtime.py'
]
angle_version = 'oc'
model = dict(
type='RotatedRetinaNet',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
zero_init_residual=False,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_input',
num_outs=5),
bbox_head=dict(
type='RotatedRetinaHead',
num_classes=15,
in_channels=256,
stacked_convs=4,
feat_channels=256,
assign_by_circumhbbox=None,
anchor_generator=dict(
type='RotatedAnchorGenerator',
octave_base_scale=4,
scales_per_octave=3,
ratios=[1.0, 0.5, 2.0],
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHAOBBoxCoder',
angle_range=angle_version,
norm_factor=None,
edge_swap=False,
proj_xy=False,
target_means=(.0, .0, .0, .0, .0),
target_stds=(1.0, 1.0, 1.0, 1.0, 1.0)),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
train_cfg=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0,
ignore_iof_thr=-1,
iou_calculator=dict(type='RBboxOverlaps2D')),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=2000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(iou_thr=0.1),
max_per_img=2000))
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='RResize', img_scale=(1024, 1024)),
dict(
type='RRandomFlip',
flip_ratio=[0.25, 0.25, 0.25],
direction=['horizontal', 'vertical', 'diagonal'],
version=angle_version),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
data = dict(
train=dict(pipeline=train_pipeline, version=angle_version),
val=dict(version=angle_version),
test=dict(version=angle_version))
|
the-stack_0_25086
|
from django.urls import reverse
DUMMY_ID = 999999
simple_table = {
'dom': 't',
'no_col_search': True,
'no_footer': True,
'pageLength': 400,
'stateSave': False
}
def row_button(command, button_text, *, function='Html', button_classes='btn btn-sm', **kwargs):
rb = {
'html': (f'<button data-command="{command}" onclick="django_datatables.b_r(this)" '
f'class="{button_classes}">{button_text}</button>'),
'function': function,
}
rb.update(kwargs)
return rb
def render_replace(*, var='%1%', **kwargs):
return dict(var=var, function='Replace', **kwargs)
def get_url(url_name):
if type(url_name) == tuple:
return reverse(url_name[0], args=[*url_name[1:]])
else:
if url_name.find(str(DUMMY_ID)) == -1:
return reverse(url_name, args=[DUMMY_ID])
return url_name
def row_link(url_name, column_id):
return [render_replace(column=column_id, html=get_url(url_name), var=str(DUMMY_ID))]
|
the-stack_0_25087
|
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
import os
import sys
import glob
import argparse
import threading
import six.moves.queue as Queue
import traceback
import numpy as np
import tensorflow as tf
import PIL.Image
import pdb
import tfutil
import dataset
#----------------------------------------------------------------------------
def error(msg):
print('Error: ' + msg)
exit(1)
#----------------------------------------------------------------------------
class TFRecordExporter:
def __init__(self, tfrecord_dir, expected_images, print_progress=True, progress_interval=10):
self.tfrecord_dir = tfrecord_dir
self.tfr_prefix = os.path.join(self.tfrecord_dir, os.path.basename(self.tfrecord_dir))
self.expected_images = expected_images
self.cur_images = 0
self.shape = None
self.resolution_log2 = None
self.tfr_writers = []
self.print_progress = print_progress
self.progress_interval = progress_interval
if self.print_progress:
print('Creating dataset "%s"' % tfrecord_dir)
if not os.path.isdir(self.tfrecord_dir):
os.makedirs(self.tfrecord_dir)
assert(os.path.isdir(self.tfrecord_dir))
def close(self):
if self.print_progress:
print('%-40s\r' % 'Flushing data...', end='', flush=True)
for tfr_writer in self.tfr_writers:
tfr_writer.close()
self.tfr_writers = []
if self.print_progress:
print('%-40s\r' % '', end='', flush=True)
print('Added %d images.' % self.cur_images)
def choose_shuffled_order(self): # Note: Images and labels must be added in shuffled order.
order = np.arange(self.expected_images)
np.random.RandomState(123).shuffle(order)
return order
def add_image(self, img):
#pdb.set_trace()
if self.print_progress and self.cur_images % self.progress_interval == 0:
print('%d / %d\r' % (self.cur_images, self.expected_images), end='', flush=True)
if self.shape is None:
self.shape = img.shape
self.resolution_log2 = int(np.log2(self.shape[1]))
assert self.shape[0] in [1, 3]
assert self.shape[1] == self.shape[2]
assert self.shape[1] == 2**self.resolution_log2
tfr_opt = tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.NONE)
for lod in range(self.resolution_log2 - 1):
tfr_file = self.tfr_prefix + '-r%02d.tfrecords' % (self.resolution_log2 - lod)
self.tfr_writers.append(tf.python_io.TFRecordWriter(tfr_file, tfr_opt))
assert img.shape == self.shape
for lod, tfr_writer in enumerate(self.tfr_writers):
if lod:
img = img.astype(np.float32)
img = (img[:, 0::2, 0::2] + img[:, 0::2, 1::2] + img[:, 1::2, 0::2] + img[:, 1::2, 1::2]) * 0.25
quant = np.rint(img).clip(0, 255).astype(np.uint8)
ex = tf.train.Example(features=tf.train.Features(feature={
'shape': tf.train.Feature(int64_list=tf.train.Int64List(value=quant.shape)),
'data': tf.train.Feature(bytes_list=tf.train.BytesList(value=[quant.tostring()]))}))
tfr_writer.write(ex.SerializeToString())
self.cur_images += 1
def add_labels(self, labels):
#print('labels:{}'.format(labels.shape))
#print('self.cur:{}'.format(self.cur_images))
#pdb.set_trace()
if self.print_progress:
print('%-40s\r' % 'Saving labels...', end='', flush=True)
assert labels.shape[0] == self.cur_images
with open(self.tfr_prefix + '-rxx.labels', 'wb') as f:
np.save(f, labels.astype(np.float32))
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
#----------------------------------------------------------------------------
class ExceptionInfo(object):
def __init__(self):
self.value = sys.exc_info()[1]
self.traceback = traceback.format_exc()
#----------------------------------------------------------------------------
class WorkerThread(threading.Thread):
def __init__(self, task_queue):
threading.Thread.__init__(self)
self.task_queue = task_queue
def run(self):
while True:
func, args, result_queue = self.task_queue.get()
if func is None:
break
try:
result = func(*args)
except:
result = ExceptionInfo()
result_queue.put((result, args))
#----------------------------------------------------------------------------
class ThreadPool(object):
def __init__(self, num_threads):
assert num_threads >= 1
self.task_queue = Queue.Queue()
self.result_queues = dict()
self.num_threads = num_threads
for idx in range(self.num_threads):
thread = WorkerThread(self.task_queue)
thread.daemon = True
thread.start()
def add_task(self, func, args=()):
assert hasattr(func, '__call__') # must be a function
if func not in self.result_queues:
self.result_queues[func] = Queue.Queue()
self.task_queue.put((func, args, self.result_queues[func]))
def get_result(self, func): # returns (result, args)
result, args = self.result_queues[func].get()
if isinstance(result, ExceptionInfo):
print('\n\nWorker thread caught an exception:\n' + result.traceback)
raise result.value
return result, args
def finish(self):
for idx in range(self.num_threads):
self.task_queue.put((None, (), None))
def __enter__(self): # for 'with' statement
return self
def __exit__(self, *excinfo):
self.finish()
def process_items_concurrently(self, item_iterator, process_func=lambda x: x, pre_func=lambda x: x, post_func=lambda x: x, max_items_in_flight=None):
if max_items_in_flight is None: max_items_in_flight = self.num_threads * 4
assert max_items_in_flight >= 1
results = []
retire_idx = [0]
def task_func(prepared, idx):
return process_func(prepared)
def retire_result():
processed, (prepared, idx) = self.get_result(task_func)
results[idx] = processed
while retire_idx[0] < len(results) and results[retire_idx[0]] is not None:
yield post_func(results[retire_idx[0]])
results[retire_idx[0]] = None
retire_idx[0] += 1
for idx, item in enumerate(item_iterator):
prepared = pre_func(item)
results.append(None)
self.add_task(func=task_func, args=(prepared, idx))
while retire_idx[0] < idx - max_items_in_flight + 2:
for res in retire_result(): yield res
while retire_idx[0] < len(results):
for res in retire_result(): yield res
#----------------------------------------------------------------------------
def display(tfrecord_dir):
print('Loading dataset "%s"' % tfrecord_dir)
tfutil.init_tf({'gpu_options.allow_growth': True})
dset = dataset.TFRecordDataset(tfrecord_dir, max_label_size='full', repeat=False, shuffle_mb=0)
tfutil.init_uninited_vars()
idx = 0
while True:
try:
images, labels = dset.get_minibatch_np(1)
except tf.errors.OutOfRangeError:
break
if idx == 0:
print('Displaying images')
import cv2 # pip install opencv-python
cv2.namedWindow('dataset_tool')
print('Press SPACE or ENTER to advance, ESC to exit')
print('\nidx = %-8d\nlabel = %s' % (idx, labels[0].tolist()))
cv2.imshow('dataset_tool', images[0].transpose(1, 2, 0)[:, :, ::-1]) # CHW => HWC, RGB => BGR
idx += 1
if cv2.waitKey() == 27:
break
print('\nDisplayed %d images.' % idx)
#----------------------------------------------------------------------------
def extract(tfrecord_dir, output_dir):
print('Loading dataset "%s"' % tfrecord_dir)
tfutil.init_tf({'gpu_options.allow_growth': True})
dset = dataset.TFRecordDataset(tfrecord_dir, max_label_size=0, repeat=False, shuffle_mb=0)
tfutil.init_uninited_vars()
print('Extracting images to "%s"' % output_dir)
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
idx = 0
while True:
if idx % 10 == 0:
print('%d\r' % idx, end='', flush=True)
try:
images, labels = dset.get_minibatch_np(1)
except tf.errors.OutOfRangeError:
break
if images.shape[1] == 1:
img = PIL.Image.fromarray(images[0][0], 'L')
else:
img = PIL.Image.fromarray(images[0].transpose(1, 2, 0), 'RGB')
img.save(os.path.join(output_dir, 'img%08d.png' % idx))
idx += 1
print('Extracted %d images.' % idx)
#----------------------------------------------------------------------------
def compare(tfrecord_dir_a, tfrecord_dir_b, ignore_labels):
max_label_size = 0 if ignore_labels else 'full'
print('Loading dataset "%s"' % tfrecord_dir_a)
tfutil.init_tf({'gpu_options.allow_growth': True})
dset_a = dataset.TFRecordDataset(tfrecord_dir_a, max_label_size=max_label_size, repeat=False, shuffle_mb=0)
print('Loading dataset "%s"' % tfrecord_dir_b)
dset_b = dataset.TFRecordDataset(tfrecord_dir_b, max_label_size=max_label_size, repeat=False, shuffle_mb=0)
tfutil.init_uninited_vars()
print('Comparing datasets')
idx = 0
identical_images = 0
identical_labels = 0
while True:
if idx % 100 == 0:
print('%d\r' % idx, end='', flush=True)
try:
images_a, labels_a = dset_a.get_minibatch_np(1)
except tf.errors.OutOfRangeError:
images_a, labels_a = None, None
try:
images_b, labels_b = dset_b.get_minibatch_np(1)
except tf.errors.OutOfRangeError:
images_b, labels_b = None, None
if images_a is None or images_b is None:
if images_a is not None or images_b is not None:
print('Datasets contain different number of images')
break
if images_a.shape == images_b.shape and np.all(images_a == images_b):
identical_images += 1
else:
print('Image %d is different' % idx)
if labels_a.shape == labels_b.shape and np.all(labels_a == labels_b):
identical_labels += 1
else:
print('Label %d is different' % idx)
idx += 1
print('Identical images: %d / %d' % (identical_images, idx))
if not ignore_labels:
print('Identical labels: %d / %d' % (identical_labels, idx))
#----------------------------------------------------------------------------
def create_mnist(tfrecord_dir, mnist_dir):
print('Loading MNIST from "%s"' % mnist_dir)
import gzip
with gzip.open(os.path.join(mnist_dir, 'train-images-idx3-ubyte.gz'), 'rb') as file:
images = np.frombuffer(file.read(), np.uint8, offset=16)
with gzip.open(os.path.join(mnist_dir, 'train-labels-idx1-ubyte.gz'), 'rb') as file:
labels = np.frombuffer(file.read(), np.uint8, offset=8)
images = images.reshape(-1, 1, 28, 28)
images = np.pad(images, [(0,0), (0,0), (2,2), (2,2)], 'constant', constant_values=0)
assert images.shape == (60000, 1, 32, 32) and images.dtype == np.uint8
assert labels.shape == (60000,) and labels.dtype == np.uint8
assert np.min(images) == 0 and np.max(images) == 255
assert np.min(labels) == 0 and np.max(labels) == 9
onehot = np.zeros((labels.size, np.max(labels) + 1), dtype=np.float32)
onehot[np.arange(labels.size), labels] = 1.0
with TFRecordExporter(tfrecord_dir, images.shape[0]) as tfr:
order = tfr.choose_shuffled_order()
for idx in range(order.size):
tfr.add_image(images[order[idx]])
tfr.add_labels(onehot[order])
#----------------------------------------------------------------------------
def create_mnistrgb(tfrecord_dir, mnist_dir, num_images=1000000, random_seed=123):
print('Loading MNIST from "%s"' % mnist_dir)
import gzip
with gzip.open(os.path.join(mnist_dir, 'train-images-idx3-ubyte.gz'), 'rb') as file:
images = np.frombuffer(file.read(), np.uint8, offset=16)
images = images.reshape(-1, 28, 28)
images = np.pad(images, [(0,0), (2,2), (2,2)], 'constant', constant_values=0)
assert images.shape == (60000, 32, 32) and images.dtype == np.uint8
assert np.min(images) == 0 and np.max(images) == 255
with TFRecordExporter(tfrecord_dir, num_images) as tfr:
rnd = np.random.RandomState(random_seed)
for idx in range(num_images):
tfr.add_image(images[rnd.randint(images.shape[0], size=3)])
#----------------------------------------------------------------------------
def create_cifar10(tfrecord_dir, cifar10_dir):
print('Loading CIFAR-10 from "%s"' % cifar10_dir)
import pickle
images = []
labels = []
for batch in range(1, 6):
with open(os.path.join(cifar10_dir, 'data_batch_%d' % batch), 'rb') as file:
data = pickle.load(file, encoding='latin1')
images.append(data['data'].reshape(-1, 3, 32, 32))
labels.append(data['labels'])
images = np.concatenate(images)
labels = np.concatenate(labels)
assert images.shape == (50000, 3, 32, 32) and images.dtype == np.uint8
assert labels.shape == (50000,) and labels.dtype == np.int32
assert np.min(images) == 0 and np.max(images) == 255
assert np.min(labels) == 0 and np.max(labels) == 9
onehot = np.zeros((labels.size, np.max(labels) + 1), dtype=np.float32)
onehot[np.arange(labels.size), labels] = 1.0
with TFRecordExporter(tfrecord_dir, images.shape[0]) as tfr:
order = tfr.choose_shuffled_order()
for idx in range(order.size):
tfr.add_image(images[order[idx]])
tfr.add_labels(onehot[order])
#----------------------------------------------------------------------------
def create_cifar100(tfrecord_dir, cifar100_dir):
print('Loading CIFAR-100 from "%s"' % cifar100_dir)
import pickle
with open(os.path.join(cifar100_dir, 'train'), 'rb') as file:
data = pickle.load(file, encoding='latin1')
images = data['data'].reshape(-1, 3, 32, 32)
labels = np.array(data['fine_labels'])
assert images.shape == (50000, 3, 32, 32) and images.dtype == np.uint8
assert labels.shape == (50000,) and labels.dtype == np.int32
assert np.min(images) == 0 and np.max(images) == 255
assert np.min(labels) == 0 and np.max(labels) == 99
onehot = np.zeros((labels.size, np.max(labels) + 1), dtype=np.float32)
onehot[np.arange(labels.size), labels] = 1.0
with TFRecordExporter(tfrecord_dir, images.shape[0]) as tfr:
order = tfr.choose_shuffled_order()
for idx in range(order.size):
tfr.add_image(images[order[idx]])
tfr.add_labels(onehot[order])
#----------------------------------------------------------------------------
def create_svhn(tfrecord_dir, svhn_dir):
print('Loading SVHN from "%s"' % svhn_dir)
import pickle
images = []
labels = []
for batch in range(1, 4):
with open(os.path.join(svhn_dir, 'train_%d.pkl' % batch), 'rb') as file:
data = pickle.load(file, encoding='latin1')
images.append(data[0])
labels.append(data[1])
images = np.concatenate(images)
labels = np.concatenate(labels)
assert images.shape == (73257, 3, 32, 32) and images.dtype == np.uint8
assert labels.shape == (73257,) and labels.dtype == np.uint8
assert np.min(images) == 0 and np.max(images) == 255
assert np.min(labels) == 0 and np.max(labels) == 9
onehot = np.zeros((labels.size, np.max(labels) + 1), dtype=np.float32)
onehot[np.arange(labels.size), labels] = 1.0
with TFRecordExporter(tfrecord_dir, images.shape[0]) as tfr:
order = tfr.choose_shuffled_order()
for idx in range(order.size):
tfr.add_image(images[order[idx]])
tfr.add_labels(onehot[order])
#----------------------------------------------------------------------------
def create_lsun(tfrecord_dir, lmdb_dir, resolution=256, max_images=None):
print('Loading LSUN dataset from "%s"' % lmdb_dir)
import lmdb # pip install lmdb
import cv2 # pip install opencv-python
import io
with lmdb.open(lmdb_dir, readonly=True).begin(write=False) as txn:
total_images = txn.stat()['entries']
if max_images is None:
max_images = total_images
with TFRecordExporter(tfrecord_dir, max_images) as tfr:
for idx, (key, value) in enumerate(txn.cursor()):
try:
try:
img = cv2.imdecode(np.fromstring(value, dtype=np.uint8), 1)
if img is None:
raise IOError('cv2.imdecode failed')
img = img[:, :, ::-1] # BGR => RGB
except IOError:
img = np.asarray(PIL.Image.open(io.BytesIO(value)))
crop = np.min(img.shape[:2])
img = img[(img.shape[0] - crop) // 2 : (img.shape[0] + crop) // 2, (img.shape[1] - crop) // 2 : (img.shape[1] + crop) // 2]
img = PIL.Image.fromarray(img, 'RGB')
img = img.resize((resolution, resolution), PIL.Image.ANTIALIAS)
img = np.asarray(img)
img = img.transpose(2, 0, 1) # HWC => CHW
tfr.add_image(img)
except:
print(sys.exc_info()[1])
if tfr.cur_images == max_images:
break
#----------------------------------------------------------------------------
def create_celeba(tfrecord_dir, celeba_dir, cx=89, cy=121):
print('Loading CelebA from "%s"' % celeba_dir)
glob_pattern = os.path.join(celeba_dir, 'img_align_celeba_png', '*.png')
image_filenames = sorted(glob.glob(glob_pattern))
expected_images = 202599
if len(image_filenames) != expected_images:
error('Expected to find %d images' % expected_images)
with TFRecordExporter(tfrecord_dir, len(image_filenames)) as tfr:
order = tfr.choose_shuffled_order()
for idx in range(order.size):
img = np.asarray(PIL.Image.open(image_filenames[order[idx]]))
assert img.shape == (218, 178, 3)
img = img[cy - 64 : cy + 64, cx - 64 : cx + 64]
img = img.transpose(2, 0, 1) # HWC => CHW
tfr.add_image(img)
#----------------------------------------------------------------------------
def create_celebahq(tfrecord_dir, celeba_dir, delta_dir, num_threads=4, num_tasks=100):
print('Loading CelebA from "%s"' % celeba_dir)
expected_images = 202599
if len(glob.glob(os.path.join(celeba_dir, 'img_celeba', '*.jpg'))) != expected_images:
error('Expected to find %d images' % expected_images)
with open(os.path.join(celeba_dir, 'Anno', 'list_landmarks_celeba.txt'), 'rt') as file:
landmarks = [[float(value) for value in line.split()[1:]] for line in file.readlines()[2:]]
landmarks = np.float32(landmarks).reshape(-1, 5, 2)
print('Loading CelebA-HQ deltas from "%s"' % delta_dir)
import scipy.ndimage
import hashlib
import bz2
import zipfile
import base64
import cryptography.hazmat.primitives.hashes
import cryptography.hazmat.backends
import cryptography.hazmat.primitives.kdf.pbkdf2
import cryptography.fernet
expected_zips = 30
if len(glob.glob(os.path.join(delta_dir, 'delta*.zip'))) != expected_zips:
error('Expected to find %d zips' % expected_zips)
with open(os.path.join(delta_dir, 'image_list.txt'), 'rt') as file:
lines = [line.split() for line in file]
fields = dict()
for idx, field in enumerate(lines[0]):
type = int if field.endswith('idx') else str
fields[field] = [type(line[idx]) for line in lines[1:]]
indices = np.array(fields['idx'])
# Must use pillow version 3.1.1 for everything to work correctly.
if getattr(PIL, 'PILLOW_VERSION', '') != '3.1.1':
error('create_celebahq requires pillow version 3.1.1') # conda install pillow=3.1.1
# Must use libjpeg version 8d for everything to work correctly.
img = np.array(PIL.Image.open(os.path.join(celeba_dir, 'img_celeba', '000001.jpg')))
md5 = hashlib.md5()
md5.update(img.tobytes())
if md5.hexdigest() != '9cad8178d6cb0196b36f7b34bc5eb6d3':
error('create_celebahq requires libjpeg version 8d') # conda install jpeg=8d
def rot90(v):
return np.array([-v[1], v[0]])
def process_func(idx):
# Load original image.
orig_idx = fields['orig_idx'][idx]
orig_file = fields['orig_file'][idx]
orig_path = os.path.join(celeba_dir, 'img_celeba', orig_file)
img = PIL.Image.open(orig_path)
# Choose oriented crop rectangle.
lm = landmarks[orig_idx]
eye_avg = (lm[0] + lm[1]) * 0.5 + 0.5
mouth_avg = (lm[3] + lm[4]) * 0.5 + 0.5
eye_to_eye = lm[1] - lm[0]
eye_to_mouth = mouth_avg - eye_avg
x = eye_to_eye - rot90(eye_to_mouth)
x /= np.hypot(*x)
x *= max(np.hypot(*eye_to_eye) * 2.0, np.hypot(*eye_to_mouth) * 1.8)
y = rot90(x)
c = eye_avg + eye_to_mouth * 0.1
quad = np.stack([c - x - y, c - x + y, c + x + y, c + x - y])
zoom = 1024 / (np.hypot(*x) * 2)
# Shrink.
shrink = int(np.floor(0.5 / zoom))
if shrink > 1:
size = (int(np.round(float(img.size[0]) / shrink)), int(np.round(float(img.size[1]) / shrink)))
img = img.resize(size, PIL.Image.ANTIALIAS)
quad /= shrink
zoom *= shrink
# Crop.
border = max(int(np.round(1024 * 0.1 / zoom)), 3)
crop = (int(np.floor(min(quad[:,0]))), int(np.floor(min(quad[:,1]))), int(np.ceil(max(quad[:,0]))), int(np.ceil(max(quad[:,1]))))
crop = (max(crop[0] - border, 0), max(crop[1] - border, 0), min(crop[2] + border, img.size[0]), min(crop[3] + border, img.size[1]))
if crop[2] - crop[0] < img.size[0] or crop[3] - crop[1] < img.size[1]:
img = img.crop(crop)
quad -= crop[0:2]
# Simulate super-resolution.
superres = int(np.exp2(np.ceil(np.log2(zoom))))
if superres > 1:
img = img.resize((img.size[0] * superres, img.size[1] * superres), PIL.Image.ANTIALIAS)
quad *= superres
zoom /= superres
# Pad.
pad = (int(np.floor(min(quad[:,0]))), int(np.floor(min(quad[:,1]))), int(np.ceil(max(quad[:,0]))), int(np.ceil(max(quad[:,1]))))
pad = (max(-pad[0] + border, 0), max(-pad[1] + border, 0), max(pad[2] - img.size[0] + border, 0), max(pad[3] - img.size[1] + border, 0))
if max(pad) > border - 4:
pad = np.maximum(pad, int(np.round(1024 * 0.3 / zoom)))
img = np.pad(np.float32(img), ((pad[1], pad[3]), (pad[0], pad[2]), (0, 0)), 'reflect')
h, w, _ = img.shape
y, x, _ = np.mgrid[:h, :w, :1]
mask = 1.0 - np.minimum(np.minimum(np.float32(x) / pad[0], np.float32(y) / pad[1]), np.minimum(np.float32(w-1-x) / pad[2], np.float32(h-1-y) / pad[3]))
blur = 1024 * 0.02 / zoom
img += (scipy.ndimage.gaussian_filter(img, [blur, blur, 0]) - img) * np.clip(mask * 3.0 + 1.0, 0.0, 1.0)
img += (np.median(img, axis=(0,1)) - img) * np.clip(mask, 0.0, 1.0)
img = PIL.Image.fromarray(np.uint8(np.clip(np.round(img), 0, 255)), 'RGB')
quad += pad[0:2]
# Transform.
img = img.transform((4096, 4096), PIL.Image.QUAD, (quad + 0.5).flatten(), PIL.Image.BILINEAR)
img = img.resize((1024, 1024), PIL.Image.ANTIALIAS)
img = np.asarray(img).transpose(2, 0, 1)
# Verify MD5.
md5 = hashlib.md5()
md5.update(img.tobytes())
assert md5.hexdigest() == fields['proc_md5'][idx]
# Load delta image and original JPG.
with zipfile.ZipFile(os.path.join(delta_dir, 'deltas%05d.zip' % (idx - idx % 1000)), 'r') as zip:
delta_bytes = zip.read('delta%05d.dat' % idx)
with open(orig_path, 'rb') as file:
orig_bytes = file.read()
# Decrypt delta image, using original JPG data as decryption key.
algorithm = cryptography.hazmat.primitives.hashes.SHA256()
backend = cryptography.hazmat.backends.default_backend()
salt = bytes(orig_file, 'ascii')
kdf = cryptography.hazmat.primitives.kdf.pbkdf2.PBKDF2HMAC(algorithm=algorithm, length=32, salt=salt, iterations=100000, backend=backend)
key = base64.urlsafe_b64encode(kdf.derive(orig_bytes))
delta = np.frombuffer(bz2.decompress(cryptography.fernet.Fernet(key).decrypt(delta_bytes)), dtype=np.uint8).reshape(3, 1024, 1024)
# Apply delta image.
img = img + delta
# Verify MD5.
md5 = hashlib.md5()
md5.update(img.tobytes())
assert md5.hexdigest() == fields['final_md5'][idx]
return img
with TFRecordExporter(tfrecord_dir, indices.size) as tfr:
order = tfr.choose_shuffled_order()
with ThreadPool(num_threads) as pool:
for img in pool.process_items_concurrently(indices[order].tolist(), process_func=process_func, max_items_in_flight=num_tasks):
tfr.add_image(img)
#----------------------------------------------------------------------------
def create_from_images(tfrecord_dir, image_dir, shuffle):
print('Loading images from "%s"' % image_dir)
image_filenames = sorted(glob.glob(os.path.join(image_dir, '*')))
if len(image_filenames) == 0:
error('No input images found')
imgval = np.asarray(PIL.Image.open(image_filenames[0]))
resolution = imgval.shape[0]
channels = imgval.shape[2] if imgval.ndim == 3 else 1
if imgval.shape[1] != resolution:
error('Input images must have the same width and height')
if resolution != 2 ** int(np.floor(np.log2(resolution))):
error('Input image resolution must be a power-of-two')
if channels not in [1, 3]:
error('Input images must be stored as RGB or grayscale')
with TFRecordExporter(tfrecord_dir, len(image_filenames)) as tfr:
order = tfr.choose_shuffled_order() if shuffle else np.arange(len(image_filenames))
label_value = np.zeros((len(image_filenames),4), dtype=np.float32)
for idx in range(order.size):
img = np.asarray(PIL.Image.open(image_filenames[order[idx]]))
            name = image_filenames[order[idx]].split('/')[-1]  # e.g. 88888_222021.png 11111_2220211.png
            # label columns: 0:gender, 1:eyeglasses, 2:smile, 3:age
            label_value[order[idx], 0] = 0.0 if int(name[6]) == 2 else 1.0   # 0 for woman
            label_value[order[idx], 1] = 0.0 if int(name[10]) == 2 else 1.0  # 0 for no glasses
            label_value[order[idx], 2] = 1.0 if int(name[9]) == 2 else 0.0   # 1 for smile
            label_value[order[idx], 3] = 1.0 if int(name[7]) == 3 else 0.0   # 1 for black(baby)
if channels == 1:
img = img[np.newaxis, :, :] # HW => CHW
else:
img = img.transpose(2, 0, 1) # HWC => CHW
tfr.add_image(img)
tfr.add_labels(label_value[order])
#----------------------------------------------------------------------------
def create_from_hdf5(tfrecord_dir, hdf5_filename, shuffle):
print('Loading HDF5 archive from "%s"' % hdf5_filename)
import h5py # conda install h5py
with h5py.File(hdf5_filename, 'r') as hdf5_file:
hdf5_data = max([value for key, value in hdf5_file.items() if key.startswith('data')], key=lambda lod: lod.shape[3])
with TFRecordExporter(tfrecord_dir, hdf5_data.shape[0]) as tfr:
order = tfr.choose_shuffled_order() if shuffle else np.arange(hdf5_data.shape[0])
for idx in range(order.size):
tfr.add_image(hdf5_data[order[idx]])
npy_filename = os.path.splitext(hdf5_filename)[0] + '-labels.npy'
if os.path.isfile(npy_filename):
tfr.add_labels(np.load(npy_filename)[order])
#----------------------------------------------------------------------------
def execute_cmdline(argv):
prog = argv[0]
parser = argparse.ArgumentParser(
prog = prog,
description = 'Tool for creating, extracting, and visualizing Progressive GAN datasets.',
epilog = 'Type "%s <command> -h" for more information.' % prog)
subparsers = parser.add_subparsers(dest='command')
subparsers.required = True
def add_command(cmd, desc, example=None):
epilog = 'Example: %s %s' % (prog, example) if example is not None else None
return subparsers.add_parser(cmd, description=desc, help=desc, epilog=epilog)
p = add_command( 'display', 'Display images in dataset.',
'display datasets/mnist')
p.add_argument( 'tfrecord_dir', help='Directory containing dataset')
p = add_command( 'extract', 'Extract images from dataset.',
'extract datasets/mnist mnist-images')
p.add_argument( 'tfrecord_dir', help='Directory containing dataset')
p.add_argument( 'output_dir', help='Directory to extract the images into')
p = add_command( 'compare', 'Compare two datasets.',
'compare datasets/mydataset datasets/mnist')
p.add_argument( 'tfrecord_dir_a', help='Directory containing first dataset')
p.add_argument( 'tfrecord_dir_b', help='Directory containing second dataset')
p.add_argument( '--ignore_labels', help='Ignore labels (default: 0)', type=int, default=0)
p = add_command( 'create_mnist', 'Create dataset for MNIST.',
'create_mnist datasets/mnist ~/downloads/mnist')
p.add_argument( 'tfrecord_dir', help='New dataset directory to be created')
p.add_argument( 'mnist_dir', help='Directory containing MNIST')
p = add_command( 'create_mnistrgb', 'Create dataset for MNIST-RGB.',
'create_mnistrgb datasets/mnistrgb ~/downloads/mnist')
p.add_argument( 'tfrecord_dir', help='New dataset directory to be created')
p.add_argument( 'mnist_dir', help='Directory containing MNIST')
p.add_argument( '--num_images', help='Number of composite images to create (default: 1000000)', type=int, default=1000000)
p.add_argument( '--random_seed', help='Random seed (default: 123)', type=int, default=123)
p = add_command( 'create_cifar10', 'Create dataset for CIFAR-10.',
'create_cifar10 datasets/cifar10 ~/downloads/cifar10')
p.add_argument( 'tfrecord_dir', help='New dataset directory to be created')
p.add_argument( 'cifar10_dir', help='Directory containing CIFAR-10')
p = add_command( 'create_cifar100', 'Create dataset for CIFAR-100.',
'create_cifar100 datasets/cifar100 ~/downloads/cifar100')
p.add_argument( 'tfrecord_dir', help='New dataset directory to be created')
p.add_argument( 'cifar100_dir', help='Directory containing CIFAR-100')
p = add_command( 'create_svhn', 'Create dataset for SVHN.',
'create_svhn datasets/svhn ~/downloads/svhn')
p.add_argument( 'tfrecord_dir', help='New dataset directory to be created')
p.add_argument( 'svhn_dir', help='Directory containing SVHN')
p = add_command( 'create_lsun', 'Create dataset for single LSUN category.',
'create_lsun datasets/lsun-car-100k ~/downloads/lsun/car_lmdb --resolution 256 --max_images 100000')
p.add_argument( 'tfrecord_dir', help='New dataset directory to be created')
p.add_argument( 'lmdb_dir', help='Directory containing LMDB database')
p.add_argument( '--resolution', help='Output resolution (default: 256)', type=int, default=256)
p.add_argument( '--max_images', help='Maximum number of images (default: none)', type=int, default=None)
p = add_command( 'create_celeba', 'Create dataset for CelebA.',
'create_celeba datasets/celeba ~/downloads/celeba')
p.add_argument( 'tfrecord_dir', help='New dataset directory to be created')
p.add_argument( 'celeba_dir', help='Directory containing CelebA')
p.add_argument( '--cx', help='Center X coordinate (default: 89)', type=int, default=89)
p.add_argument( '--cy', help='Center Y coordinate (default: 121)', type=int, default=121)
p = add_command( 'create_celebahq', 'Create dataset for CelebA-HQ.',
'create_celebahq datasets/celebahq ~/downloads/celeba ~/downloads/celeba-hq-deltas')
p.add_argument( 'tfrecord_dir', help='New dataset directory to be created')
p.add_argument( 'celeba_dir', help='Directory containing CelebA')
p.add_argument( 'delta_dir', help='Directory containing CelebA-HQ deltas')
p.add_argument( '--num_threads', help='Number of concurrent threads (default: 4)', type=int, default=4)
p.add_argument( '--num_tasks', help='Number of concurrent processing tasks (default: 100)', type=int, default=100)
p = add_command( 'create_from_images', 'Create dataset from a directory full of images.',
'create_from_images datasets/mydataset myimagedir')
p.add_argument( 'tfrecord_dir', help='New dataset directory to be created')
p.add_argument( 'image_dir', help='Directory containing the images')
p.add_argument( '--shuffle', help='Randomize image order (default: 1)', type=int, default=1)
p = add_command( 'create_from_hdf5', 'Create dataset from legacy HDF5 archive.',
'create_from_hdf5 datasets/celebahq ~/downloads/celeba-hq-1024x1024.h5')
p.add_argument( 'tfrecord_dir', help='New dataset directory to be created')
p.add_argument( 'hdf5_filename', help='HDF5 archive containing the images')
p.add_argument( '--shuffle', help='Randomize image order (default: 1)', type=int, default=1)
args = parser.parse_args(argv[1:] if len(argv) > 1 else ['-h'])
func = globals()[args.command]
del args.command
func(**vars(args))
#----------------------------------------------------------------------------
if __name__ == "__main__":
execute_cmdline(sys.argv)
#----------------------------------------------------------------------------
|
the-stack_0_25089
|
from sqlalchemy.ext.declarative import declarative_base
from history_meta import VersionedMeta, VersionedListener
from sqlalchemy import create_engine, Column, Integer, String, ForeignKey
from sqlalchemy.orm import clear_mappers, compile_mappers, sessionmaker, deferred
from sqlalchemy.test.testing import TestBase, eq_
from sqlalchemy.test.entities import ComparableEntity
def setup():
global engine
engine = create_engine('sqlite://', echo=True)
class TestVersioning(TestBase):
def setup(self):
global Base, Session
Base = declarative_base(metaclass=VersionedMeta, bind=engine)
Session = sessionmaker(extension=VersionedListener())
def teardown(self):
clear_mappers()
Base.metadata.drop_all()
def create_tables(self):
Base.metadata.create_all()
def test_plain(self):
class SomeClass(Base, ComparableEntity):
__tablename__ = 'sometable'
id = Column(Integer, primary_key=True)
name = Column(String(50))
self.create_tables()
sess = Session()
sc = SomeClass(name='sc1')
sess.add(sc)
sess.commit()
sc.name = 'sc1modified'
sess.commit()
assert sc.version == 2
SomeClassHistory = SomeClass.__history_mapper__.class_
eq_(
sess.query(SomeClassHistory).filter(SomeClassHistory.version == 1).all(),
[SomeClassHistory(version=1, name='sc1')]
)
sc.name = 'sc1modified2'
eq_(
sess.query(SomeClassHistory).order_by(SomeClassHistory.version).all(),
[
SomeClassHistory(version=1, name='sc1'),
SomeClassHistory(version=2, name='sc1modified')
]
)
assert sc.version == 3
sess.commit()
sc.name = 'temp'
sc.name = 'sc1modified2'
sess.commit()
eq_(
sess.query(SomeClassHistory).order_by(SomeClassHistory.version).all(),
[
SomeClassHistory(version=1, name='sc1'),
SomeClassHistory(version=2, name='sc1modified')
]
)
sess.delete(sc)
sess.commit()
eq_(
sess.query(SomeClassHistory).order_by(SomeClassHistory.version).all(),
[
SomeClassHistory(version=1, name='sc1'),
SomeClassHistory(version=2, name='sc1modified'),
SomeClassHistory(version=3, name='sc1modified2')
]
)
def test_deferred(self):
"""test versioning of unloaded, deferred columns."""
class SomeClass(Base, ComparableEntity):
__tablename__ = 'sometable'
id = Column(Integer, primary_key=True)
name = Column(String(50))
data = deferred(Column(String(25)))
self.create_tables()
sess = Session()
sc = SomeClass(name='sc1', data='somedata')
sess.add(sc)
sess.commit()
sess.close()
sc = sess.query(SomeClass).first()
assert 'data' not in sc.__dict__
sc.name = 'sc1modified'
sess.commit()
assert sc.version == 2
SomeClassHistory = SomeClass.__history_mapper__.class_
eq_(
sess.query(SomeClassHistory).filter(SomeClassHistory.version == 1).all(),
[SomeClassHistory(version=1, name='sc1', data='somedata')]
)
def test_joined_inheritance(self):
class BaseClass(Base, ComparableEntity):
__tablename__ = 'basetable'
id = Column(Integer, primary_key=True)
name = Column(String(50))
type = Column(String(20))
__mapper_args__ = {'polymorphic_on':type, 'polymorphic_identity':'base'}
class SubClassSeparatePk(BaseClass):
__tablename__ = 'subtable1'
id = Column(Integer, primary_key=True)
base_id = Column(Integer, ForeignKey('basetable.id'))
subdata1 = Column(String(50))
__mapper_args__ = {'polymorphic_identity':'sep'}
class SubClassSamePk(BaseClass):
__tablename__ = 'subtable2'
id = Column(Integer, ForeignKey('basetable.id'), primary_key=True)
subdata2 = Column(String(50))
__mapper_args__ = {'polymorphic_identity':'same'}
self.create_tables()
sess = Session()
sep1 = SubClassSeparatePk(name='sep1', subdata1='sep1subdata')
base1 = BaseClass(name='base1')
same1 = SubClassSamePk(name='same1', subdata2='same1subdata')
sess.add_all([sep1, base1, same1])
sess.commit()
base1.name = 'base1mod'
same1.subdata2 = 'same1subdatamod'
        sep1.name = 'sep1mod'
sess.commit()
BaseClassHistory = BaseClass.__history_mapper__.class_
SubClassSeparatePkHistory = SubClassSeparatePk.__history_mapper__.class_
SubClassSamePkHistory = SubClassSamePk.__history_mapper__.class_
eq_(
sess.query(BaseClassHistory).order_by(BaseClassHistory.id).all(),
[
SubClassSeparatePkHistory(id=1, name=u'sep1', type=u'sep', version=1),
BaseClassHistory(id=2, name=u'base1', type=u'base', version=1),
SubClassSamePkHistory(id=3, name=u'same1', type=u'same', version=1)
]
)
same1.subdata2 = 'same1subdatamod2'
eq_(
sess.query(BaseClassHistory).order_by(BaseClassHistory.id, BaseClassHistory.version).all(),
[
SubClassSeparatePkHistory(id=1, name=u'sep1', type=u'sep', version=1),
BaseClassHistory(id=2, name=u'base1', type=u'base', version=1),
SubClassSamePkHistory(id=3, name=u'same1', type=u'same', version=1),
SubClassSamePkHistory(id=3, name=u'same1', type=u'same', version=2)
]
)
base1.name = 'base1mod2'
eq_(
sess.query(BaseClassHistory).order_by(BaseClassHistory.id, BaseClassHistory.version).all(),
[
SubClassSeparatePkHistory(id=1, name=u'sep1', type=u'sep', version=1),
BaseClassHistory(id=2, name=u'base1', type=u'base', version=1),
BaseClassHistory(id=2, name=u'base1mod', type=u'base', version=2),
SubClassSamePkHistory(id=3, name=u'same1', type=u'same', version=1),
SubClassSamePkHistory(id=3, name=u'same1', type=u'same', version=2)
]
)
def test_single_inheritance(self):
class BaseClass(Base, ComparableEntity):
__tablename__ = 'basetable'
id = Column(Integer, primary_key=True)
name = Column(String(50))
type = Column(String(50))
__mapper_args__ = {'polymorphic_on':type, 'polymorphic_identity':'base'}
class SubClass(BaseClass):
subname = Column(String(50))
__mapper_args__ = {'polymorphic_identity':'sub'}
self.create_tables()
sess = Session()
b1 = BaseClass(name='b1')
sc = SubClass(name='s1', subname='sc1')
sess.add_all([b1, sc])
sess.commit()
        b1.name = 'b1modified'
BaseClassHistory = BaseClass.__history_mapper__.class_
SubClassHistory = SubClass.__history_mapper__.class_
eq_(
sess.query(BaseClassHistory).order_by(BaseClassHistory.id, BaseClassHistory.version).all(),
[BaseClassHistory(id=1, name=u'b1', type=u'base', version=1)]
)
        sc.name = 's1modified'
        b1.name = 'b1modified2'
eq_(
sess.query(BaseClassHistory).order_by(BaseClassHistory.id, BaseClassHistory.version).all(),
[
BaseClassHistory(id=1, name=u'b1', type=u'base', version=1),
BaseClassHistory(id=1, name=u'b1modified', type=u'base', version=2),
SubClassHistory(id=2, name=u's1', type=u'sub', version=1)
]
)
|
the-stack_0_25090
|
from opentera.forms.TeraForm import *
from modules.DatabaseModule.DBManagerTeraUserAccess import DBManagerTeraUserAccess
from flask_babel import gettext
class TeraUserGroupForm:
@staticmethod
def get_user_group_form(user_access: DBManagerTeraUserAccess):
form = TeraForm("user_group")
# Building lists
#################
# None to build!
# Sections
section = TeraFormSection("infos", gettext("Information"))
form.add_section(section)
# Items
section.add_item(TeraFormItem("id_user_group", gettext("User Group ID"), "hidden",
item_required=True))
section.add_item(TeraFormItem("user_group_name", gettext("User Group Name"), "text",
item_required=True))
return form.to_dict()
|
the-stack_0_25091
|
# Copyright (c) 2010 Pedro Matiello <[email protected]>
# Juarez Bochi <[email protected]>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
"""
PageRank algorithm
@sort: pagerank
"""
def pagerank(graph, damping_factor=0.85, max_iterations=100, min_delta=0.00001):
"""
    Compute and return the PageRank of each node in a directed graph.
@type graph: digraph
@param graph: Digraph.
@type damping_factor: number
    @param damping_factor: PageRank damping factor.
@type max_iterations: number
@param max_iterations: Maximum number of iterations.
@type min_delta: number
@param min_delta: Smallest variation required to have a new iteration.
@rtype: Dict
@return: Dict containing all the nodes PageRank.
"""
nodes = graph.nodes()
graph_size = len(nodes)
if graph_size == 0:
return {}
    min_value = (1.0-damping_factor)/graph_size  # value for nodes without inbound links
    # initialize the PageRank dict with 1/N for all nodes
pagerank = dict.fromkeys(nodes, 1.0/graph_size)
for i in range(max_iterations):
        diff = 0  # total difference compared to the last iteration
# computes each node PageRank based on inbound links
for node in nodes:
rank = min_value
for referring_page in graph.incidents(node):
rank += damping_factor * pagerank[referring_page] / len(graph.neighbors(referring_page))
diff += abs(pagerank[node] - rank)
pagerank[node] = rank
        # stop if PageRank has converged
if diff < min_delta:
break
return pagerank
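# Minimal usage sketch (illustrative only): pagerank() only requires an object
# exposing nodes(), incidents() and neighbors(), so a tiny stand-in digraph is
# defined here instead of importing the real python-graph digraph class.
if __name__ == "__main__":
    class _TinyDigraph(object):
        """Stand-in directed graph with edges a->b, a->c, b->c, c->a."""
        _edges = {'a': ['b', 'c'], 'b': ['c'], 'c': ['a']}
        def nodes(self):
            return list(self._edges)
        def neighbors(self, node):  # outbound links
            return self._edges[node]
        def incidents(self, node):  # inbound links
            return [n for n, out in self._edges.items() if node in out]
    print(pagerank(_TinyDigraph()))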
|
the-stack_0_25092
|
import importlib
from hydroDL.master import basins
from hydroDL.app import waterQuality
from hydroDL import kPath
from hydroDL.model import trainTS
from hydroDL.data import gageII, usgs
from hydroDL.post import axplot, figplot
import torch
import os
import json
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# test
wqData = waterQuality.DataModelWQ('Silica64')
outLst = ['Silica64-Y8090-opt1', 'Silica64-Y8090-00955-opt1']
code = '00955'
trainset = 'Y8090'
testset = 'Y0010'
errMatLst1 = list()
errMatLst2 = list()
for outName in outLst:
master = basins.loadMaster(outName)
dataName = master['dataName']
# wqData = waterQuality.DataModelWQ(dataName)
# point test
yP1, ycP1 = basins.testModel(outName, trainset, wqData=wqData)
errMatC1 = wqData.errBySiteC(ycP1, subset=trainset, varC=master['varYC'])
yP2, ycP2 = basins.testModel(outName, testset, wqData=wqData)
errMatC2 = wqData.errBySiteC(ycP2, subset=testset, varC=master['varYC'])
ycL1, ycL2 = basins.modelLinear(
outName, testset, trainset=trainset, wqData=wqData)
errMatL1 = wqData.errBySiteC(ycL1, subset=trainset, varC=master['varYC'])
errMatL2 = wqData.errBySiteC(ycL2, subset=testset, varC=master['varYC'])
ic = master['varYC'].index(code)
errMatLst1.append(errMatC1[:, ic, :])
errMatLst2.append(errMatC2[:, ic, :])
errMatLst1.append(errMatL1[:, ic, :])
errMatLst2.append(errMatL2[:, ic, :])
# box
for k in range(2):
dataBox = list()
for errMatLst in [errMatLst1, errMatLst2]:
temp = [errMat[:, k] for errMat in errMatLst]
dataBox.append(temp)
label1 = ['B2000', 'A2000']
    label2 = ['all C', 'all C, linear', 'single C', 'single C, linear']
fig = figplot.boxPlot(dataBox, label1=label1, label2=label2, sharey=True)
fig.suptitle('RMSE') if k == 0 else fig.suptitle('Correlation')
fig.show()
|
the-stack_0_25094
|
# recursion: merge sort, splitting the list with slow/fast pointers
def sortList(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
if (not head) or (not head.next):
return head
slow = fast = head
while fast.next and fast.next.next:
fast = fast.next.next
slow = slow.next
p1 = self.sortList(slow.next)
slow.next = None
p2 = self.sortList(head)
p = res = ListNode(None)
while p1 and p2:
            if p1.val < p2.val:
p.next = p1
p1 = p1.next
p = p.next
else:
p.next = p2
p2 = p2.next
p = p.next
if p1:
p.next = p1
if p2:
p.next = p2
return res.next
# array-based: copy the values into a list, sort, and rebuild the linked list
def sortList(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
items = []
while head:
items.append(head.val)
head = head.next
items.sort()
p = res = ListNode(None)
for i in items:
p.next = ListNode(i)
p = p.next
return res.next
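# Context sketch (assumed, not part of the original snippet): the two methods
# above are written LeetCode-style, so they expect to live on a Solution class
# and rely on a singly linked ListNode roughly like the one below.
class ListNode(object):
    def __init__(self, val=None, next=None):
        self.val = val
        self.next = next
# Hypothetical wiring, assuming the methods are attached to a Solution class:
#   head = ListNode(3, ListNode(1, ListNode(2)))
#   sorted_head = Solution().sortList(head)  # yields values 1 -> 2 -> 3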
|
the-stack_0_25095
|
import unittest
import six
import numpy
from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.functions.rnn import slstm
from chainer.testing import backend
def _sigmoid(x):
half = x.dtype.type(0.5)
return numpy.tanh(x * half) * half + half
def inject_backend_tests(method_names):
decorator = backend.inject_backend_tests(
method_names,
# CPU tests
testing.product({
'use_cuda': [False],
'use_ideep': ['never', 'always'],
}) +
# GPU tests
[{'use_cuda': True}])
return decorator
@testing.parameterize(*testing.product_dict(
[
{'shape': (5, 6, 2)},
{'shape': (8, 9, 4, 5)},
{'shape': (1, 0, 5)},
], [
{'dtype': numpy.float16},
{'dtype': numpy.float32},
{'dtype': numpy.float64},
], [
{'grad_outputs': (True, True)},
{'grad_outputs': (True, False)},
{'grad_outputs': (False, True)},
], [
{'flat': True},
{'flat': False},
]
))
@testing.fix_random()
@backend.inject_backend_tests(
None,
# ChainerX tests
testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0'],
})
# CPU tests
+ testing.product({
'use_cuda': [False],
'use_ideep': ['never', 'always'],
})
# GPU tests
+ testing.product([
[{'use_cuda': True}],
# Without cuDNN
testing.product({
'use_cudnn': ['never'],
})
# With cuDNN
+ testing.product({
'use_cudnn': ['always'],
'cudnn_deterministic': [True, False],
'autotune': [True, False],
})]))
class TestSLSTM(testing.FunctionTestCase):
dodge_nondifferentiable = True
def setUp(self):
if self.dtype == numpy.float16:
self.check_forward_options = {'atol': 1e-3, 'rtol': 1e-2}
self.check_backward_options = {'atol': 5e-3, 'rtol': 5e-2}
self.check_double_backward_options = {'atol': 5e-3, 'rtol': 5e-2}
# TODO(dido1998) : Remove this skip
if self.grad_outputs[0] is False or self.grad_outputs[1] is False:
self.skip_double_backward_test = True
def generate_inputs(self):
x_shape = []
x_shape.append(self.shape[0])
x_shape.append(4 * self.shape[1])
for i in range(2, len(self.shape)):
x_shape.append(self.shape[i])
x_shape = tuple(x_shape)
c1 = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
c2 = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
x1 = numpy.random.uniform(-1, 1, x_shape).astype(self.dtype)
x2 = numpy.random.uniform(-1, 1, x_shape).astype(self.dtype)
if self.flat:
return c1[..., 0], c2[..., 0], x1[..., 0], x2[..., 0],
else:
return c1, c2, x1, x2,
def forward(self, inputs, device):
c1, c2, x1, x2 = inputs
out = functions.slstm(c1, c2, x1, x2)
return out
def forward_expected(self, inputs):
c_prev1, c_prev2, x1, x2 = inputs
def _extract_gates(x):
r = x.reshape((x.shape[0], x.shape[1] // 4, 4) + x.shape[2:])
return (r[:, :, i] for i in six.moves.range(4))
a1_in, i1_in, f1_in, o1_in = _extract_gates(x1)
a2_in, i2_in, f2_in, o2_in = _extract_gates(x2)
c_expect = _sigmoid(i1_in) * numpy.tanh(a1_in) + \
_sigmoid(i2_in) * numpy.tanh(a2_in) + \
_sigmoid(f1_in) * c_prev1 + \
_sigmoid(f2_in) * c_prev2
h_expect = _sigmoid(o1_in + o2_in) * numpy.tanh(c_expect)
return c_expect, h_expect
def generate_grad_outputs(self, outputs_template):
grad_out = []
c = outputs_template[0]
h = outputs_template[1]
c_shape = c.shape
h_shape = h.shape
if self.grad_outputs[0] is True:
grad_out.append(numpy.random.uniform(-1, 1,
h_shape).astype(h.dtype))
else:
grad_out.append(None)
if self.grad_outputs[1] is True:
grad_out.append(numpy.random.uniform(-1, 1,
c_shape).astype(c.dtype))
else:
grad_out.append(None)
return tuple(grad_out)
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@testing.fix_random()
@inject_backend_tests(['test_backward'])
class TestSLSTMGrad(unittest.TestCase):
def setUp(self):
c_prev1 = numpy.random.uniform(-1, 1, (3, 2, 4)).astype(self.dtype)
c_prev2 = numpy.random.uniform(-1, 1, (3, 2, 4)).astype(self.dtype)
x1 = numpy.random.uniform(-1, 1, (3, 8, 4)).astype(self.dtype)
x2 = numpy.random.uniform(-1, 1, (3, 8, 4)).astype(self.dtype)
c_next = numpy.random.uniform(-1, 1, (3, 2, 4)).astype(self.dtype)
gc = numpy.random.uniform(-1, 1, (3, 2, 4)).astype(self.dtype)
gh = numpy.random.uniform(-1, 1, (3, 2, 4)).astype(self.dtype)
ggc_prev1 = numpy.random.uniform(-1, 1, (3, 2, 4)).astype(self.dtype)
ggc_prev2 = numpy.random.uniform(-1, 1, (3, 2, 4)).astype(self.dtype)
ggx1 = numpy.random.uniform(-1, 1, (3, 8, 4)).astype(self.dtype)
ggx2 = numpy.random.uniform(-1, 1, (3, 8, 4)).astype(self.dtype)
self.inputs = [c_prev1, c_prev2, x1, x2, c_next, gc, gh]
self.grad_outputs = [ggc_prev1, ggc_prev2, ggx1, ggx2]
self.check_backward_options = {'dtype': numpy.float64}
if self.dtype == numpy.float16:
self.check_backward_options = {
'dtype': numpy.float64, 'atol': 1e-3, 'rtol': 1e-2}
def check_backward(self, inputs, grad_outputs, backend_config):
if backend_config.use_cuda:
inputs = cuda.to_gpu(inputs)
grad_outputs = cuda.to_gpu(grad_outputs)
with backend_config:
gradient_check.check_backward(
slstm.SLSTMGrad(), inputs, grad_outputs,
**self.check_backward_options)
def test_backward(self, backend_config):
self.check_backward(self.inputs, self.grad_outputs, backend_config)
testing.run_module(__name__, __file__)
|
the-stack_0_25096
|
from gql import Client, gql
from gql.transport.requests import RequestsHTTPTransport
from requests_aws4auth import AWS4Auth
from os import getenv
import logging
logging.basicConfig()
logger = logging.getLogger('gqllayer')
logger.setLevel('INFO')
class BackendGqlClass:
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
# get temporary creds from lambda env
awsauth = AWS4Auth(
getenv('AWS_ACCESS_KEY_ID'),
getenv('AWS_SECRET_ACCESS_KEY'),
getenv('AWS_REGION'),
'appsync',
session_token=getenv('AWS_SESSION_TOKEN')
)
headers = {
'Content-Type': 'application/json'
}
appsync_endpoint = getenv('APPSYNC_ENDPOINT')
job_import_transport = RequestsHTTPTransport(
url=appsync_endpoint,
use_json=True,
headers=headers,
verify=True,
retries=3,
auth=awsauth
)
self._client = Client(
transport=job_import_transport,
fetch_schema_from_transport=True
)
def client(self):
return self._client
def return_gql(self, gql_string):
return gql(gql_string)
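# Hypothetical usage sketch (illustrative only): assumes the Lambda environment
# supplies AWS credentials and APPSYNC_ENDPOINT, and the query below is a
# placeholder rather than a field from a real schema.
#
#   backend = BackendGqlClass()
#   query = backend.return_gql('query { __typename }')
#   result = backend.client().execute(query)
#   logger.info(result)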
|
the-stack_0_25097
|
# ----------------------------------------------------------------------
# Collection test utilities
# ----------------------------------------------------------------------
# Copyright (C) 2007-2020 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Python modules
import os
from collections import defaultdict
# Third-party modules
import orjson
import pytest
class CollectionTestHelper(object):
COLLECTIONS = "collections"
def __init__(self, model):
self.model = model
self.collection = model._meta["json_collection"]
self.cache = None
self._params = []
self._uuid_count = defaultdict(int)
self._name_count = defaultdict(int)
def _iter_params(self):
for root, _, files in os.walk(os.path.join(self.COLLECTIONS, self.collection)):
for f in files:
if not f.endswith(".json"):
continue
yield os.path.join(root, f)
def get_fixture_params(self):
if not self._params:
self._params = sorted(self._iter_params())
return self._params
@classmethod
def fixture_id(cls, path):
return os.path.join(*path.split(os.path.sep)[2:])
def get_object(self, path):
with open(path) as f:
data = orjson.loads(f.read())
self._uuid_count[data["uuid"]] += 1
self._name_count[data["name"]] += 1
if self.cache is None:
# Fill cache
self.cache = {str(o.uuid): o for o in self.model.objects.all()}
try:
return self.cache[data["uuid"]]
except KeyError:
pytest.fail(
"Failed to get object. Pair %s/'%s' is not unique" % (data["uuid"], data["name"])
)
def get_uuid_count(self, uuid):
return self._uuid_count[str(uuid)]
def get_name_count(self, name):
return self._name_count[str(name)]
def teardown(self):
self.cache = None
self._params = []
self._uuid_count = defaultdict(int)
self._name_count = defaultdict(int)
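# Hypothetical usage sketch (illustrative only; "SomeModel" is a placeholder
# model class): the helper is intended to drive a parametrized pytest run over
# every JSON file in the model's collection directory.
#
#   helper = CollectionTestHelper(SomeModel)
#
#   @pytest.fixture(params=helper.get_fixture_params(), ids=CollectionTestHelper.fixture_id)
#   def model_object(request):
#       return helper.get_object(request.param)
#
#   def test_unique_name(model_object):
#       assert helper.get_name_count(model_object.name) == 1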
|
the-stack_0_25099
|
"""Hypergeometric and Meijer G-functions"""
from __future__ import print_function, division
from sympy.core import S, I, pi, oo, zoo, ilcm, Mod
from sympy.core.function import Function, Derivative, ArgumentIndexError
from sympy.core.compatibility import reduce
from sympy.core.containers import Tuple
from sympy.core.mul import Mul
from sympy.core.symbol import Dummy
from sympy.functions import (
sqrt,
exp,
log,
sin,
cos,
asin,
atan,
sinh,
cosh,
asinh,
acosh,
atanh,
acoth,
Abs,
)
from sympy.utilities.iterables import default_sort_key
class TupleArg(Tuple):
def limit(self, x, xlim, dir="+"):
""" Compute limit x->xlim.
"""
from sympy.series.limits import limit
return TupleArg(*[limit(f, x, xlim, dir) for f in self.args])
# TODO should __new__ accept **options?
# TODO should constructors should check if parameters are sensible?
def _prep_tuple(v):
"""
Turn an iterable argument *v* into a tuple and unpolarify, since both
hypergeometric and meijer g-functions are unbranched in their parameters.
Examples
========
>>> from sympy.functions.special.hyper import _prep_tuple
>>> _prep_tuple([1, 2, 3])
(1, 2, 3)
>>> _prep_tuple((4, 5))
(4, 5)
>>> _prep_tuple((7, 8, 9))
(7, 8, 9)
"""
from sympy import unpolarify
return TupleArg(*[unpolarify(x) for x in v])
class TupleParametersBase(Function):
""" Base class that takes care of differentiation, when some of
the arguments are actually tuples. """
# This is not deduced automatically since there are Tuples as arguments.
is_commutative = True
def _eval_derivative(self, s):
try:
res = 0
if self.args[0].has(s) or self.args[1].has(s):
for i, p in enumerate(self._diffargs):
m = self._diffargs[i].diff(s)
if m != 0:
res += self.fdiff((1, i)) * m
return res + self.fdiff(3) * self.args[2].diff(s)
except (ArgumentIndexError, NotImplementedError):
return Derivative(self, s)
class hyper(TupleParametersBase):
r"""
The generalized hypergeometric function is defined by a series where
the ratios of successive terms are a rational function of the summation
index. When convergent, it is continued analytically to the largest
possible domain.
Explanation
===========
The hypergeometric function depends on two vectors of parameters, called
the numerator parameters $a_p$, and the denominator parameters
$b_q$. It also has an argument $z$. The series definition is
.. math ::
{}_pF_q\left(\begin{matrix} a_1, \cdots, a_p \\ b_1, \cdots, b_q \end{matrix}
\middle| z \right)
= \sum_{n=0}^\infty \frac{(a_1)_n \cdots (a_p)_n}{(b_1)_n \cdots (b_q)_n}
\frac{z^n}{n!},
where $(a)_n = (a)(a+1)\cdots(a+n-1)$ denotes the rising factorial.
If one of the $b_q$ is a non-positive integer then the series is
undefined unless one of the $a_p$ is a larger (i.e., smaller in
magnitude) non-positive integer. If none of the $b_q$ is a
non-positive integer and one of the $a_p$ is a non-positive
integer, then the series reduces to a polynomial. To simplify the
following discussion, we assume that none of the $a_p$ or
$b_q$ is a non-positive integer. For more details, see the
references.
The series converges for all $z$ if $p \le q$, and thus
defines an entire single-valued function in this case. If $p =
q+1$ the series converges for $|z| < 1$, and can be continued
analytically into a half-plane. If $p > q+1$ the series is
divergent for all $z$.
Please note the hypergeometric function constructor currently does *not*
check if the parameters actually yield a well-defined function.
Examples
========
The parameters $a_p$ and $b_q$ can be passed as arbitrary
iterables, for example:
>>> from sympy.functions import hyper
>>> from sympy.abc import x, n, a
>>> hyper((1, 2, 3), [3, 4], x)
hyper((1, 2, 3), (3, 4), x)
There is also pretty printing (it looks better using Unicode):
>>> from sympy import pprint
>>> pprint(hyper((1, 2, 3), [3, 4], x), use_unicode=False)
_
|_ /1, 2, 3 | \
| | | x|
3 2 \ 3, 4 | /
The parameters must always be iterables, even if they are vectors of
length one or zero:
>>> hyper((1, ), [], x)
hyper((1,), (), x)
But of course they may be variables (but if they depend on $x$ then you
should not expect much implemented functionality):
>>> hyper((n, a), (n**2,), x)
hyper((n, a), (n**2,), x)
The hypergeometric function generalizes many named special functions.
The function ``hyperexpand()`` tries to express a hypergeometric function
using named special functions. For example:
>>> from sympy import hyperexpand
>>> hyperexpand(hyper([], [], x))
exp(x)
You can also use ``expand_func()``:
>>> from sympy import expand_func
>>> expand_func(x*hyper([1, 1], [2], -x))
log(x + 1)
More examples:
>>> from sympy import S
>>> hyperexpand(hyper([], [S(1)/2], -x**2/4))
cos(x)
>>> hyperexpand(x*hyper([S(1)/2, S(1)/2], [S(3)/2], x**2))
asin(x)
We can also sometimes ``hyperexpand()`` parametric functions:
>>> from sympy.abc import a
>>> hyperexpand(hyper([-a], [], x))
(1 - x)**a
See Also
========
sympy.simplify.hyperexpand
gamma
meijerg
References
==========
.. [1] Luke, Y. L. (1969), The Special Functions and Their Approximations,
Volume 1
.. [2] https://en.wikipedia.org/wiki/Generalized_hypergeometric_function
"""
def __new__(cls, ap, bq, z, **kwargs):
# TODO should we check convergence conditions?
return Function.__new__(cls, _prep_tuple(ap), _prep_tuple(bq), z, **kwargs)
@classmethod
def eval(cls, ap, bq, z):
from sympy import unpolarify
if len(ap) <= len(bq) or (len(ap) == len(bq) + 1 and (Abs(z) <= 1) == True):
nz = unpolarify(z)
if z != nz:
return hyper(ap, bq, nz)
def fdiff(self, argindex=3):
if argindex != 3:
raise ArgumentIndexError(self, argindex)
nap = Tuple(*[a + 1 for a in self.ap])
nbq = Tuple(*[b + 1 for b in self.bq])
fac = Mul(*self.ap) / Mul(*self.bq)
return fac * hyper(nap, nbq, self.argument)
def _eval_expand_func(self, **hints):
from sympy import gamma, hyperexpand
if len(self.ap) == 2 and len(self.bq) == 1 and self.argument == 1:
a, b = self.ap
c = self.bq[0]
return gamma(c) * gamma(c - a - b) / gamma(c - a) / gamma(c - b)
return hyperexpand(self)
def _eval_rewrite_as_Sum(self, ap, bq, z, **kwargs):
from sympy.functions import factorial, RisingFactorial, Piecewise
from sympy import Sum
n = Dummy("n", integer=True)
rfap = Tuple(*[RisingFactorial(a, n) for a in ap])
rfbq = Tuple(*[RisingFactorial(b, n) for b in bq])
coeff = Mul(*rfap) / Mul(*rfbq)
return Piecewise(
(
Sum(coeff * z ** n / factorial(n), (n, 0, oo)),
self.convergence_statement,
),
(self, True),
)
@property
def argument(self):
""" Argument of the hypergeometric function. """
return self.args[2]
@property
def ap(self):
""" Numerator parameters of the hypergeometric function. """
return Tuple(*self.args[0])
@property
def bq(self):
""" Denominator parameters of the hypergeometric function. """
return Tuple(*self.args[1])
@property
def _diffargs(self):
return self.ap + self.bq
@property
def eta(self):
""" A quantity related to the convergence of the series. """
return sum(self.ap) - sum(self.bq)
@property
def radius_of_convergence(self):
"""
Compute the radius of convergence of the defining series.
Explanation
===========
Note that even if this is not ``oo``, the function may still be
evaluated outside of the radius of convergence by analytic
continuation. But if this is zero, then the function is not actually
defined anywhere else.
Examples
========
>>> from sympy.functions import hyper
>>> from sympy.abc import z
>>> hyper((1, 2), [3], z).radius_of_convergence
1
>>> hyper((1, 2, 3), [4], z).radius_of_convergence
0
>>> hyper((1, 2), (3, 4), z).radius_of_convergence
oo
"""
if any(a.is_integer and (a <= 0) == True for a in self.ap + self.bq):
aints = [a for a in self.ap if a.is_Integer and (a <= 0) == True]
bints = [a for a in self.bq if a.is_Integer and (a <= 0) == True]
if len(aints) < len(bints):
return S.Zero
popped = False
for b in bints:
cancelled = False
while aints:
a = aints.pop()
if a >= b:
cancelled = True
break
popped = True
if not cancelled:
return S.Zero
if aints or popped:
# There are still non-positive numerator parameters.
# This is a polynomial.
return oo
if len(self.ap) == len(self.bq) + 1:
return S.One
elif len(self.ap) <= len(self.bq):
return oo
else:
return S.Zero
@property
def convergence_statement(self):
""" Return a condition on z under which the series converges. """
from sympy import And, Or, re, Ne, oo
R = self.radius_of_convergence
if R == 0:
return False
if R == oo:
return True
# The special functions and their approximations, page 44
e = self.eta
z = self.argument
c1 = And(re(e) < 0, abs(z) <= 1)
c2 = And(0 <= re(e), re(e) < 1, abs(z) <= 1, Ne(z, 1))
c3 = And(re(e) >= 1, abs(z) < 1)
return Or(c1, c2, c3)
def _eval_simplify(self, **kwargs):
from sympy.simplify.hyperexpand import hyperexpand
return hyperexpand(self)
def _sage_(self):
import sage.all as sage
ap = [arg._sage_() for arg in self.args[0]]
bq = [arg._sage_() for arg in self.args[1]]
return sage.hypergeometric(ap, bq, self.argument._sage_())
class meijerg(TupleParametersBase):
r"""
The Meijer G-function is defined by a Mellin-Barnes type integral that
resembles an inverse Mellin transform. It generalizes the hypergeometric
functions.
Explanation
===========
The Meijer G-function depends on four sets of parameters. There are
"*numerator parameters*"
$a_1, \ldots, a_n$ and $a_{n+1}, \ldots, a_p$, and there are
"*denominator parameters*"
$b_1, \ldots, b_m$ and $b_{m+1}, \ldots, b_q$.
Confusingly, it is traditionally denoted as follows (note the position
of $m$, $n$, $p$, $q$, and how they relate to the lengths of the four
parameter vectors):
.. math ::
G_{p,q}^{m,n} \left(\begin{matrix}a_1, \cdots, a_n & a_{n+1}, \cdots, a_p \\
b_1, \cdots, b_m & b_{m+1}, \cdots, b_q
\end{matrix} \middle| z \right).
However, in SymPy the four parameter vectors are always available
separately (see examples), so that there is no need to keep track of the
decorating sub- and super-scripts on the G symbol.
The G function is defined as the following integral:
.. math ::
\frac{1}{2 \pi i} \int_L \frac{\prod_{j=1}^m \Gamma(b_j - s)
\prod_{j=1}^n \Gamma(1 - a_j + s)}{\prod_{j=m+1}^q \Gamma(1- b_j +s)
\prod_{j=n+1}^p \Gamma(a_j - s)} z^s \mathrm{d}s,
where $\Gamma(z)$ is the gamma function. There are three possible
contours which we will not describe in detail here (see the references).
If the integral converges along more than one of them, the definitions
agree. The contours all separate the poles of $\Gamma(1-a_j+s)$
from the poles of $\Gamma(b_k-s)$, so in particular the G function
is undefined if $a_j - b_k \in \mathbb{Z}_{>0}$ for some
$j \le n$ and $k \le m$.
The conditions under which one of the contours yields a convergent integral
are complicated and we do not state them here, see the references.
Please note currently the Meijer G-function constructor does *not* check any
convergence conditions.
Examples
========
You can pass the parameters either as four separate vectors:
>>> from sympy.functions import meijerg
>>> from sympy.abc import x, a
>>> from sympy.core.containers import Tuple
>>> from sympy import pprint
>>> pprint(meijerg((1, 2), (a, 4), (5,), [], x), use_unicode=False)
__1, 2 /1, 2 a, 4 | \
/__ | | x|
\_|4, 1 \ 5 | /
Or as two nested vectors:
>>> pprint(meijerg([(1, 2), (3, 4)], ([5], Tuple()), x), use_unicode=False)
__1, 2 /1, 2 3, 4 | \
/__ | | x|
\_|4, 1 \ 5 | /
As with the hypergeometric function, the parameters may be passed as
arbitrary iterables. Vectors of length zero and one also have to be
passed as iterables. The parameters need not be constants, but if they
depend on the argument then not much implemented functionality should be
expected.
All the subvectors of parameters are available:
>>> from sympy import pprint
>>> g = meijerg([1], [2], [3], [4], x)
>>> pprint(g, use_unicode=False)
__1, 1 /1 2 | \
/__ | | x|
\_|2, 2 \3 4 | /
>>> g.an
(1,)
>>> g.ap
(1, 2)
>>> g.aother
(2,)
>>> g.bm
(3,)
>>> g.bq
(3, 4)
>>> g.bother
(4,)
The Meijer G-function generalizes the hypergeometric functions.
In some cases it can be expressed in terms of hypergeometric functions,
using Slater's theorem. For example:
>>> from sympy import hyperexpand
>>> from sympy.abc import a, b, c
>>> hyperexpand(meijerg([a], [], [c], [b], x), allow_hyper=True)
x**c*gamma(-a + c + 1)*hyper((-a + c + 1,),
(-b + c + 1,), -x)/gamma(-b + c + 1)
Thus the Meijer G-function also subsumes many named functions as special
cases. You can use ``expand_func()`` or ``hyperexpand()`` to (try to)
rewrite a Meijer G-function in terms of named special functions. For
example:
>>> from sympy import expand_func, S
>>> expand_func(meijerg([[],[]], [[0],[]], -x))
exp(x)
>>> hyperexpand(meijerg([[],[]], [[S(1)/2],[0]], (x/2)**2))
sin(x)/sqrt(pi)
See Also
========
hyper
sympy.simplify.hyperexpand
References
==========
.. [1] Luke, Y. L. (1969), The Special Functions and Their Approximations,
Volume 1
.. [2] https://en.wikipedia.org/wiki/Meijer_G-function
"""
def __new__(cls, *args, **kwargs):
if len(args) == 5:
args = [(args[0], args[1]), (args[2], args[3]), args[4]]
if len(args) != 3:
raise TypeError("args must be either as, as', bs, bs', z or " "as, bs, z")
def tr(p):
if len(p) != 2:
raise TypeError("wrong argument")
return TupleArg(_prep_tuple(p[0]), _prep_tuple(p[1]))
arg0, arg1 = tr(args[0]), tr(args[1])
if Tuple(arg0, arg1).has(oo, zoo, -oo):
raise ValueError("G-function parameters must be finite")
if any((a - b).is_Integer and a - b > 0 for a in arg0[0] for b in arg1[0]):
raise ValueError(
"no parameter a1, ..., an may differ from "
"any b1, ..., bm by a positive integer"
)
# TODO should we check convergence conditions?
return Function.__new__(cls, arg0, arg1, args[2], **kwargs)
def fdiff(self, argindex=3):
if argindex != 3:
return self._diff_wrt_parameter(argindex[1])
if len(self.an) >= 1:
a = list(self.an)
a[0] -= 1
G = meijerg(a, self.aother, self.bm, self.bother, self.argument)
return 1 / self.argument * ((self.an[0] - 1) * self + G)
elif len(self.bm) >= 1:
b = list(self.bm)
b[0] += 1
G = meijerg(self.an, self.aother, b, self.bother, self.argument)
return 1 / self.argument * (self.bm[0] * self - G)
else:
return S.Zero
def _diff_wrt_parameter(self, idx):
# Differentiation wrt a parameter can only be done in very special
# cases. In particular, if we want to differentiate with respect to
# `a`, all other gamma factors have to reduce to rational functions.
#
# Let MT denote mellin transform. Suppose T(-s) is the gamma factor
# appearing in the definition of G. Then
#
# MT(log(z)G(z)) = d/ds T(s) = d/da T(s) + ...
#
# Thus d/da G(z) = log(z)G(z) - ...
# The ... can be evaluated as a G function under the above conditions,
# the formula being most easily derived by using
#
# d Gamma(s + n) Gamma(s + n) / 1 1 1 \
# -- ------------ = ------------ | - + ---- + ... + --------- |
# ds Gamma(s) Gamma(s) \ s s + 1 s + n - 1 /
#
# which follows from the difference equation of the digamma function.
# (There is a similar equation for -n instead of +n).
# We first figure out how to pair the parameters.
an = list(self.an)
ap = list(self.aother)
bm = list(self.bm)
bq = list(self.bother)
if idx < len(an):
an.pop(idx)
else:
idx -= len(an)
if idx < len(ap):
ap.pop(idx)
else:
idx -= len(ap)
if idx < len(bm):
bm.pop(idx)
else:
bq.pop(idx - len(bm))
pairs1 = []
pairs2 = []
for l1, l2, pairs in [(an, bq, pairs1), (ap, bm, pairs2)]:
while l1:
x = l1.pop()
found = None
for i, y in enumerate(l2):
if not Mod((x - y).simplify(), 1):
found = i
break
if found is None:
raise NotImplementedError(
"Derivative not expressible " "as G-function?"
)
y = l2[i]
l2.pop(i)
pairs.append((x, y))
# Now build the result.
res = log(self.argument) * self
for a, b in pairs1:
sign = 1
n = a - b
base = b
if n < 0:
sign = -1
n = b - a
base = a
for k in range(n):
res -= sign * meijerg(
self.an + (base + k + 1,),
self.aother,
self.bm,
self.bother + (base + k + 0,),
self.argument,
)
for a, b in pairs2:
sign = 1
n = b - a
base = a
if n < 0:
sign = -1
n = a - b
base = b
for k in range(n):
res -= sign * meijerg(
self.an,
self.aother + (base + k + 1,),
self.bm + (base + k + 0,),
self.bother,
self.argument,
)
return res
def get_period(self):
"""
Return a number $P$ such that $G(x*exp(I*P)) == G(x)$.
Examples
========
>>> from sympy.functions.special.hyper import meijerg
>>> from sympy.abc import z
>>> from sympy import pi, S
>>> meijerg([1], [], [], [], z).get_period()
2*pi
>>> meijerg([pi], [], [], [], z).get_period()
oo
>>> meijerg([1, 2], [], [], [], z).get_period()
oo
>>> meijerg([1,1], [2], [1, S(1)/2, S(1)/3], [1], z).get_period()
12*pi
"""
# This follows from slater's theorem.
def compute(l):
# first check that no two differ by an integer
for i, b in enumerate(l):
if not b.is_Rational:
return oo
for j in range(i + 1, len(l)):
if not Mod((b - l[j]).simplify(), 1):
return oo
return reduce(ilcm, (x.q for x in l), 1)
beta = compute(self.bm)
alpha = compute(self.an)
p, q = len(self.ap), len(self.bq)
if p == q:
if beta == oo or alpha == oo:
return oo
return 2 * pi * ilcm(alpha, beta)
elif p < q:
return 2 * pi * beta
else:
return 2 * pi * alpha
def _eval_expand_func(self, **hints):
from sympy import hyperexpand
return hyperexpand(self)
def _eval_evalf(self, prec):
# The default code is insufficient for polar arguments.
# mpmath provides an optional argument "r", which evaluates
# G(z**(1/r)). I am not sure what its intended use is, but we hijack it
# here in the following way: to evaluate at a number z of |argument|
# less than (say) n*pi, we put r=1/n, compute z' = root(z, n)
        # (carefully so as not to lose the branch information), and evaluate
# G(z'**(1/r)) = G(z'**n) = G(z).
from sympy.functions import exp_polar, ceiling
from sympy import Expr
import mpmath
znum = self.argument._eval_evalf(prec)
if znum.has(exp_polar):
znum, branch = znum.as_coeff_mul(exp_polar)
if len(branch) != 1:
return
branch = branch[0].args[0] / I
else:
branch = S.Zero
n = ceiling(abs(branch / S.Pi)) + 1
znum = znum ** (S.One / n) * exp(I * branch / n)
# Convert all args to mpf or mpc
try:
[z, r, ap, bq] = [
arg._to_mpmath(prec)
for arg in [znum, 1 / n, self.args[0], self.args[1]]
]
except ValueError:
return
with mpmath.workprec(prec):
v = mpmath.meijerg(ap, bq, z, r)
return Expr._from_mpmath(v, prec)
def integrand(self, s):
""" Get the defining integrand D(s). """
from sympy import gamma
return (
self.argument ** s
* Mul(*(gamma(b - s) for b in self.bm))
* Mul(*(gamma(1 - a + s) for a in self.an))
/ Mul(*(gamma(1 - b + s) for b in self.bother))
/ Mul(*(gamma(a - s) for a in self.aother))
)
@property
def argument(self):
""" Argument of the Meijer G-function. """
return self.args[2]
@property
def an(self):
""" First set of numerator parameters. """
return Tuple(*self.args[0][0])
@property
def ap(self):
""" Combined numerator parameters. """
return Tuple(*(self.args[0][0] + self.args[0][1]))
@property
def aother(self):
""" Second set of numerator parameters. """
return Tuple(*self.args[0][1])
@property
def bm(self):
""" First set of denominator parameters. """
return Tuple(*self.args[1][0])
@property
def bq(self):
""" Combined denominator parameters. """
return Tuple(*(self.args[1][0] + self.args[1][1]))
@property
def bother(self):
""" Second set of denominator parameters. """
return Tuple(*self.args[1][1])
@property
def _diffargs(self):
return self.ap + self.bq
@property
def nu(self):
""" A quantity related to the convergence region of the integral,
c.f. references. """
return sum(self.bq) - sum(self.ap)
@property
def delta(self):
""" A quantity related to the convergence region of the integral,
c.f. references. """
return len(self.bm) + len(self.an) - S(len(self.ap) + len(self.bq)) / 2
@property
def is_number(self):
""" Returns true if expression has numeric data only. """
return not self.free_symbols
class HyperRep(Function):
"""
A base class for "hyper representation functions".
This is used exclusively in ``hyperexpand()``, but fits more logically here.
pFq is branched at 1 if p == q+1. For use with slater-expansion, we want
define an "analytic continuation" to all polar numbers, which is
continuous on circles and on the ray t*exp_polar(I*pi). Moreover, we want
a "nice" expression for the various cases.
This base class contains the core logic, concrete derived classes only
supply the actual functions.
"""
@classmethod
def eval(cls, *args):
from sympy import unpolarify
newargs = tuple(map(unpolarify, args[:-1])) + args[-1:]
if args != newargs:
return cls(*newargs)
@classmethod
def _expr_small(cls, x):
""" An expression for F(x) which holds for |x| < 1. """
raise NotImplementedError
@classmethod
def _expr_small_minus(cls, x):
""" An expression for F(-x) which holds for |x| < 1. """
raise NotImplementedError
@classmethod
def _expr_big(cls, x, n):
""" An expression for F(exp_polar(2*I*pi*n)*x), |x| > 1. """
raise NotImplementedError
@classmethod
def _expr_big_minus(cls, x, n):
""" An expression for F(exp_polar(2*I*pi*n + pi*I)*x), |x| > 1. """
raise NotImplementedError
def _eval_rewrite_as_nonrep(self, *args, **kwargs):
from sympy import Piecewise
x, n = self.args[-1].extract_branch_factor(allow_half=True)
minus = False
newargs = self.args[:-1] + (x,)
if not n.is_Integer:
minus = True
n -= S.Half
newerargs = newargs + (n,)
if minus:
small = self._expr_small_minus(*newargs)
big = self._expr_big_minus(*newerargs)
else:
small = self._expr_small(*newargs)
big = self._expr_big(*newerargs)
if big == small:
return small
return Piecewise((big, abs(x) > 1), (small, True))
def _eval_rewrite_as_nonrepsmall(self, *args, **kwargs):
x, n = self.args[-1].extract_branch_factor(allow_half=True)
args = self.args[:-1] + (x,)
if not n.is_Integer:
return self._expr_small_minus(*args)
return self._expr_small(*args)
class HyperRep_power1(HyperRep):
""" Return a representative for hyper([-a], [], z) == (1 - z)**a. """
@classmethod
def _expr_small(cls, a, x):
return (1 - x) ** a
@classmethod
def _expr_small_minus(cls, a, x):
return (1 + x) ** a
@classmethod
def _expr_big(cls, a, x, n):
if a.is_integer:
return cls._expr_small(a, x)
return (x - 1) ** a * exp((2 * n - 1) * pi * I * a)
@classmethod
def _expr_big_minus(cls, a, x, n):
if a.is_integer:
return cls._expr_small_minus(a, x)
return (1 + x) ** a * exp(2 * n * pi * I * a)
class HyperRep_power2(HyperRep):
""" Return a representative for hyper([a, a - 1/2], [2*a], z). """
@classmethod
def _expr_small(cls, a, x):
return 2 ** (2 * a - 1) * (1 + sqrt(1 - x)) ** (1 - 2 * a)
@classmethod
def _expr_small_minus(cls, a, x):
return 2 ** (2 * a - 1) * (1 + sqrt(1 + x)) ** (1 - 2 * a)
@classmethod
def _expr_big(cls, a, x, n):
sgn = -1
if n.is_odd:
sgn = 1
n -= 1
return (
2 ** (2 * a - 1)
* (1 + sgn * I * sqrt(x - 1)) ** (1 - 2 * a)
* exp(-2 * n * pi * I * a)
)
@classmethod
def _expr_big_minus(cls, a, x, n):
sgn = 1
if n.is_odd:
sgn = -1
return (
sgn
* 2 ** (2 * a - 1)
* (sqrt(1 + x) + sgn) ** (1 - 2 * a)
* exp(-2 * pi * I * a * n)
)
class HyperRep_log1(HyperRep):
""" Represent -z*hyper([1, 1], [2], z) == log(1 - z). """
@classmethod
def _expr_small(cls, x):
return log(1 - x)
@classmethod
def _expr_small_minus(cls, x):
return log(1 + x)
@classmethod
def _expr_big(cls, x, n):
return log(x - 1) + (2 * n - 1) * pi * I
@classmethod
def _expr_big_minus(cls, x, n):
return log(1 + x) + 2 * n * pi * I
class HyperRep_atanh(HyperRep):
""" Represent hyper([1/2, 1], [3/2], z) == atanh(sqrt(z))/sqrt(z). """
@classmethod
def _expr_small(cls, x):
return atanh(sqrt(x)) / sqrt(x)
def _expr_small_minus(cls, x):
return atan(sqrt(x)) / sqrt(x)
def _expr_big(cls, x, n):
if n.is_even:
return (acoth(sqrt(x)) + I * pi / 2) / sqrt(x)
else:
return (acoth(sqrt(x)) - I * pi / 2) / sqrt(x)
def _expr_big_minus(cls, x, n):
if n.is_even:
return atan(sqrt(x)) / sqrt(x)
else:
return (atan(sqrt(x)) - pi) / sqrt(x)
class HyperRep_asin1(HyperRep):
""" Represent hyper([1/2, 1/2], [3/2], z) == asin(sqrt(z))/sqrt(z). """
@classmethod
def _expr_small(cls, z):
return asin(sqrt(z)) / sqrt(z)
@classmethod
def _expr_small_minus(cls, z):
return asinh(sqrt(z)) / sqrt(z)
@classmethod
def _expr_big(cls, z, n):
return S.NegativeOne ** n * (
(S.Half - n) * pi / sqrt(z) + I * acosh(sqrt(z)) / sqrt(z)
)
@classmethod
def _expr_big_minus(cls, z, n):
return S.NegativeOne ** n * (asinh(sqrt(z)) / sqrt(z) + n * pi * I / sqrt(z))
class HyperRep_asin2(HyperRep):
""" Represent hyper([1, 1], [3/2], z) == asin(sqrt(z))/sqrt(z)/sqrt(1-z). """
# TODO this can be nicer
@classmethod
def _expr_small(cls, z):
return HyperRep_asin1._expr_small(z) / HyperRep_power1._expr_small(S.Half, z)
@classmethod
def _expr_small_minus(cls, z):
return HyperRep_asin1._expr_small_minus(z) / HyperRep_power1._expr_small_minus(
S.Half, z
)
@classmethod
def _expr_big(cls, z, n):
return HyperRep_asin1._expr_big(z, n) / HyperRep_power1._expr_big(S.Half, z, n)
@classmethod
def _expr_big_minus(cls, z, n):
return HyperRep_asin1._expr_big_minus(z, n) / HyperRep_power1._expr_big_minus(
S.Half, z, n
)
class HyperRep_sqrts1(HyperRep):
""" Return a representative for hyper([-a, 1/2 - a], [1/2], z). """
@classmethod
def _expr_small(cls, a, z):
return ((1 - sqrt(z)) ** (2 * a) + (1 + sqrt(z)) ** (2 * a)) / 2
@classmethod
def _expr_small_minus(cls, a, z):
return (1 + z) ** a * cos(2 * a * atan(sqrt(z)))
@classmethod
def _expr_big(cls, a, z, n):
if n.is_even:
return (
(sqrt(z) + 1) ** (2 * a) * exp(2 * pi * I * n * a)
+ (sqrt(z) - 1) ** (2 * a) * exp(2 * pi * I * (n - 1) * a)
) / 2
else:
n -= 1
return (
(sqrt(z) - 1) ** (2 * a) * exp(2 * pi * I * a * (n + 1))
+ (sqrt(z) + 1) ** (2 * a) * exp(2 * pi * I * a * n)
) / 2
@classmethod
def _expr_big_minus(cls, a, z, n):
if n.is_even:
return (1 + z) ** a * exp(2 * pi * I * n * a) * cos(2 * a * atan(sqrt(z)))
else:
return (
(1 + z) ** a
* exp(2 * pi * I * n * a)
* cos(2 * a * atan(sqrt(z)) - 2 * pi * a)
)
class HyperRep_sqrts2(HyperRep):
""" Return a representative for
sqrt(z)/2*[(1-sqrt(z))**2a - (1 + sqrt(z))**2a]
== -2*z/(2*a+1) d/dz hyper([-a - 1/2, -a], [1/2], z)"""
@classmethod
def _expr_small(cls, a, z):
return sqrt(z) * ((1 - sqrt(z)) ** (2 * a) - (1 + sqrt(z)) ** (2 * a)) / 2
@classmethod
def _expr_small_minus(cls, a, z):
return sqrt(z) * (1 + z) ** a * sin(2 * a * atan(sqrt(z)))
@classmethod
def _expr_big(cls, a, z, n):
if n.is_even:
return (
sqrt(z)
/ 2
* (
(sqrt(z) - 1) ** (2 * a) * exp(2 * pi * I * a * (n - 1))
- (sqrt(z) + 1) ** (2 * a) * exp(2 * pi * I * a * n)
)
)
else:
n -= 1
return (
sqrt(z)
/ 2
* (
(sqrt(z) - 1) ** (2 * a) * exp(2 * pi * I * a * (n + 1))
- (sqrt(z) + 1) ** (2 * a) * exp(2 * pi * I * a * n)
)
)
def _expr_big_minus(cls, a, z, n):
if n.is_even:
return (
(1 + z) ** a
* exp(2 * pi * I * n * a)
* sqrt(z)
* sin(2 * a * atan(sqrt(z)))
)
else:
return (
(1 + z) ** a
* exp(2 * pi * I * n * a)
* sqrt(z)
* sin(2 * a * atan(sqrt(z)) - 2 * pi * a)
)
class HyperRep_log2(HyperRep):
""" Represent log(1/2 + sqrt(1 - z)/2) == -z/4*hyper([3/2, 1, 1], [2, 2], z) """
@classmethod
def _expr_small(cls, z):
return log(S.Half + sqrt(1 - z) / 2)
@classmethod
def _expr_small_minus(cls, z):
return log(S.Half + sqrt(1 + z) / 2)
@classmethod
def _expr_big(cls, z, n):
if n.is_even:
return (n - S.Half) * pi * I + log(sqrt(z) / 2) + I * asin(1 / sqrt(z))
else:
return (n - S.Half) * pi * I + log(sqrt(z) / 2) - I * asin(1 / sqrt(z))
def _expr_big_minus(cls, z, n):
if n.is_even:
return pi * I * n + log(S.Half + sqrt(1 + z) / 2)
else:
return pi * I * n + log(sqrt(1 + z) / 2 - S.Half)
class HyperRep_cosasin(HyperRep):
""" Represent hyper([a, -a], [1/2], z) == cos(2*a*asin(sqrt(z))). """
# Note there are many alternative expressions, e.g. as powers of a sum of
# square roots.
@classmethod
def _expr_small(cls, a, z):
return cos(2 * a * asin(sqrt(z)))
@classmethod
def _expr_small_minus(cls, a, z):
return cosh(2 * a * asinh(sqrt(z)))
@classmethod
def _expr_big(cls, a, z, n):
return cosh(2 * a * acosh(sqrt(z)) + a * pi * I * (2 * n - 1))
@classmethod
def _expr_big_minus(cls, a, z, n):
return cosh(2 * a * asinh(sqrt(z)) + 2 * a * pi * I * n)
class HyperRep_sinasin(HyperRep):
""" Represent 2*a*z*hyper([1 - a, 1 + a], [3/2], z)
== sqrt(z)/sqrt(1-z)*sin(2*a*asin(sqrt(z))) """
@classmethod
def _expr_small(cls, a, z):
return sqrt(z) / sqrt(1 - z) * sin(2 * a * asin(sqrt(z)))
@classmethod
def _expr_small_minus(cls, a, z):
return -sqrt(z) / sqrt(1 + z) * sinh(2 * a * asinh(sqrt(z)))
@classmethod
def _expr_big(cls, a, z, n):
return (
-1
/ sqrt(1 - 1 / z)
* sinh(2 * a * acosh(sqrt(z)) + a * pi * I * (2 * n - 1))
)
@classmethod
def _expr_big_minus(cls, a, z, n):
return -1 / sqrt(1 + 1 / z) * sinh(2 * a * asinh(sqrt(z)) + 2 * a * pi * I * n)
class appellf1(Function):
r"""
    This is the Appell hypergeometric function of two variables, defined as:
.. math ::
F_1(a,b_1,b_2,c,x,y) = \sum_{m=0}^{\infty} \sum_{n=0}^{\infty}
\frac{(a)_{m+n} (b_1)_m (b_2)_n}{(c)_{m+n}}
\frac{x^m y^n}{m! n!}.
References
==========
.. [1] https://en.wikipedia.org/wiki/Appell_series
.. [2] http://functions.wolfram.com/HypergeometricFunctions/AppellF1/
"""
@classmethod
def eval(cls, a, b1, b2, c, x, y):
if default_sort_key(b1) > default_sort_key(b2):
b1, b2 = b2, b1
x, y = y, x
return cls(a, b1, b2, c, x, y)
elif b1 == b2 and default_sort_key(x) > default_sort_key(y):
x, y = y, x
return cls(a, b1, b2, c, x, y)
if x == 0 and y == 0:
return S.One
def fdiff(self, argindex=5):
a, b1, b2, c, x, y = self.args
if argindex == 5:
return (a * b1 / c) * appellf1(a + 1, b1 + 1, b2, c + 1, x, y)
elif argindex == 6:
return (a * b2 / c) * appellf1(a + 1, b1, b2 + 1, c + 1, x, y)
elif argindex in (1, 2, 3, 4):
return Derivative(self, self.args[argindex - 1])
else:
raise ArgumentIndexError(self, argindex)
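# Worked example (follows directly from fdiff above; a, b1, b2, c, x, y are
# assumed to be independent Symbol instances, so only the fifth argument
# depends on x):
#   appellf1(a, b1, b2, c, x, y).diff(x)
#       == a*b1/c * appellf1(a + 1, b1 + 1, b2, c + 1, x, y)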
|
the-stack_0_25102
|
from django.contrib.gis.geos import Point
from rest_framework import status
from rest_framework.test import APIClient, APITestCase
from customers.models import Customer
from drivers.models import Cab, Driver, Vehicle
# Create your tests here.
class TripTests(APITestCase):
def setUp(self):
driver1 = {
'first_name': 'soy',
'last_name': 'driver1',
'document_number': '11111'
}
driver2 = {
'first_name': 'soy',
'last_name': 'driver2',
'document_number': '22222'
}
driver3 = {
'first_name': 'soy',
'last_name': 'driver3',
'document_number': '333333'
}
driver4 = {
'first_name': 'soy',
'last_name': 'driver4',
'document_number': '44444'
}
self.driver_1 = self.create_helper(Driver, driver1)
self.driver_2 = self.create_helper(Driver, driver2)
self.driver_3 = self.create_helper(Driver, driver3)
self.driver_4 = self.create_helper(Driver, driver4)
vehicle1 = {
'number_plate': 'kl-111',
}
vehicle2 = {
'number_plate': 'kl-222',
}
vehicle3 = {
'number_plate': 'kl-333',
}
vehicle4 = {
'number_plate': 'kl-444',
}
self.vehicle_1 = self.create_helper(Vehicle, vehicle1)
self.vehicle_2 = self.create_helper(Vehicle, vehicle2)
self.vehicle_3 = self.create_helper(Vehicle, vehicle3)
self.vehicle_4 = self.create_helper(Vehicle, vehicle4)
cab1_data = {
'driver_id': self.driver_1,
'vehicle_id': self.vehicle_1,
'location': Point([1000,2000]),
'state': 0,
}
cab2_data = {
'driver_id': self.driver_2,
'vehicle_id': self.vehicle_2,
'location': Point([100, 200]),
'state': 0,
}
cab3_data = {
'driver_id': self.driver_3,
'vehicle_id': self.vehicle_3,
'location': Point([10, 20]),
'state': 0,
}
cab4_data = {
'driver_id': self.driver_4,
'vehicle_id': self.vehicle_4,
'location': Point([50, -10]),
'state': 0,
}
self.cab1 = self.create_helper(Cab, cab1_data)
self.cab2 = self.create_helper(Cab, cab2_data)
self.cab3 = self.create_helper(Cab, cab3_data)
self.cab4 = self.create_helper(Cab, cab4_data)
customer1 = {
'first_name': 'soy',
'last_name': 'customer',
'document_number': '333444'
}
self.customer_1 = self.create_helper(Customer, customer1)
self.request_trip = None
self.client = APIClient()
    def create_helper(self, model=None, d=None):
        return model.objects.create(**(d or {}))
def test_request_trip(self):
path = '/api-v1.0/request/'
data = {
'customer_id': self.customer_1.id,
'cab_id': self.cab1.id,
'location': 'Point(20 20)',
}
response_request = self.client.post(path, data)
self.request_trip = response_request.json()
# test create request trip
self.assertEquals(status.HTTP_201_CREATED, response_request.status_code)
response2 = self.client.get(path)
self.assertEquals(len(response2.json()), 1)
# test top 3 nearest cabs
response10 = self.client.get(f"{path}{response_request.json()['id']}/find_nearest_cabs/")
self.assertEquals(status.HTTP_200_OK, response10.status_code)
trip_url = '/api-v1.0/trip/'
# test create trip
data_trip = {
'request_id': self.request_trip['id'],
'price': 100.00
}
trip = self.client.post(f"{trip_url}", data_trip)
self.assertEquals(status.HTTP_201_CREATED, trip.status_code)
trip_id = trip.json()['id']
# test get trip by id
response = self.client.get(f"{trip_url}{trip_id}/")
self.assertEquals(status.HTTP_200_OK, response.status_code)
# test get all trips
response2 = self.client.get(trip_url)
self.assertEquals(status.HTTP_200_OK, response2.status_code)
# test get all availables trips
response3 = self.client.get(f"{trip_url}availables/")
self.assertEquals(status.HTTP_200_OK, response3.status_code)
# test start trip
response4 = self.client.patch(f"{trip_url}{trip_id}/start/")
self.assertEquals(status.HTTP_204_NO_CONTENT, response4.status_code)
self.assertTrue(response4.data.get('start_date', False))
# test complete trip
response5 = self.client.patch(f"{trip_url}{trip_id}/complete/")
self.assertEquals(status.HTTP_204_NO_CONTENT, response5.status_code)
self.assertTrue(response5.data.get('end_date', False))
# test create invoice after complete trip
response6 = self.client.patch(f"{trip_url}{trip_id}/invoices/")
self.assertEquals(len(response6.json()), 1)
|
the-stack_0_25103
|
import numpy as np
from PIL import Image # pillow
import sys
import pygame
from .games.base.pygamewrapper import PyGameWrapper
class PLE(object):
"""
ple.PLE(
game, fps=30,
frame_skip=1, num_steps=1,
reward_values={}, force_fps=True,
display_screen=False, add_noop_action=True,
NOOP=K_F15, state_preprocessor=None,
rng=24
)
Main wrapper that interacts with games.
    Provides a similar interface to the Arcade Learning Environment.
Parameters
----------
game: Class from ple.games.base
The game the PLE environment manipulates and maintains.
fps: int (default: 30)
The desired frames per second we want to run our game at.
Typical settings are 30 and 60 fps.
frame_skip: int (default: 1)
The number of times we skip getting observations while
        repeating an action.
num_steps: int (default: 1)
The number of times we repeat an action.
reward_values: dict
        This contains the rewards we wish to give our agent based on
different actions in game. The current defaults are as follows:
.. code-block:: python
rewards = {
"positive": 1.0,
"negative": -1.0,
"tick": 0.0,
"loss": -5.0,
"win": 5.0
}
Tick is given to the agent at each game step. You can selectively
        adjust the rewards by passing a dictionary with the key you want to
        change. E.g. if we want to adjust the negative reward and the tick
reward we would pass in the following:
.. code-block:: python
rewards = {
"negative": -2.0,
"tick": -0.01
}
Keep in mind that the tick is applied at each frame. If the game is
running at 60fps the agent will get a reward of 60*tick.
force_fps: bool (default: True)
        If False, PLE delays between game.step() calls to hold the game at the
        specified fps. If True, PLE passes an elapsed time delta so the game
        steps by an amount of time consistent with the specified fps.
        This is usually set to True as it allows the game to run as fast as
        possible, which speeds up training.
display_screen: bool (default: False)
        Whether we draw updates to the screen. Disabling this speeds up
        iteration speed. This can be toggled to True during testing phases
        so you can observe the agent's progress.
add_noop_action: bool (default: True)
This inserts the NOOP action specified as a valid move the agent
can make.
state_preprocessor: python function (default: None)
Python function which takes a dict representing game state and
returns a numpy array.
rng: numpy.random.RandomState, int, array_like or None. (default: 24)
        Random number generator used by PLE and the games.
"""
def __init__(self,
game, fps=30, frame_skip=1, num_steps=1,
reward_values={}, force_fps=True, display_screen=False,
add_noop_action=True, state_preprocessor=None, rng=24):
self.game = game
self.fps = fps
self.frame_skip = frame_skip
self.NOOP = None
self.num_steps = num_steps
self.force_fps = force_fps
self.display_screen = display_screen
self.add_noop_action = add_noop_action
self.last_action = []
self.action = []
self.previous_score = 0
self.frame_count = 0
# self.map = mapname
# update the scores of games with values we pick
if reward_values:
# print("we here adjusting rewards")
self.game.adjustRewards(reward_values)
print(reward_values)
if isinstance(self.game, PyGameWrapper):
# print("is instance true")
# print(rng)
if isinstance(rng, np.random.RandomState):
self.rng = rng
else:
self.rng = np.random.RandomState(rng)
# some pygame games preload the images
# to speed resetting and inits up.
pygame.display.set_mode((1, 1), pygame.NOFRAME)
else:
# in order to use doom, install following https://github.com/openai/doom-py
from .games.base.doomwrapper import DoomWrapper
if isinstance(self.game, DoomWrapper):
self.rng = rng
self.game.setRNG(self.rng)
self.init()
self.state_preprocessor = state_preprocessor
self.state_dim = None
if self.state_preprocessor is not None:
self.state_dim = self.game.getGameState()
if self.state_dim is None:
raise ValueError(
"Asked to return non-visual state on game that does not support it!")
else:
self.state_dim = self.state_preprocessor(self.state_dim).shape
if game.allowed_fps is not None and self.fps != game.allowed_fps:
raise ValueError("Game requires %dfps, was given %d." %
(game.allowed_fps, game.allowed_fps))
def _tick(self):
"""
Calculates the elapsed time between frames or ticks.
"""
if self.force_fps:
return 1000.0 / self.fps
else:
return self.game.tick(self.fps)
def init(self):
"""
Initializes the game. This depends on the game and could include
doing things such as setting up the display, clock etc.
This method should be explicitly called.
"""
self.game._setup()
self.game.init() #this is the games setup/init
def getActionSet(self):
"""
Gets the actions the game supports. Optionally inserts the NOOP
action if PLE has add_noop_action set to True.
Returns
--------
list of pygame.constants
The agent can simply select the index of the action
to perform.
"""
actions = self.game.actions
# print(self.game.actions)
        if sys.version_info > (3, 0):  # Python 3: dict views are not plain lists
            if isinstance(actions, (dict, type({}.values()))):
                actions = actions.values()
else:
if isinstance(actions, dict):
actions = actions.values()
actions = list(actions) #.values()
#print (actions)
#assert isinstance(actions, list), "actions is not a list"
if self.add_noop_action:
actions.append(self.NOOP)
return actions
def getFrameNumber(self):
"""
Gets the current number of frames the agent has seen
since PLE was initialized.
Returns
--------
int
"""
return self.frame_count
def game_over(self):
"""
Returns True if the game has reached a terminal state and
False otherwise.
This state is game dependent.
Returns
-------
bool
"""
return self.game.game_over()
def score(self):
"""
Gets the score the agent currently has in game.
Returns
-------
int
"""
return self.game.getScore()
def lives(self):
"""
Gets the number of lives the agent has left. Not all games have
the concept of lives.
Returns
-------
int
"""
return self.game.lives
def reset_game(self):
"""
Performs a reset of the games to a clean initial state.
"""
self.last_action = []
self.action = []
self.previous_score = 0.0
self.game.reset()
def getScreenRGB(self):
"""
Gets the current game screen in RGB format.
Returns
--------
numpy uint8 array
Returns a numpy array with the shape (width, height, 3).
"""
return self.game.getScreenRGB()
def getScreenGrayscale(self):
"""
        Gets the current game screen in Grayscale format. Converts from RGB using relative luminance.
Returns
--------
numpy uint8 array
Returns a numpy array with the shape (width, height).
"""
frame = self.getScreenRGB()
frame = 0.21 * frame[:, :, 0] + 0.72 * \
frame[:, :, 1] + 0.07 * frame[:, :, 2]
frame = np.round(frame).astype(np.uint8)
return frame
def saveScreen(self, filename):
"""
Saves the current screen to png file.
Parameters
----------
filename : string
The path with filename to where we want the image saved.
"""
frame = Image.fromarray(self.getScreenRGB())
frame.save(filename)
def getScreenDims(self):
"""
Gets the games screen dimensions.
Returns
-------
tuple of int
Returns a tuple of the following format (screen_width, screen_height).
"""
return self.game.getScreenDims()
def getGameStateDims(self):
"""
Gets the games non-visual state dimensions.
Returns
-------
tuple of int or None
Returns a tuple of the state vectors shape or None if the game does not support it.
"""
return self.state_dim
def getGameState(self):
"""
Gets a non-visual state representation of the game.
This can include items such as player position, velocity, ball location and velocity etc.
Returns
-------
dict or None
It returns a dict of game information. This greatly depends on the game in question and must be referenced against each game.
If no state is available or supported None will be returned back.
"""
state = self.game.getGameState()
if state is not None and self.state_preprocessor is not None:
return self.state_preprocessor(state)
else:
raise ValueError(
"Was asked to return state vector for game that does not support it!")
def act(self, action):
"""
Perform an action on the game. We lockstep frames with actions. If act is not called the game will not run.
Parameters
----------
action : int
The index of the action we wish to perform. The index usually corresponds to the index item returned by getActionSet().
Returns
-------
int
            Returns the reward that the agent has accumulated while performing the action.
"""
return sum(self._oneStepAct(action) for i in range(self.frame_skip))
def _draw_frame(self):
"""
        Decides whether the screen should be drawn to the display.
"""
self.game._draw_frame(self.display_screen)
def _oneStepAct(self, action):
"""
Performs an action on the game. Checks if the game is over or if the provided action is valid based on the allowed action set.
"""
if self.game_over():
return 0.0
if action not in self.getActionSet():
action = self.NOOP
self._setAction(action)
for i in range(self.num_steps):
time_elapsed = self._tick()
self.game.step(time_elapsed)
self._draw_frame()
self.frame_count += self.num_steps
return self._getReward()
def _setAction(self, action):
"""
        Instructs the game to perform an action if it's not a NOOP
"""
if action is not None:
self.game._setAction(action, self.last_action)
self.last_action = action
def _getReward(self):
"""
Returns the reward the agent has gained as the difference between the last action and the current one.
"""
reward = self.game.getScore() - self.previous_score
self.previous_score = self.game.getScore()
return reward
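# Hedged usage sketch (added commentary, not part of the original module). A
# typical agent loop, assuming one of the bundled games (e.g. Pong) and a
# working pygame installation are available:
#
#     from ple import PLE
#     from ple.games.pong import Pong
#     game = Pong()
#     env = PLE(game, fps=30, display_screen=False, reward_values={"tick": -0.01})
#     env.init()
#     actions = env.getActionSet()
#     while not env.game_over():
#         reward = env.act(actions[0])  # a real agent would choose an action here
#     print("final score:", env.score())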
|
the-stack_0_25105
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012, Mike Taylor
#
# This file is part of printio released under MIT license.
# See the LICENSE for more information.
"""
The printio library in Python.
"""
__author__ = "Mike Taylor"
__version__ = "0.0.2"
__email__ = "[email protected]"
__homepage__ = "http://github.com/taylortree/printio"
__copyright__ = "Copyright 2011, Mike Taylor <[email protected]>"
__license__ = "MIT"
from core import PrettyValue
from core import PrettyValues
|
the-stack_0_25107
|
from .common import MEDIAWIKI_API_URL
from rdfsync.wb2rdf.conversion import Converter
converter = Converter(endpoint=MEDIAWIKI_API_URL, input_format='ttl', day_num=365)
items_changed_during_last_year_list \
= ['Q19', 'Q13', 'Q21', 'Q15', 'P26', 'Q12', 'Q18', 'P30', 'P11', 'Q20', 'Q14', 'Q17', 'Q11', 'Q5', 'Q16']
def test_list_of_items_to_change():
items_list = converter.get_items_properties_to_sync()
print('hihi\n' + str(items_list))
assert items_list.issubset(items_changed_during_last_year_list)
|
the-stack_0_25109
|
import unittest
import numpy
import chainer
from chainer import backend
from chainer import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
@testing.parameterize(*testing.product({
'train': [True, False],
'shape': [(3, 2), (5, 6)],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestRReLU(unittest.TestCase):
def setUp(self):
        # Avoid instability of the numerical gradient
self.x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
self.x[(-0.05 < self.x) & (self.x < 0.05)] = 0.5
self.gy = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
# Assumption l < u
self.l = numpy.random.uniform(0, 1)
self.u = numpy.random.uniform(0, 1)
if self.l >= self.u:
self.l, self.u = self.u, self.l
self.check_forward_options = {}
self.check_backward_options = {}
if self.dtype == numpy.float16:
self.check_forward_options = {'atol': 1e-4, 'rtol': 1e-3}
self.check_backward_options = {'atol': 5e-4, 'rtol': 5e-3}
def check_forward(self, x_data):
x = chainer.Variable(x_data)
xp = backend.get_array_module(x)
with chainer.using_config('train', self.train):
y, r = functions.rrelu(x, l=self.l, u=self.u, return_r=True)
self.assertEqual(y.data.dtype, self.dtype)
expected = xp.where(x_data >= 0, x_data, x_data * r)
testing.assert_allclose(
expected, y.data, **self.check_forward_options)
def test_forward_cpu(self):
self.check_forward(self.x)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.x))
def check_backward(self, x_data, y_grad):
xp = backend.get_array_module(x_data)
r = xp.random.uniform(self.l, self.u, x_data.shape).astype(
x_data.dtype)
def f(x):
return functions.rrelu(
x, self.l, self.u,
r=r.astype(x.dtype) # check_backward casts only x
)
with chainer.using_config('train', self.train):
gradient_check.check_backward(
f, x_data, y_grad, dtype=numpy.float64,
**self.check_backward_options)
def test_backward_cpu(self):
self.check_backward(self.x, self.gy)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
@testing.parameterize(*testing.product({
'specify_r': [True, False],
'train': [True, False],
'shape': [(3, 2), (5, 6)],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestRReLUR(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
self.x[(-0.05 < self.x) & (self.x < 0.05)] = 0.5
# Assumption l < u
self.l = numpy.random.uniform(0, 1)
self.u = numpy.random.uniform(0, 1)
if self.l >= self.u:
self.l, self.u = self.u, self.l
self.r = numpy.random.uniform(
self.l, self.u, self.x.shape).astype(self.x.dtype)
def _check(self):
r = self.r if self.specify_r else None
with chainer.using_config('train', self.train):
out, out_r = functions.rrelu(
self.x, self.l, self.u, r=r, return_r=True)
assert isinstance(out_r, type(out.array))
if r is None:
assert out_r.shape == out.array.shape
else:
if self.train:
assert out_r is r
def test_cpu(self):
with chainer.using_config('use_ideep', 'never'):
self._check()
@attr.gpu
def test_gpu(self):
self.x = cuda.to_gpu(self.x)
self.r = cuda.to_gpu(self.r)
self._check()
testing.run_module(__name__, __file__)
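# Hedged usage sketch (added commentary, not part of the original tests): how
# functions.rrelu is typically called outside the test harness; the l/u bounds
# below are illustrative only.
#
#     x = numpy.random.uniform(-1, 1, (3, 2)).astype(numpy.float32)
#     with chainer.using_config('train', True):
#         y, r = functions.rrelu(chainer.Variable(x), l=0.125, u=1.0 / 3, return_r=True)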
|
the-stack_0_25110
|
# Copyright 2008-2013 Yousef Ourabi
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import django
from django.conf import settings
from django.http import HttpResponseForbidden, HttpResponseRedirect, HttpResponse
from django.core.exceptions import MiddlewareNotUsed
from django.core.cache import cache
from django.template import loader
# Own model
from models import Banishment, Whitelist
class BanishMiddleware(object):
def __init__(self):
"""
Middleware init is called once per server on startup - do the heavy
lifting here.
"""
# If disabled or not enabled raise MiddleWareNotUsed so django
# processes next middleware.
self.ENABLED = getattr(settings, 'BANISH_ENABLED', False)
self.DEBUG = getattr(settings, 'BANISH_DEBUG', False)
self.USE_HTTP_X_FORWARDED_FOR = getattr(settings, 'BANISH_USE_HTTP_X_FORWARDED_FOR', False)
self.BANISH_EMPTY_UA = getattr(settings, 'BANISH_EMPTY_UA', True)
self.BANISH_MESSAGE = getattr(settings, 'BANISH_MESSAGE', "You are banned.")
self.BANISH_RESTRICT_FILTER = getattr(settings, 'BANISH_RESTRICT_FILTER', False)
self.BANISH_URL_REDIRECT = getattr(settings, 'BANISH_URL_REDIRECT', None)
self.BANISH_TEMPLATRE = getattr(settings, 'BANISH_TEMPLATRE', None)
self.BANISH_TOR_IPS = getattr(settings, 'BANISH_TOR_IPS', False)
self.BANISH_ONLY_WHITELIST = getattr(settings, 'BANISH_ONLY_WHITELIST', False)
# New version
self.BANISH_ABUSE_THRESHOLD_TO_URL = getattr(settings, 'BANISH_ABUSE_THRESHOLD_TO_URL', 10000)
self.DEFAULT_BANISH_ABUSE_THRESHOLD = getattr(settings, 'DEFAULT_BANISH_ABUSE_THRESHOLD', False)
if not self.ENABLED:
raise MiddlewareNotUsed(
"django-banish is not enabled via settings.py")
if self.DEBUG:
print >> sys.stderr, "[django-banish] status = enabled"
# Prefix All keys in cache to avoid key collisions
self.BANISH_PREFIX = 'DJANGO_BANISH:'
self.ABUSE_PREFIX = 'DJANGO_BANISH_ABUSE:'
self.WHITELIST_PREFIX = 'DJANGO_BANISH_WHITELIST:'
self.BANNED_AGENTS = []
if self.BANISH_EMPTY_UA:
self.BANNED_AGENTS.append(None)
# Populate various 'banish' buckets
for ban in Banishment.objects.all():
if self.DEBUG:
print >> sys.stderr, "IP BANISHMENT: ", ban.type
if ban.type == 'ip-address':
cache_key = self.BANISH_PREFIX + ban.condition
cache.set(cache_key, "1")
if ban.type == 'user-agent':
self.BANNED_AGENTS.append(ban.condition)
for whitelist in Whitelist.objects.all():
if whitelist.type == 'ip-address-whitelist':
cache_key = self.WHITELIST_PREFIX + whitelist.condition
cache.set(cache_key, "1")
def _get_path(self, request):
return request.path
def _get_ip(self, request):
ip = request.META['REMOTE_ADDR']
if self.USE_HTTP_X_FORWARDED_FOR or not ip or ip == '127.0.0.1':
ip = request.META.get('HTTP_X_FORWARDED_FOR', ip).split(',')[0].strip()
return ip
def _is_tor_ip(self, ip):
""" Checks if ip address is a TOR exit node.
Relies on periodically updated IP list.
If IP list update has failed then gracefully assumes
there are no Tor exit nodes. This is so that
our service continues to function even if the external
party we are relying on goes down.
:param ip: IP address as a string
"""
TOR_CACHE_KEY = getattr(settings, 'TOR_CACHE_KEY')
ips = cache.get(TOR_CACHE_KEY)
if not ips:
# Tor IP list not available; IP check not active
return False
return ip in ips
def process_request(self, request):
abuse_threshold = self.DEFAULT_BANISH_ABUSE_THRESHOLD
url_name = "all"
ip = self._get_ip(request)
user_agent = request.META.get('HTTP_USER_AGENT', None)
if self.BANISH_RESTRICT_FILTER:
for threshold_to_url in self.BANISH_ABUSE_THRESHOLD_TO_URL:
if (self._get_path(request).find(threshold_to_url.get('url')) >= 0):
abuse_threshold = threshold_to_url.get(u'threshold')
url = threshold_to_url.get(u'url')
url_name = threshold_to_url.get(u'name')
if self.DEBUG:
print >> sys.stderr, "Request URL in BANISH_ABUSE_THRESHOLD_TO_URL: %s with %s" % (url, abuse_threshold)
if self.DEBUG:
print >> sys.stderr, "GOT IP FROM Request: %s and User Agent %s" % (ip, user_agent)
# Check whitelist first, if not allowed, then check ban conditions
if self.is_whitelisted(ip):
return None
elif self.is_banned(ip) or \
self.monitor_abuse(ip, abuse_threshold, url_name) or \
self.BANISH_ONLY_WHITELIST or \
user_agent in self.BANNED_AGENTS:
if self.BANISH_URL_REDIRECT:
return self.redirect_response_forbidden(self.BANISH_URL_REDIRECT)
elif self.BANISH_TEMPLATRE:
return self.template_response_forbidden(request, self.BANISH_TEMPLATRE)
else:
return self.http_response_forbidden(self.BANISH_MESSAGE, content_type="text/html")
else:
if self._is_tor_ip(ip) and self.BANISH_TOR_IPS:
return self.http_response_forbidden("Banish TOR ip", content_type="text/html")
def http_response_forbidden(self, message, content_type):
if django.VERSION[:2] > (1, 3):
kwargs = {'content_type': content_type}
else:
kwargs = {'mimetype': content_type}
return HttpResponseForbidden(message, **kwargs)
def redirect_response_forbidden(self, url):
return HttpResponseRedirect(url)
def template_response_forbidden(self, request, template):
t = loader.get_template(template)
return HttpResponse(t.render({}, request))
def is_banned(self, ip):
        # If a BANISH cache key exists we know the user is banned.
is_banned = cache.get(self.BANISH_PREFIX + ip)
if self.DEBUG and is_banned:
print >> sys.stderr, "BANISH BANNED IP: ", self.BANISH_PREFIX + ip
return is_banned
def is_whitelisted(self, ip):
# If a whitelist key exists, return True to allow the request through
is_whitelisted = cache.get(self.WHITELIST_PREFIX + ip)
if self.DEBUG and is_whitelisted:
print >> sys.stderr, "BANISH WHITELISTED IP: ", self.WHITELIST_PREFIX + ip
return is_whitelisted
def monitor_abuse(self, ip, abuse_threshold, url_name):
"""
Track the number of hits per second for a given IP.
If the count is over ABUSE_THRESHOLD banish user
"""
cache_key = self.ABUSE_PREFIX + ip + ":" + url_name
abuse_count = cache.get(cache_key)
if self.DEBUG:
print >> sys.stderr, "BANISH ABUSE COUNT: ", abuse_count
print >> sys.stderr, "BANISH CACHE KEY: ", cache_key
over_abuse_limit = False
if not abuse_count:
# Set counter value with expiration time 1 minute
cache.set(cache_key, 1, 60)
else:
if abuse_count >= abuse_threshold:
over_abuse_limit = True
# Store IP Abuse in memcache and database
                # Check whether this IP is already banned in the database
oldbanishment = Banishment.objects.filter(condition=ip + ":" + url_name).exists()
                # If not, persist the ban
if not oldbanishment:
ban = Banishment(ban_reason="IP Abuse limit exceeded", type="ip-address", condition=ip + ":" + url_name)
ban.save()
# Rewrite banish with infinite time expiration.
cache.set(cache_key, "1")
else:
# If no excess abuse count only increment.
try:
cache.incr(cache_key)
except ValueError:
pass
return over_abuse_limit
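# Hedged configuration sketch (added commentary, not part of the original
# module): the Django settings read in __init__ and process_request above,
# shown with illustrative values only.
#
#     BANISH_ENABLED = True
#     BANISH_EMPTY_UA = True
#     BANISH_RESTRICT_FILTER = True
#     DEFAULT_BANISH_ABUSE_THRESHOLD = 75
#     BANISH_ABUSE_THRESHOLD_TO_URL = [
#         {'name': 'login', 'url': '/accounts/login/', 'threshold': 20},
#     ]
#     BANISH_URL_REDIRECT = None   # or a URL to redirect banned clients to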
|
the-stack_0_25112
|
"""Twitter authentication functions"""
import pickle
import webbrowser
import os.path
import tweepy
def deserialize_token():
    """Get token from file if it exists"""
    if os.path.exists(".token"):
        with open(".token", "rb") as the_file:
            return pickle.load(the_file)
    return None
def serialize_token(token):
    """Save token to file"""
    with open(".token", "wb") as the_file:
        pickle.dump(token, the_file)
def authenticate():
"""Create tweepy auth object and return"""
consumer_key = 'aN6bbamocujBEQkTsT4V5Ok36'
consumer_secret = '5LgoByRZxcQYIW4rumrclYfHqEFUCO5SnYrNFz6O0LyprOvpPs'
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
# "17417157-dlWa8vJCGGIXtCmvuxNQ3hRYcpCM4YnJWpNvGGLSO"
access_token = deserialize_token()
if access_token is None:
url = auth.get_authorization_url()
webbrowser.open(url, new=1, autoraise=True)
pin = input('Verification pin number from twitter.com: ').strip()
access_token = auth.get_access_token(verifier=pin)
serialize_token(access_token)
auth.set_access_token(access_token[0], access_token[1])
return auth
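if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module): authenticate and
    # print a few tweets from the home timeline. Assumes tweepy and network
    # access are available; the count is illustrative only.
    api = tweepy.API(authenticate())
    for status in api.home_timeline(count=5):
        print(status.text)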
|
the-stack_0_25113
|
import datetime
from core.util.problem_detail import ProblemDetail
from .admin.announcement_list_validator import AnnouncementListValidator
class Announcements(object):
"""Data model class for a library's announcements.
This entire list is stored as a single
ConfigurationSetting, which is why this isn't in core/model.
"""
SETTING_NAME = "announcements"
@classmethod
def for_library(cls, library):
"""Load an Announcements object for the given Library.
:param library: A Library
"""
announcements = library.setting(cls.SETTING_NAME).json_value or []
return cls(announcements)
def __init__(self, announcements):
"""Instantiate an Announcements object from a (potentially serialised)
list.
:param announcements: A value for the ANNOUNCEMENTS ConfigurationSetting,
            either serialized or not.
:return: A list of Announcement objects. The list will be empty if
there are validation errors in `announcements`.
"""
validator = AnnouncementListValidator()
validated = validator.validate_announcements(announcements)
if isinstance(validated, ProblemDetail):
# There's a problem with the way the announcements were
# serialized to the database. Treat this as an empty list.
validated = []
self.announcements = [Announcement(**data) for data in validated]
@property
def active(self):
"""Yield only the active announcements."""
for a in self.announcements:
if a.is_active:
yield a
class Announcement(object):
"""Data model class for a single library-wide announcement."""
def __init__(self, **kwargs):
"""Instantiate an Announcement from a dictionary of data.
It's assumed that the data is present and valid.
:param id: Globally unique ID for the Announcement.
:param content: Textual content of the announcement.
:param start: The date (relative to the time zone of the server)
on which the announcement should start being published.
:param finish: The date (relative to the time zone of the server)
on which the announcement should stop being published.
"""
self.id = kwargs.pop('id')
self.content = kwargs.pop('content')
self.start = AnnouncementListValidator.validate_date("", kwargs.pop('start'))
self.finish = AnnouncementListValidator.validate_date("", kwargs.pop('finish'))
@property
def json_ready(self):
format = AnnouncementListValidator.DATE_FORMAT
return dict(
id=self.id, content=self.content,
start=datetime.datetime.strftime(self.start, format),
finish=datetime.datetime.strftime(self.finish, format),
)
@property
def is_active(self):
"""Should this announcement be displayed now?"""
today_local = datetime.date.today()
return self.start <= today_local and self.finish >= today_local
@property
def for_authentication_document(self):
"""The publishable representation of this announcement,
for use in an authentication document.
Basically just the ID and the content.
"""
return dict(id=self.id, content=self.content)
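# Hedged usage sketch (added commentary, not part of the original module):
# constructing a single Announcement by hand and checking whether it should be
# shown today. The id, content and dates below are illustrative only.
#
#     announcement = Announcement(
#         id="welcome-banner",
#         content="The library closes early on Friday.",
#         start="2020-01-01",
#         finish="2030-01-01",
#     )
#     if announcement.is_active:
#         payload = announcement.for_authentication_document  # {'id': ..., 'content': ...}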
|
the-stack_0_25115
|
# -*- coding: utf-8 -*-
"""
12. Integer to Roman
Roman numerals are represented by seven symbols: I, V, X, L, C, D and M.
Symbol   Value
I        1
V        5
X        10
L        50
C        100
D        500
M        1000
For example, 2 is written as II (two ones added together), 12 as XII (X + II),
and 27 as XXVII (XX + V + II).
Roman numerals are usually written largest to smallest from left to right, but
there are exceptions: 4 is not written IIII but IV, because placing the 1 before
the 5 means 5 - 1 = 4. Likewise, 9 is written as IX. This subtraction rule is
used in only six cases:
I can be placed before V (5) and X (10) to make 4 and 9.
X can be placed before L (50) and C (100) to make 40 and 90.
C can be placed before D (500) and M (1000) to make 400 and 900.
Given an integer, convert it to a Roman numeral. The input is guaranteed to be
in the range 1 to 3999.
Approach:
Based on the rules above, build a value-to-symbol table, largest first:
----------------------------
Arabic number | Roman numeral
----------------------------
1000          | M
900           | CM
500           | D
400           | CD
100           | C
90            | XC
50            | L
40            | XL
10            | X
9             | IX
5             | V
4             | IV
1             | I
----------------------------
Walk the table greedily: repeatedly find the largest value that still fits into
the remaining number, subtract it, and append its symbol. For example, for 99
the largest fitting value is 90 (100 is too big), leaving 9, which maps directly
to IX, so 99 becomes XCIX.
"""
class Solution:
def intToRoman(self, num):
"""
:type num: int
:rtype: str
"""
if num < 1 or num > 3999:
return ""
nums_and_roman = [(1000, 'M'), (900, 'CM'), (500, 'D'), (400, 'CD'),
(100, 'C'), (90, 'XC'), (50, 'L'), (40, 'XL'),
(10, 'X'), (9, 'IX'), (5, 'V'), (4, 'IV'), (1, 'I')]
result = ""
for n, roman in nums_and_roman:
while num >= n:
num -= n
result += roman
return result
if __name__ == '__main__':
so = Solution()
print(so.intToRoman(3))
print(so.intToRoman(4))
print(so.intToRoman(9))
print(so.intToRoman(58))
print(so.intToRoman(1994))
|
the-stack_0_25117
|
#
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import cuml
import cupy as cp
import cudf
import numpy as np
import scipy
import math
import dask.array as da
from cuml.linear_model import LinearRegression
def scale(normalized, max_value=10):
    """Standardize each column to zero mean and unit variance, then clip at max_value."""
    mean = normalized.mean(axis=0)
    stddev = cp.sqrt(normalized.var(axis=0))
    normalized -= mean
    normalized /= stddev
    normalized[normalized > max_value] = max_value
    return normalized
def _regress_out_chunk(data_chunk, regressors):
"""
Performs a data_cunk.shape[1] number of local linear regressions,
replacing the data in the original chunk w/ the regressed result.
"""
output = []
for col in range(data_chunk.shape[1]):
y = data_chunk[:,col]
X = regressors
lr = LinearRegression(fit_intercept=False)
lr.fit(X, y, convert_dtype=True)
mu = lr.predict(X)
data_chunk[:, col] = y - mu
return data_chunk
def normalize_total(filtered_cells, target_sum):
sums = np.array(target_sum / filtered_cells.sum(axis=1)).ravel()
normalized = filtered_cells.multiply(sums[:, np.newaxis]) # Done on host for now
normalized = cp.sparse.csr_matrix(normalized)
return normalized
def regress_out(normalized, n_counts, percent_mito):
regressors = cp.ones((n_counts.shape[0]*3)).reshape((n_counts.shape[0], 3), order="F")
regressors[:, 1] = n_counts
regressors[:, 2] = percent_mito
df_regressors = cudf.DataFrame.from_gpu_matrix(regressors)
da_normalized = da.from_array(normalized, chunks=(-1, 1000), asarray=False)
da_normalized = da_normalized.map_blocks(lambda cols: _regress_out_chunk(cols, df_regressors),
dtype=cp.float32)
return da_normalized.compute()
def filter_cells(sparse_gpu_array, min_genes, max_genes, rows_per_batch=10000):
n_batches = math.ceil(sparse_gpu_array.shape[0] / rows_per_batch)
print("Running %d batches" % n_batches)
filtered_list = []
for batch in range(n_batches):
batch_size = rows_per_batch
start_idx = batch * batch_size
stop_idx = min(batch * batch_size + batch_size, sparse_gpu_array.shape[0])
arr_batch = sparse_gpu_array[start_idx:stop_idx]
filtered_list.append(_filter_cells(arr_batch,
min_genes=min_genes,
max_genes=max_genes))
return scipy.sparse.vstack(filtered_list)
def _filter_cells(sparse_gpu_array, min_genes, max_genes):
degrees = cp.diff(sparse_gpu_array.indptr)
query = ((min_genes <= degrees) & (degrees <= max_genes)).ravel()
return sparse_gpu_array.get()[query.get()]
def filter_genes(sparse_gpu_array, genes_idx, min_cells=0):
thr = np.asarray(sparse_gpu_array.sum(axis=0) >= min_cells).ravel()
filtered_genes = sparse_gpu_array[:,thr]
genes_idx = genes_idx[np.where(thr)[0]]
return filtered_genes, genes_idx.reset_index(drop=True)
def select_groups(labels, groups_order_subset='all'):
"""Get subset of groups in adata.obs[key].
"""
adata_obs_key = labels
groups_order = labels.cat.categories
groups_masks = cp.zeros(
(len(labels.cat.categories), len(labels.cat.codes)), dtype=bool
)
for iname, name in enumerate(labels.cat.categories):
# if the name is not found, fallback to index retrieval
if labels.cat.categories[iname] in labels.cat.codes:
mask = labels.cat.categories[iname] == labels.cat.codes
else:
mask = iname == labels.cat.codes
groups_masks[iname] = mask.values
groups_ids = list(range(len(groups_order)))
if groups_order_subset != 'all':
groups_ids = []
for name in groups_order_subset:
groups_ids.append(
cp.where(cp.array(labels.cat.categories.to_array().astype("int32")) == int(name))[0][0]
)
if len(groups_ids) == 0:
# fallback to index retrieval
groups_ids = cp.where(
cp.in1d(
cp.arange(len(labels.cat.categories)).astype(str),
cp.array(groups_order_subset),
)
)[0]
groups_ids = [groups_id.item() for groups_id in groups_ids]
groups_masks = groups_masks[groups_ids]
groups_order_subset = labels.cat.categories[groups_ids].to_array().astype(int)
else:
groups_order_subset = groups_order.to_array()
return groups_order_subset, groups_masks
def rank_genes_groups(
X,
labels, # louvain results
var_names,
groupby = str,
groups = None,
reference = 'rest',
n_genes = 100,
key_added = None,
layer = None,
**kwds,
):
#### Wherever we see "adata.obs[groupby], we should just replace w/ the groups"
import time
start = time.time()
# for clarity, rename variable
if groups == 'all':
groups_order = 'all'
elif isinstance(groups, (str, int)):
raise ValueError('Specify a sequence of groups')
else:
groups_order = list(groups)
if isinstance(groups_order[0], int):
groups_order = [str(n) for n in groups_order]
if reference != 'rest' and reference not in set(groups_order):
groups_order += [reference]
if (
reference != 'rest'
and reference not in set(labels.cat.categories)
):
cats = labels.cat.categories.tolist()
raise ValueError(
f'reference = {reference} needs to be one of groupby = {cats}.'
)
groups_order, groups_masks = select_groups(labels, groups_order)
original_reference = reference
n_vars = len(var_names)
# for clarity, rename variable
n_genes_user = n_genes
# make sure indices are not OoB in case there are less genes than n_genes
if n_genes_user > X.shape[1]:
n_genes_user = X.shape[1]
# in the following, n_genes is simply another name for the total number of genes
n_genes = X.shape[1]
n_groups = groups_masks.shape[0]
ns = cp.zeros(n_groups, dtype=int)
for imask, mask in enumerate(groups_masks):
ns[imask] = cp.where(mask)[0].size
if reference != 'rest':
ireference = cp.where(groups_order == reference)[0][0]
reference_indices = cp.arange(n_vars, dtype=int)
rankings_gene_scores = []
rankings_gene_names = []
rankings_gene_logfoldchanges = []
rankings_gene_pvals = []
rankings_gene_pvals_adj = []
# if 'log1p' in adata.uns_keys() and adata.uns['log1p']['base'] is not None:
# expm1_func = lambda x: np.expm1(x * np.log(adata.uns['log1p']['base']))
# else:
# expm1_func = np.expm1
# Perform LogReg
# if reference is not set, then the groups listed will be compared to the rest
# if reference is set, then the groups listed will be compared only to the other groups listed
from cuml.linear_model import LogisticRegression
reference = groups_order[0]
if len(groups) == 1:
raise Exception('Cannot perform logistic regression on a single cluster.')
grouping_mask = labels.astype('int').isin(cudf.Series(groups_order))
grouping = labels.loc[grouping_mask]
X = X[grouping_mask.values, :] # Indexing with a series causes issues, possibly segfault
y = labels.loc[grouping]
clf = LogisticRegression(**kwds)
clf.fit(X.get(), grouping.to_array().astype('float32'))
scores_all = cp.array(clf.coef_).T
for igroup, group in enumerate(groups_order):
if len(groups_order) <= 2: # binary logistic regression
scores = scores_all[0]
else:
scores = scores_all[igroup]
partition = cp.argpartition(scores, -n_genes_user)[-n_genes_user:]
partial_indices = cp.argsort(scores[partition])[::-1]
global_indices = reference_indices[partition][partial_indices]
rankings_gene_scores.append(scores[global_indices].get()) ## Shouldn't need to take this off device
rankings_gene_names.append(var_names[global_indices])
if len(groups_order) <= 2:
break
groups_order_save = [str(g) for g in groups_order]
if (len(groups) == 2):
groups_order_save = [g for g in groups_order if g != reference]
print("GPU Ranking took: " + str(time.time() - start))
start = time.time()
scores = np.rec.fromarrays(
[n for n in rankings_gene_scores],
dtype=[(rn, 'float32') for rn in groups_order_save],
)
names = np.rec.fromarrays(
[n for n in rankings_gene_names],
dtype=[(rn, 'U50') for rn in groups_order_save],
)
print("Created np.rec.fromarrays in: " + str(time.time() - start))
return scores, names, original_reference
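if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module): exercise scale()
    # on a small random matrix. Requires a working cupy installation; the
    # shape and seed below are illustrative only.
    rs = cp.random.RandomState(0)
    x = rs.normal(size=(100, 5)).astype(cp.float32)
    z = scale(x, max_value=10)
    print("per-column mean (~0):", z.mean(axis=0))
    print("per-column variance (~1):", z.var(axis=0))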
|
the-stack_0_25119
|
# -*- coding: utf-8 -*-
import copy
import os
import unittest
import shutil
from parameterized import parameterized
import tensorflow as tf
from opennmt import Runner
from opennmt.config import load_model
from opennmt.utils import misc
from opennmt.tests import test_util
test_dir = os.path.dirname(os.path.realpath(__file__))
root_dir = os.path.join(test_dir, "..", "..")
test_data = os.path.join(root_dir, "testdata")
@unittest.skipIf(not os.path.isdir(test_data), "Missing test data directory")
class RunnerTest(tf.test.TestCase):
def _getTransliterationRunner(self, base_config=None, model_version="v2"):
model_dir = os.path.join(self.get_temp_dir(), "model")
shutil.copytree(os.path.join(test_data, "transliteration-aren-v2", model_version), model_dir)
config = {}
config["model_dir"] = model_dir
config["data"] = {
"source_vocabulary": os.path.join(model_dir, "ar.vocab"),
"target_vocabulary": os.path.join(model_dir, "en.vocab"),
}
if base_config is not None:
config = misc.merge_dict(config, base_config)
model = load_model(model_dir)
runner = Runner(model, config)
return runner
def _makeTransliterationData(self):
ar = [
"آ ت ز م و ن",
"آ ت ش ي س و ن",
"آ ر ب ا ك ه",
"آ ر ث ر",
"آ ز ا",
]
en = [
"a t z m o n",
"a c h e s o n",
"a a r b a k k e",
"a r t h u r",
"a s a"
]
ar_file = test_util.make_data_file(os.path.join(self.get_temp_dir(), "ar.txt"), ar)
en_file = test_util.make_data_file(os.path.join(self.get_temp_dir(), "en.txt"), en)
return ar_file, en_file
def testTrain(self):
ar_file, en_file = self._makeTransliterationData()
config = {
"data": {
"train_features_file": ar_file,
"train_labels_file": en_file
},
"params": {
"learning_rate": 0.0005,
"optimizer": "Adam"
},
"train": {
"batch_size": 10,
"average_last_checkpoints": 4,
"save_checkpoints_steps": 1,
"max_step": 145002 # Just train for 2 steps.
}
}
runner = self._getTransliterationRunner(config)
avg_dir = runner.train()
self.assertEndsWith(tf.train.latest_checkpoint(avg_dir), "145002")
self.assertLen(tf.train.get_checkpoint_state(avg_dir).all_model_checkpoint_paths, 1)
model_dir = os.path.dirname(avg_dir)
self.assertEndsWith(tf.train.latest_checkpoint(model_dir), "145002")
self.assertLen(tf.train.get_checkpoint_state(model_dir).all_model_checkpoint_paths, 3)
# Check that the averaged checkpoint is usable.
ar_file, _ = self._makeTransliterationData()
en_file = os.path.join(self.get_temp_dir(), "output.txt")
runner.infer(ar_file, predictions_file=en_file, checkpoint_path=avg_dir)
with open(en_file) as f:
self.assertEqual(next(f).strip(), "a t z m o n")
def testTrainWithEval(self):
ar_file, en_file = self._makeTransliterationData()
config = {
"data": {
"train_features_file": ar_file,
"train_labels_file": en_file,
"eval_features_file": ar_file,
"eval_labels_file": en_file
},
"params": {
"learning_rate": 0.0005,
"optimizer": "Adam"
},
"train": {
"batch_size": 10,
"max_step": 145002 # Just train for 2 steps.
},
"eval": {
"export_on_best": "loss"
}
}
runner = self._getTransliterationRunner(config)
model_dir = runner.train(with_eval=True)
export_dir = os.path.join(model_dir, "export", "145002")
self.assertTrue(os.path.exists(export_dir))
self.assertTrue(tf.saved_model.contains_saved_model(export_dir))
def testEvaluate(self):
ar_file, en_file = self._makeTransliterationData()
config = {
"data": {
"eval_features_file": ar_file,
"eval_labels_file": en_file
},
"eval": {
"external_evaluators": "BLEU"
}
}
runner = self._getTransliterationRunner(config)
metrics = runner.evaluate()
self.assertIn("loss", metrics)
self.assertIn("bleu", metrics)
@parameterized.expand([[1, "v2"], [4, "v2"], [1, "v1"]])
def testInfer(self, beam_size, model_version):
config = {
"params": {
"beam_width": beam_size
}
}
runner = self._getTransliterationRunner(config, model_version)
ar_file, _ = self._makeTransliterationData()
en_file = os.path.join(self.get_temp_dir(), "output.txt")
runner.infer(ar_file, predictions_file=en_file)
self.assertTrue(os.path.exists(en_file))
with open(en_file) as f:
lines = f.readlines()
self.assertEqual(len(lines), 5)
self.assertEqual(lines[0].strip(), "a t z m o n")
def testUpdateVocab(self):
config = {
"params": {
"learning_rate": 0.0005,
"optimizer": "Adam"
}
}
runner = self._getTransliterationRunner(config)
# Reverse order of non special tokens.
new_en_vocab = os.path.join(self.get_temp_dir(), "en.vocab.new")
with open(os.path.join(runner._config["model_dir"], "en.vocab")) as en_vocab, \
open(new_en_vocab, "w") as new_vocab:
tokens = en_vocab.readlines()
for token in tokens[:3]:
new_vocab.write(token)
for token in reversed(tokens[3:]):
new_vocab.write(token)
output_dir = os.path.join(self.get_temp_dir(), "updated_vocab")
self.assertEqual(runner.update_vocab(output_dir, tgt_vocab=new_en_vocab), output_dir)
# Check that the translation is unchanged.
new_config = copy.deepcopy(runner._config)
new_config["model_dir"] = output_dir
new_config["data"]["target_vocabulary"] = new_en_vocab
runner = Runner(runner._model, new_config)
ar_file, _ = self._makeTransliterationData()
en_file = os.path.join(self.get_temp_dir(), "output.txt")
runner.infer(ar_file, predictions_file=en_file)
with open(en_file) as f:
self.assertEqual(next(f).strip(), "a t z m o n")
def testScore(self):
runner = self._getTransliterationRunner()
ar_file, en_file = self._makeTransliterationData()
score_file = os.path.join(self.get_temp_dir(), "scores.txt")
runner.score(ar_file, en_file, output_file=score_file)
self.assertTrue(os.path.exists(score_file))
with open(score_file) as f:
lines = f.readlines()
self.assertEqual(len(lines), 5)
def testExport(self):
config = {
"data": {
"source_tokenization": {
"mode": "char"
}
}
}
export_dir = os.path.join(self.get_temp_dir(), "export")
runner = self._getTransliterationRunner(config)
runner.export(export_dir)
self.assertTrue(tf.saved_model.contains_saved_model(export_dir))
extra_assets_dir = os.path.join(export_dir, "assets.extra")
self.assertTrue(os.path.isdir(extra_assets_dir))
self.assertLen(os.listdir(extra_assets_dir), 1)
imported = tf.saved_model.load(export_dir)
translate_fn = imported.signatures["serving_default"]
outputs = translate_fn(
tokens=tf.constant([["آ" ,"ت" ,"ز" ,"م" ,"و" ,"ن"]]),
length=tf.constant([6], dtype=tf.int32))
result = tf.nest.map_structure(lambda x: x[0, 0], outputs)
tokens = result["tokens"][:result["length"]]
self.assertAllEqual(tokens, [b"a", b"t", b"z", b"m", b"o", b"n"])
if __name__ == "__main__":
tf.test.main()
|
the-stack_0_25121
|
# MenuTitle: Copy selected glyphs' names to Clipboard
# -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals
__doc__ = """
Copy the currection selection's name to the clipboard.
"""
import subprocess
font = Glyphs.font
gnames = '\n'.join(glyph.name for glyph in font.selection)
def getClipboardData():
p = subprocess.Popen(['pbpaste'], stdout=subprocess.PIPE)
retcode = p.wait()
data = p.stdout.read()
return data
def setClipboardData(data):
    p = subprocess.Popen(['pbcopy'], stdin=subprocess.PIPE)
    p.stdin.write(data.encode("utf-8"))  # pbcopy expects bytes under Python 3
    p.stdin.close()
    retcode = p.wait()
setClipboardData(gnames)
|
the-stack_0_25122
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Function:
Test workflow to query image metadata.
Usage:
pytest tests/st/func/datavisual
"""
import pytest
from .. import globals as gbl
from .....utils.tools import get_url
from mindinsight.datavisual.common.enums import PluginNameEnum
TRAIN_JOB_URL = '/v1/mindinsight/datavisual/train-jobs'
PLUGIN_URL = '/v1/mindinsight/datavisual/plugins'
METADATA_URL = '/v1/mindinsight/datavisual/image/metadata'
class TestImageMetadataFlow:
"""Test Image Metadata."""
@pytest.mark.level0
@pytest.mark.env_single
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.usefixtures("init_summary_logs")
def test_image_metadata(self, client):
"""Test getting image metadata."""
plugin_name = PluginNameEnum.IMAGE.value
# Get train id by restful api `train-jobs`.
response = client.get(get_url(TRAIN_JOB_URL, dict()))
train_jobs = response.get_json()
train_id = train_jobs.get('train_jobs')[-1].get('train_id')
# Get tag by restful api `plugins`.
params = dict(train_id=train_id, plugin=plugin_name)
response = client.get(get_url(PLUGIN_URL, params))
plugins = response.get_json().get('plugins')
test_image_tag = plugins.get(plugin_name)[0]
expected_metadata = gbl.get_metadata(train_id, test_image_tag)
# Query image metadata.
params = dict(train_id=train_id, tag=test_image_tag)
response = client.get(get_url(METADATA_URL, params))
metadata = response.get_json().get("metadatas")
assert metadata == expected_metadata
|
the-stack_0_25126
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import pkgutil
import importlib.util
import time
import threading
import sys
from typing import NamedTuple, Any, Union, TYPE_CHECKING, Optional
from .i18n import _
from .util import (profiler, DaemonThread, UserCancelled, ThreadJob)
from . import bip32
from . import plugins
from .simple_config import SimpleConfig
from .logging import get_logger, Logger
if TYPE_CHECKING:
from .plugins.hw_wallet import HW_PluginBase
_logger = get_logger(__name__)
plugin_loaders = {}
hook_names = set()
hooks = {}
class Plugins(DaemonThread):
LOGGING_SHORTCUT = 'p'
@profiler
def __init__(self, config: SimpleConfig, gui_name):
DaemonThread.__init__(self)
self.setName('Plugins')
self.pkgpath = os.path.dirname(plugins.__file__)
self.config = config
self.hw_wallets = {}
self.plugins = {}
self.gui_name = gui_name
self.descriptions = {}
self.device_manager = DeviceMgr(config)
self.load_plugins()
self.add_jobs(self.device_manager.thread_jobs())
self.start()
def load_plugins(self):
for loader, name, ispkg in pkgutil.iter_modules([self.pkgpath]):
full_name = f'electrum_sum.plugins.{name}'
spec = importlib.util.find_spec(full_name)
if spec is None: # pkgutil found it but importlib can't ?!
raise Exception(f"Error pre-loading {full_name}: no spec")
try:
module = importlib.util.module_from_spec(spec)
# sys.modules needs to be modified for relative imports to work
# see https://stackoverflow.com/a/50395128
sys.modules[spec.name] = module
spec.loader.exec_module(module)
except Exception as e:
raise Exception(f"Error pre-loading {full_name}: {repr(e)}") from e
d = module.__dict__
gui_good = self.gui_name in d.get('available_for', [])
if not gui_good:
continue
details = d.get('registers_wallet_type')
if details:
self.register_wallet_type(name, gui_good, details)
details = d.get('registers_keystore')
if details:
self.register_keystore(name, gui_good, details)
self.descriptions[name] = d
if not d.get('requires_wallet_type') and self.config.get('use_' + name):
try:
self.load_plugin(name)
except BaseException as e:
self.logger.exception(f"cannot initialize plugin {name}: {e}")
def get(self, name):
return self.plugins.get(name)
def count(self):
return len(self.plugins)
def load_plugin(self, name):
if name in self.plugins:
return self.plugins[name]
full_name = f'electrum_sum.plugins.{name}.{self.gui_name}'
spec = importlib.util.find_spec(full_name)
if spec is None:
raise RuntimeError("%s implementation for %s plugin not found"
% (self.gui_name, name))
try:
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
plugin = module.Plugin(self, self.config, name)
except Exception as e:
raise Exception(f"Error loading {name} plugin: {repr(e)}") from e
self.add_jobs(plugin.thread_jobs())
self.plugins[name] = plugin
self.logger.info(f"loaded {name}")
return plugin
def close_plugin(self, plugin):
self.remove_jobs(plugin.thread_jobs())
def enable(self, name):
self.config.set_key('use_' + name, True, True)
p = self.get(name)
if p:
return p
return self.load_plugin(name)
def disable(self, name):
self.config.set_key('use_' + name, False, True)
p = self.get(name)
if not p:
return
self.plugins.pop(name)
p.close()
self.logger.info(f"closed {name}")
def toggle(self, name):
p = self.get(name)
return self.disable(name) if p else self.enable(name)
def is_available(self, name, w):
d = self.descriptions.get(name)
if not d:
return False
deps = d.get('requires', [])
for dep, s in deps:
try:
__import__(dep)
except ImportError as e:
self.logger.warning(f'Plugin {name} unavailable: {repr(e)}')
return False
requires = d.get('requires_wallet_type', [])
return not requires or w.wallet_type in requires
def get_hardware_support(self):
out = []
for name, (gui_good, details) in self.hw_wallets.items():
if gui_good:
try:
p = self.get_plugin(name)
if p.is_enabled():
out.append(HardwarePluginToScan(name=name,
description=details[2],
plugin=p,
exception=None))
except Exception as e:
self.logger.exception(f"cannot load plugin for: {name}")
out.append(HardwarePluginToScan(name=name,
description=details[2],
plugin=None,
exception=e))
return out
def register_wallet_type(self, name, gui_good, wallet_type):
from .wallet import register_wallet_type, register_constructor
self.logger.info(f"registering wallet type {(wallet_type, name)}")
def loader():
plugin = self.get_plugin(name)
register_constructor(wallet_type, plugin.wallet_class)
register_wallet_type(wallet_type)
plugin_loaders[wallet_type] = loader
def register_keystore(self, name, gui_good, details):
from .keystore import register_keystore
def dynamic_constructor(d):
return self.get_plugin(name).keystore_class(d)
if details[0] == 'hardware':
self.hw_wallets[name] = (gui_good, details)
self.logger.info(f"registering hardware {name}: {details}")
register_keystore(details[1], dynamic_constructor)
def get_plugin(self, name):
if not name in self.plugins:
self.load_plugin(name)
return self.plugins[name]
def run(self):
while self.is_running():
time.sleep(0.1)
self.run_jobs()
self.on_stop()
def hook(func):
hook_names.add(func.__name__)
return func
def run_hook(name, *args):
results = []
f_list = hooks.get(name, [])
for p, f in f_list:
if p.is_enabled():
try:
r = f(*args)
except Exception:
_logger.exception(f"Plugin error. plugin: {p}, hook: {name}")
r = False
if r:
results.append(r)
if results:
assert len(results) == 1, results
return results[0]
class BasePlugin(Logger):
def __init__(self, parent, config, name):
self.parent = parent # The plugins object
self.name = name
self.config = config
self.wallet = None
Logger.__init__(self)
# add self to hooks
for k in dir(self):
if k in hook_names:
l = hooks.get(k, [])
l.append((self, getattr(self, k)))
hooks[k] = l
def __str__(self):
return self.name
def close(self):
# remove self from hooks
for attr_name in dir(self):
if attr_name in hook_names:
# found attribute in self that is also the name of a hook
l = hooks.get(attr_name, [])
try:
l.remove((self, getattr(self, attr_name)))
except ValueError:
# maybe attr name just collided with hook name and was not hook
continue
hooks[attr_name] = l
self.parent.close_plugin(self)
self.on_close()
def on_close(self):
pass
def requires_settings(self):
return False
def thread_jobs(self):
return []
def is_enabled(self):
return self.is_available() and self.config.get('use_'+self.name) is True
def is_available(self):
return True
def can_user_disable(self):
return True
def settings_dialog(self):
pass
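# Hedged illustration (added commentary, not part of the original module) of how
# the @hook / run_hook machinery above is typically used by a plugin subclass:
#
#     class MyPlugin(BasePlugin):
#         @hook
#         def load_wallet(self, wallet, window):
#             # @hook registers the name; BasePlugin.__init__ finds it in
#             # dir(self), so run_hook('load_wallet', wallet, window) elsewhere
#             # in the code base will invoke this method.
#             self.logger.info(f"wallet loaded: {wallet}")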
class DeviceNotFoundError(Exception): pass
class DeviceUnpairableError(Exception): pass
class HardwarePluginLibraryUnavailable(Exception): pass
class Device(NamedTuple):
path: Union[str, bytes]
interface_number: int
id_: str
product_key: Any # when using hid, often Tuple[int, int]
usage_page: int
transport_ui_string: str
class DeviceInfo(NamedTuple):
device: Device
label: Optional[str] = None
initialized: Optional[bool] = None
exception: Optional[Exception] = None
class HardwarePluginToScan(NamedTuple):
name: str
description: str
plugin: Optional['HW_PluginBase']
exception: Optional[Exception]
class DeviceMgr(ThreadJob):
'''Manages hardware clients. A client communicates over a hardware
channel with the device.
In addition to tracking device HID IDs, the device manager tracks
hardware wallets and manages wallet pairing. A HID ID may be
paired with a wallet when it is confirmed that the hardware device
matches the wallet, i.e. they have the same master public key. A
HID ID can be unpaired if e.g. it is wiped.
Because of hotplugging, a wallet must request its client
dynamically each time it is required, rather than caching it
itself.
The device manager is shared across plugins, so just one place
does hardware scans when needed. By tracking HID IDs, if a device
is plugged into a different port the wallet is automatically
re-paired.
Wallets are informed on connect / disconnect events. It must
implement connected(), disconnected() callbacks. Being connected
implies a pairing. Callbacks can happen in any thread context,
and we do them without holding the lock.
Confusingly, the HID ID (serial number) reported by the HID system
doesn't match the device ID reported by the device itself. We use
the HID IDs.
This plugin is thread-safe. Currently only devices supported by
hidapi are implemented.'''
def __init__(self, config):
ThreadJob.__init__(self)
        # Keyed by xpub. The value is the device id if it
        # has been paired, and None otherwise.
self.xpub_ids = {}
# A list of clients. The key is the client, the value is
# a (path, id_) pair.
self.clients = {}
# What we recognise. Each entry is a (vendor_id, product_id)
# pair.
self.recognised_hardware = set()
# Custom enumerate functions for devices we don't know about.
self.enumerate_func = set()
# For synchronization
self.lock = threading.RLock()
self.hid_lock = threading.RLock()
self.config = config
def thread_jobs(self):
# Thread job to handle device timeouts
return [self]
def run(self):
'''Handle device timeouts. Runs in the context of the Plugins
thread.'''
with self.lock:
clients = list(self.clients.keys())
cutoff = time.time() - self.config.get_session_timeout()
for client in clients:
client.timeout(cutoff)
def register_devices(self, device_pairs):
for pair in device_pairs:
self.recognised_hardware.add(pair)
def register_enumerate_func(self, func):
self.enumerate_func.add(func)
def create_client(self, device, handler, plugin):
# Get from cache first
client = self.client_lookup(device.id_)
if client:
return client
client = plugin.create_client(device, handler)
if client:
self.logger.info(f"Registering {client}")
with self.lock:
self.clients[client] = (device.path, device.id_)
return client
def xpub_id(self, xpub):
with self.lock:
return self.xpub_ids.get(xpub)
def xpub_by_id(self, id_):
with self.lock:
for xpub, xpub_id in self.xpub_ids.items():
if xpub_id == id_:
return xpub
return None
def unpair_xpub(self, xpub):
with self.lock:
if xpub not in self.xpub_ids:
return
_id = self.xpub_ids.pop(xpub)
self._close_client(_id)
def unpair_id(self, id_):
xpub = self.xpub_by_id(id_)
if xpub:
self.unpair_xpub(xpub)
else:
self._close_client(id_)
def _close_client(self, id_):
client = self.client_lookup(id_)
self.clients.pop(client, None)
if client:
client.close()
def pair_xpub(self, xpub, id_):
with self.lock:
self.xpub_ids[xpub] = id_
def client_lookup(self, id_):
with self.lock:
for client, (path, client_id) in self.clients.items():
if client_id == id_:
return client
return None
def client_by_id(self, id_):
'''Returns a client for the device ID if one is registered. If
a device is wiped or in bootloader mode pairing is impossible;
in such cases we communicate by device ID and not wallet.'''
self.scan_devices()
return self.client_lookup(id_)
def client_for_keystore(self, plugin, handler, keystore, force_pair):
self.logger.info("getting client for keystore")
if handler is None:
raise Exception(_("Handler not found for") + ' ' + plugin.name + '\n' + _("A library is probably missing."))
handler.update_status(False)
devices = self.scan_devices()
xpub = keystore.xpub
derivation = keystore.get_derivation()
client = self.client_by_xpub(plugin, xpub, handler, devices)
if client is None and force_pair:
info = self.select_device(plugin, handler, keystore, devices)
client = self.force_pair_xpub(plugin, handler, info, xpub, derivation, devices)
if client:
handler.update_status(True)
self.logger.info("end client for keystore")
return client
def client_by_xpub(self, plugin, xpub, handler, devices):
_id = self.xpub_id(xpub)
client = self.client_lookup(_id)
if client:
# An unpaired client might have another wallet's handler
# from a prior scan. Replace to fix dialog parenting.
client.handler = handler
return client
for device in devices:
if device.id_ == _id:
return self.create_client(device, handler, plugin)
def force_pair_xpub(self, plugin, handler, info, xpub, derivation, devices):
# The wallet has not been previously paired, so let the user
# choose an unpaired device and compare its first address.
xtype = bip32.xpub_type(xpub)
client = self.client_lookup(info.device.id_)
if client and client.is_pairable():
# See comment above for same code
client.handler = handler
# This will trigger a PIN/passphrase entry request
try:
client_xpub = client.get_xpub(derivation, xtype)
except (UserCancelled, RuntimeError):
# Bad / cancelled PIN / passphrase
client_xpub = None
if client_xpub == xpub:
self.pair_xpub(xpub, info.device.id_)
return client
        # Either the user entered a wrong PIN or passphrase or cancelled the
        # input, or the device is not pairable
raise DeviceUnpairableError(
_('Electrum cannot pair with your {}.\n\n'
'Before you request sumcoins to be sent to addresses in this '
'wallet, ensure you can pair with your device, or that you have '
'its seed (and passphrase, if any). Otherwise all sumcoins you '
'receive will be unspendable.').format(plugin.device))
def unpaired_device_infos(self, handler, plugin: 'HW_PluginBase', devices=None,
include_failing_clients=False):
'''Returns a list of DeviceInfo objects: one for each connected,
unpaired device accepted by the plugin.'''
if not plugin.libraries_available:
message = plugin.get_library_not_available_message()
raise HardwarePluginLibraryUnavailable(message)
if devices is None:
devices = self.scan_devices()
devices = [dev for dev in devices if not self.xpub_by_id(dev.id_)]
infos = []
for device in devices:
if device.product_key not in plugin.DEVICE_IDS:
continue
try:
client = self.create_client(device, handler, plugin)
except Exception as e:
self.logger.error(f'failed to create client for {plugin.name} at {device.path}: {repr(e)}')
if include_failing_clients:
infos.append(DeviceInfo(device=device, exception=e))
continue
if not client:
continue
infos.append(DeviceInfo(device=device,
label=client.label(),
initialized=client.is_initialized()))
return infos
def select_device(self, plugin, handler, keystore, devices=None):
'''Ask the user to select a device to use if there is more than one,
and return the DeviceInfo for the device.'''
while True:
infos = self.unpaired_device_infos(handler, plugin, devices)
if infos:
break
msg = _('Please insert your {}').format(plugin.device)
if keystore.label:
msg += ' ({})'.format(keystore.label)
msg += '. {}\n\n{}'.format(
_('Verify the cable is connected and that '
'no other application is using it.'),
_('Try to connect again?')
)
if not handler.yes_no_question(msg):
raise UserCancelled()
devices = None
if len(infos) == 1:
return infos[0]
# select device by label
for info in infos:
if info.label == keystore.label:
return info
msg = _("Please select which {} device to use:").format(plugin.device)
descriptions = [str(info.label) + ' (%s)'%(_("initialized") if info.initialized else _("wiped")) for info in infos]
c = handler.query_choice(msg, descriptions)
if c is None:
raise UserCancelled()
info = infos[c]
# save new label
keystore.set_label(info.label)
if handler.win.wallet is not None:
handler.win.wallet.save_keystore()
return info
def _scan_devices_with_hid(self):
try:
import hid
except ImportError:
return []
with self.hid_lock:
hid_list = hid.enumerate(0, 0)
devices = []
for d in hid_list:
product_key = (d['vendor_id'], d['product_id'])
if product_key in self.recognised_hardware:
# Older versions of hid don't provide interface_number
interface_number = d.get('interface_number', -1)
usage_page = d['usage_page']
id_ = d['serial_number']
if len(id_) == 0:
id_ = str(d['path'])
id_ += str(interface_number) + str(usage_page)
devices.append(Device(path=d['path'],
interface_number=interface_number,
id_=id_,
product_key=product_key,
usage_page=usage_page,
transport_ui_string='hid'))
return devices
def scan_devices(self):
self.logger.info("scanning devices...")
# First see what's connected that we know about
devices = self._scan_devices_with_hid()
# Let plugin handlers enumerate devices we don't know about
for f in self.enumerate_func:
try:
new_devices = f()
except BaseException as e:
self.logger.error('custom device enum failed. func {}, error {}'
.format(str(f), str(e)))
else:
devices.extend(new_devices)
# find out what was disconnected
pairs = [(dev.path, dev.id_) for dev in devices]
disconnected_ids = []
with self.lock:
connected = {}
for client, pair in self.clients.items():
if pair in pairs and client.has_usable_connection_with_device():
connected[client] = pair
else:
disconnected_ids.append(pair[1])
self.clients = connected
# Unpair disconnected devices
for id_ in disconnected_ids:
self.unpair_id(id_)
return devices
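# Editor's note: a minimal, hypothetical sketch of how DeviceMgr might be
# exercised directly -- register the (vendor_id, product_id) pairs a plugin
# recognises, then scan for connected devices. The `config` object and the
# product key below are placeholders, not part of this module.
def _example_device_scan(config):
    devmgr = DeviceMgr(config)
    devmgr.register_devices({(0x1209, 0x53c1)})  # hypothetical vendor/product id
    for device in devmgr.scan_devices():
        print(device.id_, device.transport_ui_string)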
|
the-stack_0_25127
|
"""Define latency predictor that predict the latency of model on devices.
"""
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pickle
import shutil
import subprocess
import time
import warnings
import urllib.request as request
import ssl
import paddle
from .parse_ops import get_key_from_op
from .extract_features import get_data_from_tables, get_features_from_paramkey
from ._utils import opt_model, load_predictor, nearest_interpolate, _get_download
from ..core import GraphWrapper
__all__ = ["LatencyPredictor", "TableLatencyPredictor"]
TABLE_URL = 'https://paddlemodels.bj.bcebos.com/PaddleSlim/analysis/'
def format_Warning(message, category, filename, lineno, line=''):
return str(filename) + ':' + str(
lineno) + ': ' + category.__name__ + ': ' + str(message) + '\n'
warnings.formatwarning = format_Warning
class LatencyPredictor(object):
"""Base class of latency predictor.
"""
def predict(self, model):
"""Get latency of model. It is an abstract method.
Args:
model: The model to be evaluated.
Returns:
latency(float): The latency of given model on current evaluator.
"""
raise NotImplementedError('Abstract method.')
def _get_key_info_from_graph(self, graph):
graph_keys = []
for op in graph.ops():
param_key = get_key_from_op(op)
graph_keys.append(param_key)
return graph_keys
class TableLatencyPredictor(LatencyPredictor):
"""The preditor used to get pbmodel's latency on some devices and infer engines.
Args:
table_file(str): The path of file that records the device latency of operators.
"""
hardware_list = ['SD625', 'SD710', 'RK3288']
def __init__(self, table_file='SD710'):
self.table_file = table_file
self.table_dict = {}
self.hardware = None
self.threads = None
self.predictor_state = False
self.predictor = {}
self._initial_table()
@classmethod
def add_hardware(cls, hardware):
cls.hardware_list.append(hardware)
def _initial_table(self):
if self.table_file in TableLatencyPredictor.hardware_list:
self.hardware = self.table_file
self.threads = 4
self.table_file = f'{self.hardware}_threads_4_power_mode_0.pkl'
self.predictor_state = True
url = TABLE_URL + self.table_file
while not (os.path.exists(self.table_file)):
if not _get_download(url, self.table_file):
time.sleep(1)
continue
                print('Successfully downloaded {}!'.format(self.table_file))
assert os.path.exists(
self.table_file
), f'{self.table_file} does not exist. If you want to use our table files, please set \'table_file\' in {TableLatencyPredictor.hardware_list}'
with open(self.table_file, 'rb') as f:
self.table_dict = pickle.load(f)
        print('Successfully loaded {}'.format(self.table_file))
def _change_table(self, threads=4):
assert threads == 4, 'Only 4 threads are available now.'
self.table_file = f'{self.hardware}_threads_{threads}_power_mode_0.pkl'
if not os.path.exists(self.table_file):
subprocess.call(
f'wget https://paddlemodels.bj.bcebos.com/PaddleSlim/analysis/{self.table_file}',
shell=True)
with open(self.table_file, 'rb') as f:
self.table_dict = pickle.load(f)
print('Successfully loaded {}'.format(self.table_file))
def _get_input_shape(self, graph):
in_shape = []
for op in graph.ops():
param_key = get_key_from_op(op)
if param_key != '':
in_shape = op.all_inputs()[-1].shape()
break
return in_shape
def _preload_predictor(self, data_type='fp32'):
op_types = [
'depthwise_conv2d', 'conv2d', 'pool2d', 'matmul', 'elementwise_add',
'elementwise_mul', 'concat', 'calib', 'swish'
]
op_dir = self.table_file.split('.')[0] + '_batchsize_1'
for op_type in op_types:
if data_type == 'fp32' and op_type == 'calib':
continue
model = load_predictor(op_type, op_dir, data_type)
key = op_type
if 'conv2d' in op_type:
key = f'{op_type}_{data_type}'
self.predictor[key] = model
def predict(self,
model_file,
param_file,
data_type,
threads=4,
input_shape=None):
"""predict the latency of the model
Args:
model_file(str), param_file(str): The inference model(*.pdmodel, *.pdiparams).
data_type(str): Data type, fp32, fp16 or int8.
threads(int): Threads num.
input_shape(list): Generally, the input shape is confirmed when saving the inference model and the parameter is only effective for input shape that has variable length.
Returns:
latency(float): The latency of the model.
"""
assert data_type in ['fp32', 'int8', 'fp16'
], f'data_type must be one of [fp32, int8, fp16]'
if self.hardware and self.threads != threads:
self._change_table(threads)
if self.predictor_state and f'conv2d_{data_type}' not in self.predictor:
self._preload_predictor(data_type)
enable_fp16 = True if data_type == 'fp16' else False
pbmodel_file = opt_model(
model_file=model_file,
param_file=param_file,
optimize_out_type='protobuf',
enable_fp16=enable_fp16)
paddle.enable_static()
with open(pbmodel_file, "rb") as f:
fluid_program = paddle.fluid.framework.Program.parse_from_string(
f.read())
graph = GraphWrapper(fluid_program)
        if input_shape is not None:
            ori_shape = self._get_input_shape(graph)
            assert ori_shape == input_shape, "The parameter 'input_shape' doesn't work for now. The input shape is fixed when saving the inference model"
latency = 0.0
new_op = {}
for op in graph.ops():
param_key = get_key_from_op(op)
            if param_key == '':
                continue
            if param_key is None:
if op.type() in new_op:
new_op[op.type()] += 1
else:
new_op.update({op.type(): 1})
continue
if param_key in self.table_dict:
latency += self.table_dict[param_key]
elif self.predictor_state:
latency += self.op_predictor(op.type(), param_key, data_type)
if len(new_op) != 0:
warnings.warn(
"These ops are not currently supported. Please raise an issue in PaddleSlim if you find the CalledTimes is large enough to affect the accuracy."
)
warnings.warn("OperatorType\tCalledTimes")
for key in new_op:
warnings.warn(f"{key.ljust(15)}\t{new_op[key]}")
shutil.rmtree(os.path.dirname(pbmodel_file))
return latency
def op_predictor(self, op_type, param_key, data_type):
"""predict the latency of the operator which is not in the table
Args:
op_type: The operator's type
param_key: The operator's parameter information.
data_type: Data type, fp32 or int8.
Returns:
latency(float): The latency of the operator.
"""
latency = 0.0
if op_type in [
'depthwise_conv2d', 'conv2d', 'pool2d', 'matmul',
'elementwise_add', 'elementwise_mul', 'concat', 'calib', 'swish'
]:
key = op_type
if 'conv2d' in op_type:
key = f'{op_type}_{data_type}'
predictor = self.predictor[key]
features = get_features_from_paramkey(param_key, op_type, data_type)
latency = predictor.predict([features])
else:
data = get_data_from_tables(
table_dict=self.table_dict,
op_type=op_type,
data_type=data_type)
features = get_features_from_paramkey(param_key, op_type, data_type)
latency = nearest_interpolate(features, data)
assert latency != None, f'{param_key} is not in the table.'
return latency
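# Editor's note: a minimal usage sketch, assuming an exported Paddle inference
# model (the paths below are hypothetical). 'SD710' selects a built-in hardware
# latency table, which is downloaded on first use.
if __name__ == '__main__':
    predictor = TableLatencyPredictor(table_file='SD710')
    latency = predictor.predict(
        model_file='MobileNetV1_infer/inference.pdmodel',
        param_file='MobileNetV1_infer/inference.pdiparams',
        data_type='fp32',
        threads=4)
    print('Predicted latency:', latency)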
|
the-stack_0_25129
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import get_request_site_address, encode
from frappe.model.document import Document
from six.moves.urllib.parse import quote
from frappe.website.router import resolve_route
from frappe.website.doctype.website_theme.website_theme import add_website_theme
class WebsiteSettings(Document):
def validate(self):
self.validate_top_bar_items()
self.validate_footer_items()
self.validate_home_page()
def validate_home_page(self):
if frappe.flags.in_install:
return
if self.home_page and not resolve_route(self.home_page):
frappe.msgprint(_("Invalid Home Page") + " (Standard pages - index, login, products, blog, about, contact)")
self.home_page = ''
def validate_top_bar_items(self):
"""validate url in top bar items"""
for top_bar_item in self.get("top_bar_items"):
if top_bar_item.parent_label:
parent_label_item = self.get("top_bar_items", {"label": top_bar_item.parent_label})
if not parent_label_item:
# invalid item
frappe.throw(_("{0} does not exist in row {1}").format(top_bar_item.parent_label, top_bar_item.idx))
elif not parent_label_item[0] or parent_label_item[0].url:
# parent cannot have url
frappe.throw(_("{0} in row {1} cannot have both URL and child items").format(top_bar_item.parent_label,
top_bar_item.idx))
def validate_footer_items(self):
"""validate url in top bar items"""
for footer_item in self.get("footer_items"):
if footer_item.parent_label:
parent_label_item = self.get("footer_items", {"label": footer_item.parent_label})
if not parent_label_item:
# invalid item
frappe.throw(_("{0} does not exist in row {1}").format(footer_item.parent_label, footer_item.idx))
elif not parent_label_item[0] or parent_label_item[0].url:
# parent cannot have url
frappe.throw(_("{0} in row {1} cannot have both URL and child items").format(footer_item.parent_label,
footer_item.idx))
def on_update(self):
self.clear_cache()
def clear_cache(self):
# make js and css
# clear web cache (for menus!)
frappe.clear_cache(user = 'Guest')
from frappe.website.render import clear_cache
clear_cache()
# clears role based home pages
frappe.clear_cache()
def get_website_settings():
hooks = frappe.get_hooks()
context = frappe._dict({
'top_bar_items': get_items('top_bar_items'),
'footer_items': get_items('footer_items'),
"post_login": [
{"label": _("My Account"), "url": "/me"},
# {"class": "divider"},
{"label": _("Logout"), "url": "/?cmd=web_logout"}
]
})
settings = frappe.get_single("Website Settings")
for k in ["banner_html", "brand_html", "copyright", "twitter_share_via",
"facebook_share", "google_plus_one", "twitter_share", "linked_in_share",
"disable_signup", "hide_footer_signup", "head_html", "title_prefix",
"navbar_search"]:
if hasattr(settings, k):
context[k] = settings.get(k)
if settings.address:
context["footer_address"] = settings.address
for k in ["facebook_share", "google_plus_one", "twitter_share", "linked_in_share",
"disable_signup"]:
context[k] = int(context.get(k) or 0)
if frappe.request:
context.url = quote(str(get_request_site_address(full_address=True)), safe="/:")
context.encoded_title = quote(encode(context.title or ""), str(""))
for update_website_context in hooks.update_website_context or []:
frappe.get_attr(update_website_context)(context)
context.web_include_js = hooks.web_include_js or []
context.web_include_css = hooks.web_include_css or []
via_hooks = frappe.get_hooks("website_context")
for key in via_hooks:
context[key] = via_hooks[key]
if key not in ("top_bar_items", "footer_items", "post_login") \
and isinstance(context[key], (list, tuple)):
context[key] = context[key][-1]
add_website_theme(context)
if not context.get("favicon"):
context["favicon"] = "/assets/frappe/images/favicon.png"
if settings.favicon and settings.favicon != "attach_files:":
context["favicon"] = settings.favicon
return context
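# Example hooks.py entries consumed by get_website_settings() above (module
# paths and values are illustrative):
#   update_website_context = ["myapp.overrides.update_website_context"]
#   website_context = {"favicon": "/assets/myapp/images/favicon.png"}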
def get_items(parentfield):
all_top_items = frappe.db.sql("""\
select * from `tabTop Bar Item`
where parent='Website Settings' and parentfield= %s
order by idx asc""", parentfield, as_dict=1)
top_items = [d for d in all_top_items if not d['parent_label']]
# attach child items to top bar
for d in all_top_items:
if d['parent_label']:
for t in top_items:
if t['label']==d['parent_label']:
if not 'child_items' in t:
t['child_items'] = []
t['child_items'].append(d)
break
return top_items
@frappe.whitelist(allow_guest=True)
def is_chat_enabled():
return bool(frappe.db.get_single_value('Website Settings', 'chat_enable'))
|
the-stack_0_25131
|
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A setup module for the GRPC Python package."""
# setuptools need to be imported before distutils. Otherwise it might lead to
# undesirable behaviors or errors.
import setuptools # isort:skip
# Monkey Patch the unix compiler to accept ASM
# files used by boring SSL.
from distutils.unixccompiler import UnixCCompiler
UnixCCompiler.src_extensions.append('.S')
del UnixCCompiler
from distutils import cygwinccompiler
from distutils import extension as _extension
from distutils import util
import os
import os.path
import platform
import re
import shlex
import shutil
import subprocess
from subprocess import PIPE
import sys
import sysconfig
import _metadata
import pkg_resources
from setuptools.command import egg_info
# Redirect the manifest template from MANIFEST.in to PYTHON-MANIFEST.in.
egg_info.manifest_maker.template = 'PYTHON-MANIFEST.in'
PY3 = sys.version_info.major == 3
PYTHON_STEM = os.path.join('src', 'python', 'grpcio')
CORE_INCLUDE = (
'include',
'.',
)
ABSL_INCLUDE = (os.path.join('third_party', 'abseil-cpp'),)
ADDRESS_SORTING_INCLUDE = (os.path.join('third_party', 'address_sorting',
'include'),)
CARES_INCLUDE = (
os.path.join('third_party', 'cares'),
os.path.join('third_party', 'cares', 'cares'),
)
if 'darwin' in sys.platform:
CARES_INCLUDE += (os.path.join('third_party', 'cares', 'config_darwin'),)
if 'freebsd' in sys.platform:
CARES_INCLUDE += (os.path.join('third_party', 'cares', 'config_freebsd'),)
if 'linux' in sys.platform:
CARES_INCLUDE += (os.path.join('third_party', 'cares', 'config_linux'),)
if 'openbsd' in sys.platform:
CARES_INCLUDE += (os.path.join('third_party', 'cares', 'config_openbsd'),)
RE2_INCLUDE = (os.path.join('third_party', 're2'),)
SSL_INCLUDE = (os.path.join('third_party', 'boringssl-with-bazel', 'src',
'include'),)
UPB_INCLUDE = (os.path.join('third_party', 'upb'),)
UPB_GRPC_GENERATED_INCLUDE = (os.path.join('src', 'core', 'ext',
'upb-generated'),)
UPBDEFS_GRPC_GENERATED_INCLUDE = (os.path.join('src', 'core', 'ext',
'upbdefs-generated'),)
XXHASH_INCLUDE = (os.path.join('third_party', 'xxhash'),)
ZLIB_INCLUDE = (os.path.join('third_party', 'zlib'),)
README = os.path.join(PYTHON_STEM, 'README.rst')
# Ensure we're in the proper directory whether or not we're being used by pip.
os.chdir(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, os.path.abspath(PYTHON_STEM))
# Break import-style to ensure we can actually find our in-repo dependencies.
import _parallel_compile_patch
import _spawn_patch
import grpc_core_dependencies
import commands
import grpc_version
_parallel_compile_patch.monkeypatch_compile_maybe()
_spawn_patch.monkeypatch_spawn()
LICENSE = 'Apache License 2.0'
CLASSIFIERS = [
'Development Status :: 5 - Production/Stable',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'License :: OSI Approved :: Apache Software License',
]
def _env_bool_value(env_name, default):
"""Parses a bool option from an environment variable"""
return os.environ.get(env_name, default).upper() not in ['FALSE', '0', '']
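# For illustration (the variable name is hypothetical): with the parsing above,
# _env_bool_value('GRPC_FOO', 'True') is True when GRPC_FOO is unset, and False
# when GRPC_FOO is set to 'false', 'False', '0' or the empty string.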
BUILD_WITH_BORING_SSL_ASM = _env_bool_value('GRPC_BUILD_WITH_BORING_SSL_ASM',
'True')
# Export this environment variable to override the platform variant that will
# be chosen for boringssl assembly optimizations. This option is useful when
# crosscompiling and the host platform as obtained by distutils.util.get_platform()
# doesn't match the platform we are targeting.
# Example value: "linux-aarch64"
BUILD_OVERRIDE_BORING_SSL_ASM_PLATFORM = os.environ.get(
'GRPC_BUILD_OVERRIDE_BORING_SSL_ASM_PLATFORM', '')
# Environment variable to determine whether or not the Cython extension should
# *use* Cython or use the generated C files. Note that this requires the C files
# to have been generated by building first *with* Cython support. Even if this
# is set to false, if the script detects that the generated `.c` file isn't
# present, then it will still attempt to use Cython.
BUILD_WITH_CYTHON = _env_bool_value('GRPC_PYTHON_BUILD_WITH_CYTHON', 'False')
# Export this variable to use the system installation of openssl. You need to
# have the header files installed (in /usr/include/openssl) and during
# runtime, the shared library must be installed
BUILD_WITH_SYSTEM_OPENSSL = _env_bool_value('GRPC_PYTHON_BUILD_SYSTEM_OPENSSL',
'False')
# Export this variable to use the system installation of zlib. You need to
# have the header files installed (in /usr/include/) and during
# runtime, the shared library must be installed
BUILD_WITH_SYSTEM_ZLIB = _env_bool_value('GRPC_PYTHON_BUILD_SYSTEM_ZLIB',
'False')
# Export this variable to use the system installation of cares. You need to
# have the header files installed (in /usr/include/) and during
# runtime, the shared library must be installed
BUILD_WITH_SYSTEM_CARES = _env_bool_value('GRPC_PYTHON_BUILD_SYSTEM_CARES',
'False')
# Export this variable to use the system installation of re2. You need to
# have the header files installed (in /usr/include/re2) and during
# runtime, the shared library must be installed
BUILD_WITH_SYSTEM_RE2 = _env_bool_value('GRPC_PYTHON_BUILD_SYSTEM_RE2', 'False')
# Export this variable to force building the python extension with a statically linked libstdc++.
# At least on linux, this is normally not needed as we can build manylinux-compatible wheels on linux just fine
# without statically linking libstdc++ (which leads to a slight increase in the wheel size).
# This option is useful when crosscompiling wheels for aarch64 where
# it's difficult to ensure that the crosscompilation toolchain has a high-enough version
# of GCC (we require >4.9) but still uses old-enough libstdc++ symbols.
# TODO(jtattermusch): remove this workaround once issues with crosscompiler version are resolved.
BUILD_WITH_STATIC_LIBSTDCXX = _env_bool_value(
'GRPC_PYTHON_BUILD_WITH_STATIC_LIBSTDCXX', 'False')
# For local development use only: This skips building gRPC Core and its
# dependencies, including protobuf and boringssl. This allows "incremental"
# compilation by first building gRPC Core using make, then building only the
# Python/Cython layers here.
#
# Note that this requires libboringssl.a in the libs/{dbg,opt}/ directory, which
# may require configuring make to not use the system openssl implementation:
#
# make HAS_SYSTEM_OPENSSL_ALPN=0
#
# TODO(ericgribkoff) Respect the BUILD_WITH_SYSTEM_* flags alongside this option
USE_PREBUILT_GRPC_CORE = _env_bool_value('GRPC_PYTHON_USE_PREBUILT_GRPC_CORE',
'False')
# If this environment variable is set, GRPC will not try to be compatible with
# libc versions older than the one it was compiled against.
DISABLE_LIBC_COMPATIBILITY = _env_bool_value(
'GRPC_PYTHON_DISABLE_LIBC_COMPATIBILITY', 'False')
# Environment variable to determine whether or not to enable coverage analysis
# in Cython modules.
ENABLE_CYTHON_TRACING = _env_bool_value('GRPC_PYTHON_ENABLE_CYTHON_TRACING',
'False')
# Environment variable specifying whether or not there's interest in setting up
# documentation building.
ENABLE_DOCUMENTATION_BUILD = _env_bool_value(
'GRPC_PYTHON_ENABLE_DOCUMENTATION_BUILD', 'False')
def check_linker_need_libatomic():
"""Test if linker on system needs libatomic."""
code_test = (b'#include <atomic>\n' +
b'int main() { return std::atomic<int64_t>{}; }')
cxx = os.environ.get('CXX', 'c++')
cpp_test = subprocess.Popen([cxx, '-x', 'c++', '-std=c++11', '-'],
stdin=PIPE,
stdout=PIPE,
stderr=PIPE)
cpp_test.communicate(input=code_test)
if cpp_test.returncode == 0:
return False
# Double-check to see if -latomic actually can solve the problem.
# https://github.com/grpc/grpc/issues/22491
cpp_test = subprocess.Popen(
[cxx, '-x', 'c++', '-std=c++11', '-latomic', '-'],
stdin=PIPE,
stdout=PIPE,
stderr=PIPE)
cpp_test.communicate(input=code_test)
return cpp_test.returncode == 0
# There are some situations (like on Windows) where CC, CFLAGS, and LDFLAGS are
# entirely ignored/dropped/forgotten by distutils and its Cygwin/MinGW support.
# We use these environment variables to thus get around that without locking
# ourselves in w.r.t. the multitude of operating systems this ought to build on.
# We can also use these variables as a way to inject environment-specific
# compiler/linker flags. We assume GCC-like compilers and/or MinGW as a
# reasonable default.
EXTRA_ENV_COMPILE_ARGS = os.environ.get('GRPC_PYTHON_CFLAGS', None)
EXTRA_ENV_LINK_ARGS = os.environ.get('GRPC_PYTHON_LDFLAGS', None)
if EXTRA_ENV_COMPILE_ARGS is None:
EXTRA_ENV_COMPILE_ARGS = ' -std=c++11'
if 'win32' in sys.platform:
if sys.version_info < (3, 5):
EXTRA_ENV_COMPILE_ARGS += ' -D_hypot=hypot'
# We use define flags here and don't directly add to DEFINE_MACROS below to
# ensure that the expert user/builder has a way of turning it off (via the
# envvars) without adding yet more GRPC-specific envvars.
# See https://sourceforge.net/p/mingw-w64/bugs/363/
if '32' in platform.architecture()[0]:
EXTRA_ENV_COMPILE_ARGS += ' -D_ftime=_ftime32 -D_timeb=__timeb32 -D_ftime_s=_ftime32_s'
else:
EXTRA_ENV_COMPILE_ARGS += ' -D_ftime=_ftime64 -D_timeb=__timeb64'
else:
# We need to statically link the C++ Runtime, only the C runtime is
# available dynamically
EXTRA_ENV_COMPILE_ARGS += ' /MT'
elif "linux" in sys.platform:
EXTRA_ENV_COMPILE_ARGS += ' -std=gnu99 -fvisibility=hidden -fno-wrapv -fno-exceptions'
elif "darwin" in sys.platform:
EXTRA_ENV_COMPILE_ARGS += ' -stdlib=libc++ -fvisibility=hidden -fno-wrapv -fno-exceptions -DHAVE_UNISTD_H'
if EXTRA_ENV_LINK_ARGS is None:
EXTRA_ENV_LINK_ARGS = ''
if "linux" in sys.platform or "darwin" in sys.platform:
EXTRA_ENV_LINK_ARGS += ' -lpthread'
if check_linker_need_libatomic():
EXTRA_ENV_LINK_ARGS += ' -latomic'
elif "win32" in sys.platform and sys.version_info < (3, 5):
msvcr = cygwinccompiler.get_msvcr()[0]
EXTRA_ENV_LINK_ARGS += (
' -static-libgcc -static-libstdc++ -mcrtdll={msvcr}'
' -static -lshlwapi'.format(msvcr=msvcr))
if "linux" in sys.platform:
EXTRA_ENV_LINK_ARGS += ' -Wl,-wrap,memcpy -static-libgcc'
EXTRA_COMPILE_ARGS = shlex.split(EXTRA_ENV_COMPILE_ARGS)
EXTRA_LINK_ARGS = shlex.split(EXTRA_ENV_LINK_ARGS)
if BUILD_WITH_STATIC_LIBSTDCXX:
EXTRA_LINK_ARGS.append('-static-libstdc++')
CYTHON_EXTENSION_PACKAGE_NAMES = ()
CYTHON_EXTENSION_MODULE_NAMES = ('grpc._cython.cygrpc',)
CYTHON_HELPER_C_FILES = ()
CORE_C_FILES = tuple(grpc_core_dependencies.CORE_SOURCE_FILES)
if "win32" in sys.platform:
CORE_C_FILES = filter(lambda x: 'third_party/cares' not in x, CORE_C_FILES)
if BUILD_WITH_SYSTEM_OPENSSL:
CORE_C_FILES = filter(lambda x: 'third_party/boringssl' not in x,
CORE_C_FILES)
CORE_C_FILES = filter(lambda x: 'src/boringssl' not in x, CORE_C_FILES)
SSL_INCLUDE = (os.path.join('/usr', 'include', 'openssl'),)
if BUILD_WITH_SYSTEM_ZLIB:
CORE_C_FILES = filter(lambda x: 'third_party/zlib' not in x, CORE_C_FILES)
ZLIB_INCLUDE = (os.path.join('/usr', 'include'),)
if BUILD_WITH_SYSTEM_CARES:
CORE_C_FILES = filter(lambda x: 'third_party/cares' not in x, CORE_C_FILES)
CARES_INCLUDE = (os.path.join('/usr', 'include'),)
if BUILD_WITH_SYSTEM_RE2:
CORE_C_FILES = filter(lambda x: 'third_party/re2' not in x, CORE_C_FILES)
RE2_INCLUDE = (os.path.join('/usr', 'include', 're2'),)
EXTENSION_INCLUDE_DIRECTORIES = ((PYTHON_STEM,) + CORE_INCLUDE + ABSL_INCLUDE +
ADDRESS_SORTING_INCLUDE + CARES_INCLUDE +
RE2_INCLUDE + SSL_INCLUDE + UPB_INCLUDE +
UPB_GRPC_GENERATED_INCLUDE +
UPBDEFS_GRPC_GENERATED_INCLUDE +
XXHASH_INCLUDE + ZLIB_INCLUDE)
EXTENSION_LIBRARIES = ()
if "linux" in sys.platform:
EXTENSION_LIBRARIES += ('rt',)
if not "win32" in sys.platform:
EXTENSION_LIBRARIES += ('m',)
if "win32" in sys.platform:
EXTENSION_LIBRARIES += (
'advapi32',
'ws2_32',
'dbghelp',
)
if BUILD_WITH_SYSTEM_OPENSSL:
EXTENSION_LIBRARIES += (
'ssl',
'crypto',
)
if BUILD_WITH_SYSTEM_ZLIB:
EXTENSION_LIBRARIES += ('z',)
if BUILD_WITH_SYSTEM_CARES:
EXTENSION_LIBRARIES += ('cares',)
if BUILD_WITH_SYSTEM_RE2:
EXTENSION_LIBRARIES += ('re2',)
DEFINE_MACROS = (('_WIN32_WINNT', 0x600),)
asm_files = []
# Quotes on Windows build macros are evaluated differently from other platforms,
# so we must apply quotes asymmetrically in order to yield the proper result in
# the binary.
def _quote_build_define(argument):
if "win32" in sys.platform:
return '"\\\"{}\\\""'.format(argument)
return '"{}"'.format(argument)
DEFINE_MACROS += (
("GRPC_XDS_USER_AGENT_NAME_SUFFIX", _quote_build_define("Python")),
("GRPC_XDS_USER_AGENT_VERSION_SUFFIX",
_quote_build_define(_metadata.__version__)),
)
asm_key = ''
if BUILD_WITH_BORING_SSL_ASM and not BUILD_WITH_SYSTEM_OPENSSL:
boringssl_asm_platform = BUILD_OVERRIDE_BORING_SSL_ASM_PLATFORM if BUILD_OVERRIDE_BORING_SSL_ASM_PLATFORM else util.get_platform(
)
LINUX_X86_64 = 'linux-x86_64'
LINUX_ARM = 'linux-arm'
LINUX_AARCH64 = 'linux-aarch64'
if LINUX_X86_64 == boringssl_asm_platform:
asm_key = 'crypto_linux_x86_64'
elif LINUX_ARM == boringssl_asm_platform:
asm_key = 'crypto_linux_arm'
elif LINUX_AARCH64 == boringssl_asm_platform:
asm_key = 'crypto_linux_aarch64'
elif "mac" in boringssl_asm_platform and "x86_64" in boringssl_asm_platform:
asm_key = 'crypto_mac_x86_64'
else:
print("ASM Builds for BoringSSL currently not supported on:",
boringssl_asm_platform)
if asm_key:
asm_files = grpc_core_dependencies.ASM_SOURCE_FILES[asm_key]
else:
DEFINE_MACROS += (('OPENSSL_NO_ASM', 1),)
if not DISABLE_LIBC_COMPATIBILITY:
DEFINE_MACROS += (('GPR_BACKWARDS_COMPATIBILITY_MODE', 1),)
if "win32" in sys.platform:
# TODO(zyc): Re-enable c-ares on x64 and x86 windows after fixing the
# ares_library_init compilation issue
DEFINE_MACROS += (
('WIN32_LEAN_AND_MEAN', 1),
('CARES_STATICLIB', 1),
('GRPC_ARES', 0),
('NTDDI_VERSION', 0x06000000),
('NOMINMAX', 1),
)
if '64bit' in platform.architecture()[0]:
DEFINE_MACROS += (('MS_WIN64', 1),)
elif sys.version_info >= (3, 5):
# For some reason, this is needed to get access to inet_pton/inet_ntop
# on msvc, but only for 32 bits
DEFINE_MACROS += (('NTDDI_VERSION', 0x06000000),)
else:
DEFINE_MACROS += (
('HAVE_CONFIG_H', 1),
('GRPC_ENABLE_FORK_SUPPORT', 1),
)
LDFLAGS = tuple(EXTRA_LINK_ARGS)
CFLAGS = tuple(EXTRA_COMPILE_ARGS)
if "linux" in sys.platform or "darwin" in sys.platform:
pymodinit_type = 'PyObject*' if PY3 else 'void'
pymodinit = 'extern "C" __attribute__((visibility ("default"))) {}'.format(
pymodinit_type)
DEFINE_MACROS += (('PyMODINIT_FUNC', pymodinit),)
DEFINE_MACROS += (('GRPC_POSIX_FORK_ALLOW_PTHREAD_ATFORK', 1),)
# By default, Python3 distutils enforces compatibility of
# c plugins (.so files) with the OSX version Python was built with.
# We need OSX 10.10, the oldest which supports C++ thread_local.
# Python 3.9: Mac OS Big Sur sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET') returns int (11)
if 'darwin' in sys.platform:
mac_target = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET')
if mac_target:
mac_target = pkg_resources.parse_version(str(mac_target))
if mac_target < pkg_resources.parse_version('10.10.0'):
os.environ['MACOSX_DEPLOYMENT_TARGET'] = '10.10'
os.environ['_PYTHON_HOST_PLATFORM'] = re.sub(
r'macosx-[0-9]+\.[0-9]+-(.+)', r'macosx-10.10-\1',
util.get_platform())
def cython_extensions_and_necessity():
cython_module_files = [
os.path.join(PYTHON_STEM,
name.replace('.', '/') + '.pyx')
for name in CYTHON_EXTENSION_MODULE_NAMES
]
config = os.environ.get('CONFIG', 'opt')
prefix = 'libs/' + config + '/'
if USE_PREBUILT_GRPC_CORE:
extra_objects = [
prefix + 'libares.a', prefix + 'libboringssl.a',
prefix + 'libgpr.a', prefix + 'libgrpc.a'
]
core_c_files = []
else:
core_c_files = list(CORE_C_FILES)
extra_objects = []
extensions = [
_extension.Extension(
name=module_name,
sources=([module_file] + list(CYTHON_HELPER_C_FILES) +
core_c_files + asm_files),
include_dirs=list(EXTENSION_INCLUDE_DIRECTORIES),
libraries=list(EXTENSION_LIBRARIES),
define_macros=list(DEFINE_MACROS),
extra_objects=extra_objects,
extra_compile_args=list(CFLAGS),
extra_link_args=list(LDFLAGS),
) for (module_name, module_file
) in zip(list(CYTHON_EXTENSION_MODULE_NAMES), cython_module_files)
]
need_cython = BUILD_WITH_CYTHON
if not BUILD_WITH_CYTHON:
need_cython = need_cython or not commands.check_and_update_cythonization(
extensions)
# TODO: the strategy for conditional compiling and exposing the aio Cython
# dependencies will be revisited by https://github.com/grpc/grpc/issues/19728
return commands.try_cythonize(extensions,
linetracing=ENABLE_CYTHON_TRACING,
mandatory=BUILD_WITH_CYTHON), need_cython
CYTHON_EXTENSION_MODULES, need_cython = cython_extensions_and_necessity()
PACKAGE_DIRECTORIES = {
'': PYTHON_STEM,
}
INSTALL_REQUIRES = (
"six>=1.5.2",
"futures>=2.2.0; python_version<'3.2'",
"enum34>=1.0.4; python_version<'3.4'",
)
EXTRAS_REQUIRES = {
'protobuf': 'grpcio-tools>={version}'.format(version=grpc_version.VERSION),
}
SETUP_REQUIRES = INSTALL_REQUIRES + (
'Sphinx~=1.8.1',
'six>=1.10',
) if ENABLE_DOCUMENTATION_BUILD else ()
try:
import Cython
except ImportError:
if BUILD_WITH_CYTHON:
sys.stderr.write(
"You requested a Cython build via GRPC_PYTHON_BUILD_WITH_CYTHON, "
"but do not have Cython installed. We won't stop you from using "
"other commands, but the extension files will fail to build.\n")
elif need_cython:
sys.stderr.write(
'We could not find Cython. Setup may take 10-20 minutes.\n')
SETUP_REQUIRES += ('cython>=0.23',)
COMMAND_CLASS = {
'doc': commands.SphinxDocumentation,
'build_project_metadata': commands.BuildProjectMetadata,
'build_py': commands.BuildPy,
'build_ext': commands.BuildExt,
'gather': commands.Gather,
'clean': commands.Clean,
}
# Ensure that package data is copied over before any commands have been run:
credentials_dir = os.path.join(PYTHON_STEM, 'grpc', '_cython', '_credentials')
try:
os.mkdir(credentials_dir)
except OSError:
pass
shutil.copyfile(os.path.join('etc', 'roots.pem'),
os.path.join(credentials_dir, 'roots.pem'))
PACKAGE_DATA = {
# Binaries that may or may not be present in the final installation, but are
# mentioned here for completeness.
'grpc._cython': [
'_credentials/roots.pem',
'_windows/grpc_c.32.python',
'_windows/grpc_c.64.python',
],
}
PACKAGES = setuptools.find_packages(PYTHON_STEM)
setuptools.setup(
name='grpcio',
version=grpc_version.VERSION,
description='HTTP/2-based RPC framework',
author='The gRPC Authors',
author_email='[email protected]',
url='https://grpc.io',
license=LICENSE,
classifiers=CLASSIFIERS,
long_description=open(README).read(),
ext_modules=CYTHON_EXTENSION_MODULES,
packages=list(PACKAGES),
package_dir=PACKAGE_DIRECTORIES,
package_data=PACKAGE_DATA,
python_requires='>=3.6',
install_requires=INSTALL_REQUIRES,
extras_require=EXTRAS_REQUIRES,
setup_requires=SETUP_REQUIRES,
cmdclass=COMMAND_CLASS,
)
|
the-stack_0_25132
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import cv2, numpy as np
parser = argparse.ArgumentParser()
parser.add_argument('--path', default='../data/Lena.png', help='Image path.')
params = parser.parse_args()
image = cv2.imread(params.path)
image_to_show = np.copy(image)
mouse_pressed = False
s_x = s_y = e_x = e_y = -1
def mouse_callback(event, x, y, flags, param):
global image_to_show, s_x, s_y, e_x, e_y, mouse_pressed
if event == cv2.EVENT_LBUTTONDOWN:
mouse_pressed = True
s_x, s_y = x, y
image_to_show = np.copy(image)
elif event == cv2.EVENT_MOUSEMOVE:
if mouse_pressed:
image_to_show = np.copy(image)
cv2.rectangle(image_to_show, (s_x, s_y),
(x, y), (0, 255, 0), 1)
elif event == cv2.EVENT_LBUTTONUP:
mouse_pressed = False
e_x, e_y = x, y
cv2.namedWindow('image')
cv2.setMouseCallback('image', mouse_callback)
while True:
cv2.imshow('image', image_to_show)
k = cv2.waitKey(1)
if k == ord('c'):
if s_y > e_y:
s_y, e_y = e_y, s_y
if s_x > e_x:
s_x, e_x = e_x, s_x
        if e_y - s_y > 0 and e_x - s_x > 0:
image = image[s_y:e_y, s_x:e_x]
image_to_show = np.copy(image)
elif k == 27:
break
cv2.destroyAllWindows()
|
the-stack_0_25133
|
"""Packet classes for Control-J Monitors."""
from ..utils import mulaw_to_value, data_to_binstring
from ..const import MANUFACTURER_SPECIFIC_DATA_TYPE, CJ_TEMPHUM_TYPE, COMPLETE_LOCALE_NAME_DATA_TYPE
class CJMonitorAdvertisement(object):
"""CJ Monitor advertisement."""
def __init__(self, frame):
for ltv in frame:
if ltv['type'] == MANUFACTURER_SPECIFIC_DATA_TYPE:
msd = ltv['value']
self._company_id = msd['company_identifier']
self._beacon_type = msd['data']['beacon_type']
if self._beacon_type == CJ_TEMPHUM_TYPE:
data = msd['data']['data']
self._temperature = data['temperature'] / 100.0
self._humidity = data['humidity']
self._light = mulaw_to_value(data['light']) / 10.0
elif ltv['type'] == COMPLETE_LOCALE_NAME_DATA_TYPE:
self._name = data_to_binstring(ltv['value']).decode("ascii")
@property
def name(self):
"""device name"""
return self._name
@property
def humidity(self):
"""humidity in %"""
return self._humidity
@property
def company_id(self):
"""company ID"""
return self._company_id
@property
def beacon_type(self):
"""type of this beacon"""
return self._beacon_type
@property
def temperature(self):
"""temperature in C."""
return self._temperature
@property
def light(self):
"""light level in lux"""
return self._light
@property
def properties(self):
"""Get Monitor properties."""
return {'name': self.name,
'temperature': self.temperature,
'humidity': self.humidity,
'light': self.light,
'company_id': self.company_id,
'beacon_type': self.beacon_type}
def __str__(self):
return "CJMonitorAdvertisement<name: {name}, temp: {temperature:.1f}," \
"humidity: {humidity:d}, light: {light:.0f}>".format(
name=self.name, temperature=self.temperature,
humidity=self.humidity, light=self.light)
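# For reference, __init__ above expects `frame` to be an iterable of
# type/value dicts along these lines (field values are illustrative):
#   [{'type': MANUFACTURER_SPECIFIC_DATA_TYPE,
#     'value': {'company_identifier': ...,
#               'data': {'beacon_type': CJ_TEMPHUM_TYPE,
#                        'data': {'temperature': ..., 'humidity': ..., 'light': ...}}}},
#    {'type': COMPLETE_LOCALE_NAME_DATA_TYPE, 'value': <raw local name bytes>}]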
|
the-stack_0_25134
|
import fnmatch
import os
import sys
from sqlite3 import *
from typing import List, Any
__all__ = ['ControllerDatabase']
class ControllerDatabase:
init: bool = False
def __init__(self,
db_name: str,
sql_file_name: str = f"data_model_table_create.sql"
):
self.connection: Connection = connect(db_name, check_same_thread=False, timeout=5)
self.cursor: Cursor = self.connection.cursor()
try:
            sql_file_path = find(sql_file_name, '../')
            if not sql_file_path:
                raise FileNotFoundError(f"Could not find {sql_file_name}")
            with open(sql_file_path[0], 'r', encoding="utf-8") as file:
                sql_script: str = "".join(file.readlines())
connection: Connection = connect(db_name)
connection.cursor().executescript(
sql_script
)
except Exception as exp:
tb = sys.exc_info()[2]
print(exp.with_traceback(tb))
print()
@staticmethod
def initialize(db_name: str, sql_file_path: str) -> bool:
if ControllerDatabase.init:
print(f"Database '{db_name}' already initialized")
return False
        if not os.path.exists(sql_file_path):
            print(f"Could not find {sql_file_path}")
            return False
        with open(sql_file_path, 'r', encoding="utf-8") as file:
            sql_script: str = "".join(file.readlines())
connection: Connection = connect(db_name)
connection.cursor().execute(
sql_script
)
ControllerDatabase.init = True
return True
def run_query(self, query: str):
self.cursor.execute(query)
def run_query_with_args(self, query: str, args: dict):
        self.cursor.execute(query, args)
def fetch_all_results_from_last_query(self) -> List[Any]:
return self.cursor.fetchall()
def fetch_one_result_from_last_query(self) -> Any:
return self.cursor.fetchone()
def fetch_some_results_from_last_query(self, number_of_queries: int = 5) -> List[Any]:
return self.cursor.fetchmany(number_of_queries)
def save_changes(self):
self.connection.commit()
def __del__(self):
self.connection.close()
def find(pattern, path):
result = []
for root, dirs, files in os.walk(path):
for name in files:
if fnmatch.fnmatch(name, pattern):
result.append(os.path.join(root, name))
return result
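if __name__ == "__main__":
    # Editor's sketch (hypothetical database name and query): open a database
    # using the bundled SQL schema, run a read-only query and print the rows.
    db = ControllerDatabase("controller.db")
    db.run_query("SELECT name FROM sqlite_master WHERE type='table'")
    print(db.fetch_all_results_from_last_query())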
|
the-stack_0_25136
|
#
# @file from https://github.com/Neutree/c_cpp_project_framework
# @author neucrack
#
import argparse
import os, sys, time, re
import subprocess
time_str_header = '''
#define BUILD_TIME_YEAR {}
#define BUILD_TIME_MONTH {}
#define BUILD_TIME_DAY {}
#define BUILD_TIME_HOUR {}
#define BUILD_TIME_MINUTE {}
#define BUILD_TIME_SECOND {}
#define BUILD_TIME_WEEK_OF_DAY {}
#define BUILD_TIME_YEAR_OF_DAY {}
'''
time_str_cmake = '''
set(BUILD_TIME_YEAR "{}")
set(BUILD_TIME_MONTH "{}")
set(BUILD_TIME_DAY "{}")
set(BUILD_TIME_HOUR "{}")
set(BUILD_TIME_MINUTE "{}")
set(BUILD_TIME_SECOND "{}")
set(BUILD_TIME_WEEK_OF_DAY "{}")
set(BUILD_TIME_YEAR_OF_DAY "{}")
'''
time_str_makefile = '''
BUILD_TIME_YEAR ={}
BUILD_TIME_MONTH ={}
BUILD_TIME_DAY ={}
BUILD_TIME_HOUR ={}
BUILD_TIME_MINUTE ={}
BUILD_TIME_SECOND ={}
BUILD_TIME_WEEK_OF_DAY ={}
BUILD_TIME_YEAR_OF_DAY ={}
'''
git_str_header = '''
#define BUILD_VERSION_MAJOR {}
#define BUILD_VERSION_MINOR {}
#define BUILD_VERSION_MICRO {}
#define BUILD_VERSION_DEV {}
#define BUILD_GIT_COMMIT_ID "{}"
#define BUILD_GIT_IS_DIRTY {}
'''
git_str_cmake = '''
set(BUILD_VERSION_MAJOR "{}")
set(BUILD_VERSION_MINOR "{}")
set(BUILD_VERSION_MICRO "{}")
set(BUILD_VERSION_DEV "{}")
set(BUILD_GIT_COMMIT_ID "{}")
set(BUILD_GIT_IS_DIRTY "{}")
'''
git_str_makefile = '''
BUILD_VERSION_MAJOR ={}
BUILD_VERSION_MINOR ={}
BUILD_VERSION_MICRO ={}
BUILD_VERSION_DEV ={}
BUILD_GIT_COMMIT_ID ={}
BUILD_GIT_IS_DIRTY ={}
'''
str_define_start_makefile = "\n# compile append define start\n"
str_define_end_makefile = "\n# compile append define end\n"
str_define_start_cmake = "\n# compile append define start\n"
str_define_end_cmake = "\n# compile append define end\n"
str_define_start_header = "\n//compile append define start\n"
str_define_end_header = "\n//compile append define end\n"
INFO_FORMAT_STR = {"header": [str_define_start_header, str_define_end_header, time_str_header, git_str_header],
"cmake": [str_define_start_cmake, str_define_end_cmake, time_str_cmake, git_str_cmake],
"makefile": [str_define_start_makefile, str_define_end_makefile, time_str_makefile, git_str_makefile],
}
def remove_old_config_info(start_flag_str, end_flag_str, content):
match = re.findall(r"{}(.*){}".format(start_flag_str, end_flag_str), content, re.MULTILINE|re.DOTALL)
if len(match) == 0:
content += start_flag_str+end_flag_str
else:
content = content.replace(match[0], "")
return content
def append_time_info(time_info_filename, version_info_filename, file_type):
str_time_define_start = INFO_FORMAT_STR[file_type][0]
str_time_define_end = INFO_FORMAT_STR[file_type][1]
append_format_time_str= INFO_FORMAT_STR[file_type][2]
append_format_git_str = INFO_FORMAT_STR[file_type][3]
content = ""
content2 = ""
content2_old = content2
try:
f = open(time_info_filename)
content = f.read()
f.close()
except Exception:
pass
if version_info_filename:
try:
f = open(version_info_filename)
content2 = f.read()
content2_old = content2
f.close()
except Exception:
pass
time_now = time.localtime(time.time())
# remove old config info
content = remove_old_config_info(str_time_define_start, str_time_define_end, content)
content2 = remove_old_config_info(str_time_define_start, str_time_define_end, content2)
# time info
time_define = append_format_time_str.format(time_now.tm_year,
time_now.tm_mon,
time_now.tm_mday,
time_now.tm_hour,
time_now.tm_min,
time_now.tm_sec,
time_now.tm_wday,
time_now.tm_yday)
# git info
# add tag by command;
# git tag -a v0.1.1 -m "release v0.1.1 describe....."
# git push origin --tags
git_tag_name = ""
version_major = 0
version_minor = 0
version_micro = 0
version_dev = 0
git_hash = ""
git_dirty = ""
try:
git_tag = subprocess.check_output(["git", "describe", "--long", "--tag", "--dirty", "--always"], stderr=subprocess.STDOUT, universal_newlines=True).strip()
except subprocess.CalledProcessError as er:
if er.returncode == 128:
# git exit code of 128 means no repository found
print("== WARNING: NOT a git repository !!!")
git_tag = ""
except OSError:
git_tag = ""
# git_tag = "v0.3.2-39-gbeae86483-dirty"
git_tag = git_tag.split("-")
if len(git_tag) == 0:
print("== WARNING: git get info fail")
if len(git_tag) == 1: # bdc1dcf
git_hash = git_tag[0]
elif len(git_tag) == 2: # bdc1dcf-dirty or v0.1.1-bdc1dcf
if git_tag[1] == "dirty":
git_hash = git_tag[0]
git_dirty = git_tag[1]
else:
git_tag_name = git_tag[0]
git_hash = git_tag[1]
elif len(git_tag) == 3: # v0.1.1-10-bdc1dcf or v0.1.1-bdc1dcf-dirty
if git_tag[2] == "dirty":
git_tag_name = git_tag[0]
git_hash = git_tag[1]
git_dirty = git_tag[2]
else:
git_tag_name = git_tag[0]+"."+git_tag[1]
git_hash = git_tag[2]
else: # v0.1.1-10-bdc1dcf-dirty
git_tag_name = git_tag[0]+"."+git_tag[1]
git_hash = git_tag[2]
git_dirty = git_tag[3]
if git_tag_name.lower().startswith("v"):
version = git_tag_name[1:].split(".")
# convert to int from str
for i,v in enumerate(version):
try:
version[i] = int(v)
except Exception:
version[i] = 0
if len(version) >= 1:
version_major = version[0]
if len(version) >= 2:
version_minor = version[1]
if len(version) >= 3:
version_micro = version[2]
if len(version) >= 4:
version_dev = version[3]
if file_type == "header":
dirty_value = 1 if git_dirty=="dirty" else 0
elif file_type == "cmake":
dirty_value = "y" if git_dirty=="dirty" else ""
else:
if git_dirty=="dirty":
dirty_value = "y"
else:
append_format_git_str = append_format_git_str.replace("BUILD_GIT_IS_DIRTY", "# BUILD_GIT_IS_DIRTY")
dirty_value = " is not set"
git_define = append_format_git_str.format(version_major,
version_minor,
version_micro,
version_dev,
git_hash,
dirty_value)
# append time and git info to content
content = content.split(str_time_define_end)
if not version_info_filename:
content = (time_define+git_define+str_time_define_end).join(content)
else:
content2 = content2.split(str_time_define_end)
content = (time_define+str_time_define_end).join(content)
content2 = (git_define+str_time_define_end).join(content2)
# update config file
with open(time_info_filename, "w") as f:
f.write(content)
if version_info_filename and content2 != content2_old:
with open(version_info_filename, "w") as f:
f.write(content2)
def write_config(filename):
print("-- Update build time and version info to makefile config at: " + str(filename))
time_info_filename = None
version_info_filename = None
if filename[0] != None and filename[0].lower() != "none" and os.path.exists(filename[0]):
time_info_filename = filename[0]
if filename[1] != None and filename[1].lower() != "none" and os.path.exists(filename[1]):
version_info_filename = filename[1]
if time_info_filename == None:
raise Exception("param error")
append_time_info(time_info_filename, version_info_filename, "makefile")
def write_cmake(filename):
print("-- Update build time and version info to cmake config at: " + str(filename))
time_info_filename = None
version_info_filename = None
if filename[0] != None and filename[0].lower() != "none" and os.path.exists(filename[0]):
time_info_filename = filename[0]
if filename[1] != None and filename[1].lower() != "none" and os.path.exists(filename[1]):
version_info_filename = filename[1]
if time_info_filename == None:
raise Exception("param error")
append_time_info(time_info_filename, version_info_filename, "cmake")
def write_header(filename):
print("-- Update build time and version info to header config at: " + str(filename))
time_info_filename = None
version_info_filename = None
if filename[0] != None and filename[0].lower() != "none":
time_info_filename = filename[0]
if filename[1] != None and filename[1].lower() != "none":
version_info_filename = filename[1]
if time_info_filename == None:
raise Exception("param error")
append_time_info(time_info_filename, version_info_filename, "header")
parser = argparse.ArgumentParser(description='generate time info for', prog=os.path.basename(sys.argv[0]))
OUTPUT_FORMATS = {"makefile": write_config,
"header": write_header,
"cmake": write_cmake
}
parser.add_argument('--configfile', nargs=3, action='append',
help='Write config file (format and output filename), version_filename can be None so all version info will append to time_filename',
metavar=('FORMAT', 'TIME_FILENAME', "VERSION_FILENAME"),
default=[])
args = parser.parse_args()
out_format = {}
for fmt, filename, version_filename in args.configfile:
if fmt not in OUTPUT_FORMATS.keys():
print("Format %s not supported! Known formats:%s" %(fmt, OUTPUT_FORMATS.keys()))
sys.exit(1)
out_format[fmt] = (filename, version_filename)
for fmt, filename in out_format.items():
# if not os.path.exists(filename):
# print("File not found:%s" %(filename))
# not check always create
func = OUTPUT_FORMATS[fmt]
func(filename)
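# Example invocation (the script and file names are illustrative):
#   python generate_build_info.py --configfile header build_time.h build_version.h
# Passing "None" as VERSION_FILENAME appends the git/version info to
# TIME_FILENAME instead of a separate file. Note that for the cmake and
# makefile formats the target files must already exist.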
|
the-stack_0_25140
|
from django import template
from django.contrib.contenttypes.models import ContentType
register = template.Library()
@register.simple_tag
def create_object_count(app=None):
"""fetches all models of the passed in app and returns a
dict containg the name of each class and the number of instances
Be aware that ContentType fetches the models name from the database. This might cause
problems if you are using verbose names on the classes which differ from the actual class name.
"""
if app:
models = ContentType.objects.filter(app_label=app)
result = []
for x in models:
modelname = x.name
modelname = modelname.replace(" ", "").lower()
try:
fetched_model = ContentType.objects.get(
app_label=app, model=modelname).model_class()
item = {
'name': modelname.title(),
'count': fetched_model.objects.count()
}
            except Exception:
item = {
'name': x,
'count': "Some error occured"
}
try:
item['link'] = fetched_model.get_listview_url()
except AttributeError:
item['link'] = None
result.append(item)
return result
else:
result = [
{
'name': 'no parameter passed in',
'count': '1'
}
]
return result
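# Example template usage (the {% load %} name depends on this module's file
# name; the app label is illustrative):
#   {% load object_count_tags %}
#   {% create_object_count app="myapp" as counts %}
# Each entry in the returned list carries 'name', 'count' and, when the model
# defines get_listview_url(), a 'link'.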
|
the-stack_0_25144
|
import os
from collections import Counter
from pprint import pprint
from tqdm import tqdm
from util import data_io
from utils import BLANK_SYMBOL
def build_vocabulary(
corpus_file="spanish_train.jsonl",
vocab_file="data/labels/vocabulary.json",
min_freq=1000,
):
text_g = (t for _, t in data_io.read_jsonl(corpus_file))
counter = Counter((c.lower() for t in tqdm(text_g) for c in t))
vocab = counter.most_common(200)
data_io.write_json(
vocab_file.replace(".json", "_freqs.json"),
[(c, f) for c, f in vocab if f > min_freq],
)
data_io.write_json(vocab_file, [BLANK_SYMBOL] + [c for c, f in vocab if f > min_freq])
if __name__ == "__main__":
jsonl = os.environ["HOME"] + "/data/asr_data/SPANISH/spanish_train.jsonl"
build_vocabulary(jsonl, "spanish_vocab.json")
|
the-stack_0_25145
|
"""added song length
Revision ID: ea5c888ca880
Revises: d12a3744e77f
Create Date: 2021-04-06 21:43:14.518588
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "ea5c888ca880"
down_revision = "d12a3744e77f"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column("song", sa.Column("length", sa.Integer(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column("song", "length")
# ### end Alembic commands ###
|
the-stack_0_25146
|
import pandas as pd
import numpy as np
from pandas.testing import assert_frame_equal
from dstk.bivariate_test import cal_information_value
def test_iv():
df = pd.DataFrame(
[
[1, 2, 0, 1],
[2, 4, 0, 0],
[3, 6, 0, 0],
[4, 8, 1, 1],
[5, 10, 1, 0],
],
columns=["a", "b", "c", "y"])
iv_df = cal_information_value(df.drop(columns=["y"]), df["y"])
print(iv_df)
#assert_frame_equal(psi_df, df_result, check_like=True)
test_iv()
|
the-stack_0_25147
|
"""An example of how to use Zookeeper to run a Larq BinaryNet experiment on MNIST."""
from functools import partial
from typing import Sequence, Tuple, Union
import larq as lq
import tensorflow as tf
from zookeeper import ComponentField, Field, cli, component, factory, task
from zookeeper.tf import Dataset, Experiment, Preprocessing, TFDSDataset
@component
class Mnist(TFDSDataset):
name = Field("mnist")
train_split = Field("train")
validation_split = Field("test")
@component
class PadCropAndFlip(Preprocessing):
pad_size: int = Field()
def input(self, data, training):
image = data["image"]
if training:
image = tf.image.resize_with_crop_or_pad(
image, self.pad_size, self.pad_size
)
image = tf.image.random_crop(image, self.input_shape)
image = tf.image.random_flip_left_right(image)
else:
image = tf.image.resize_with_crop_or_pad(image, *self.input_shape[:2])
return tf.cast(image, tf.float32) / (255.0 / 2.0) - 1.0
def output(self, data):
return data["label"]
@factory
class BinaryNet:
dataset: Dataset = ComponentField()
preprocessing: Preprocessing = ComponentField()
filters: int = Field(128)
dense_units: int = Field(1024)
kernel_size: Union[int, Tuple[int, int]] = Field((3, 3))
input_shape: Tuple[int, int, int] = Field()
def build(self) -> tf.keras.models.Model:
kwhparams = dict(
input_quantizer="ste_sign",
kernel_quantizer="ste_sign",
kernel_constraint="weight_clip",
use_bias=False,
)
return tf.keras.models.Sequential(
[
# Don't quantize inputs in first layer
lq.layers.QuantConv2D(
self.filters,
self.kernel_size,
kernel_quantizer="ste_sign",
kernel_constraint="weight_clip",
use_bias=False,
input_shape=self.input_shape,
),
tf.keras.layers.BatchNormalization(scale=False),
lq.layers.QuantConv2D(
self.filters, self.kernel_size, padding="same", **kwhparams
),
tf.keras.layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2)),
tf.keras.layers.BatchNormalization(scale=False),
lq.layers.QuantConv2D(
2 * self.filters, self.kernel_size, padding="same", **kwhparams
),
tf.keras.layers.BatchNormalization(scale=False),
lq.layers.QuantConv2D(
2 * self.filters, self.kernel_size, padding="same", **kwhparams
),
tf.keras.layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2)),
tf.keras.layers.BatchNormalization(scale=False),
lq.layers.QuantConv2D(
4 * self.filters, self.kernel_size, padding="same", **kwhparams
),
tf.keras.layers.BatchNormalization(scale=False),
lq.layers.QuantConv2D(
4 * self.filters, self.kernel_size, padding="same", **kwhparams
),
tf.keras.layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2)),
tf.keras.layers.BatchNormalization(scale=False),
tf.keras.layers.Flatten(),
lq.layers.QuantDense(self.dense_units, **kwhparams),
tf.keras.layers.BatchNormalization(scale=False),
lq.layers.QuantDense(self.dense_units, **kwhparams),
tf.keras.layers.BatchNormalization(scale=False),
lq.layers.QuantDense(self.dataset.num_classes, **kwhparams),
tf.keras.layers.BatchNormalization(scale=False),
tf.keras.layers.Activation("softmax"),
]
)
@task
class BinaryNetMnist(Experiment):
dataset = ComponentField(Mnist)
input_shape: Tuple[int, int, int] = Field((28, 28, 1))
preprocessing = ComponentField(PadCropAndFlip, pad_size=32)
model: tf.keras.models.Model = ComponentField(BinaryNet)
epochs = Field(100)
batch_size = Field(128)
learning_rate: float = Field(5e-3)
loss = Field("sparse_categorical_crossentropy")
metrics: Sequence[str] = Field(lambda: ["accuracy"])
@Field
def optimizer(self):
return tf.keras.optimizers.Adam(self.learning_rate)
def run(self):
train_data, num_train_examples = self.dataset.train()
train_data = (
train_data.cache()
.shuffle(10 * self.batch_size)
.repeat()
.map(partial(self.preprocessing, training=True))
.batch(self.batch_size)
)
validation_data, num_validation_examples = self.dataset.validation()
validation_data = (
validation_data.cache()
.repeat()
.map(self.preprocessing)
.batch(self.batch_size)
)
self.model.compile(
optimizer=self.optimizer, loss=self.loss, metrics=self.metrics
)
lq.models.summary(self.model)
self.model.fit(
train_data,
epochs=self.epochs,
steps_per_epoch=num_train_examples // self.batch_size,
validation_data=validation_data,
validation_steps=num_validation_examples // self.batch_size,
)
if __name__ == "__main__":
cli()
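# Typical invocation (an assumption based on zookeeper's CLI conventions; the exact task
# name registered by the CLI may differ between zookeeper versions):
#     python <this_script>.py binary_net_mnist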
|
the-stack_0_25149
|
import traci
traci.start(['sumo-gui', '-c', 'test.sumocfg', '--device.taxi.dispatch-algorithm', 'traci'])
step = 0
traci.route.add('r1', ['edge1', 'edge2'])
traci.route.add('r2', ['edge2', 'edge3'])
traci.route.add('r3', ['edge3', 'edge4'])
traci.route.add('r4', ['edge4', 'edge1'])
traci.person.add('per1', 'edge1', 0)
traci.person.appendDrivingStage('per1', 'edge2', 'taxi')
traci.person.setColor('per1', (255, 0, 0))
traci.vehicle.add('taxi1', 'r2', typeID='taxi')
while step < 1000:
traci.simulationStep()
step += 1
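    # Poll the open taxi reservations each step (False/0 requests all reservations,
    # not only those created since the last call).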
reservations = traci.person.getTaxiReservations(False)
print(traci.person.getStage('per1').description)
if len(reservations) > 0:
print(reservations)
traci.vehicle.dispatchTaxi('taxi1', reservations[0].id)
traci.close()
|
the-stack_0_25151
|
import datetime
import os
from pathlib import Path
from typing import Any, List, Tuple
import pkg_resources
from chia.util.ssl_check import DEFAULT_PERMISSIONS_CERT_FILE, DEFAULT_PERMISSIONS_KEY_FILE
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives.serialization import load_pem_private_key
from cryptography.x509.oid import NameOID
def get_chia_ca_crt_key() -> Tuple[Any, Any]:
crt = pkg_resources.resource_string(__name__, "heather_ca.crt")
key = pkg_resources.resource_string(__name__, "heather_ca.key")
return crt, key
def get_mozilla_ca_crt() -> str:
mozilla_path = Path(__file__).parent.parent.parent.absolute() / "mozilla-ca/cacert.pem"
return str(mozilla_path)
def write_ssl_cert_and_key(cert_path: Path, cert_data: bytes, key_path: Path, key_data: bytes):
flags = os.O_CREAT | os.O_EXCL | os.O_WRONLY
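    # os.O_EXCL makes the open call fail if the target already exists, so any stale file is
    # removed first and then recreated with the restricted permission bits from ssl_check.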
for path, data, mode in [
(cert_path, cert_data, DEFAULT_PERMISSIONS_CERT_FILE),
(key_path, key_data, DEFAULT_PERMISSIONS_KEY_FILE),
]:
if path.exists():
path.unlink()
with open(os.open(str(path), flags, mode), "wb") as f:
f.write(data) # lgtm [py/clear-text-storage-sensitive-data]
def ensure_ssl_dirs(dirs: List[Path]):
"""Create SSL dirs with a default 755 mode if necessary"""
for dir in dirs:
if not dir.exists():
dir.mkdir(mode=0o755)
def generate_ca_signed_cert(ca_crt: bytes, ca_key: bytes, cert_out: Path, key_out: Path):
one_day = datetime.timedelta(1, 0, 0)
root_cert = x509.load_pem_x509_certificate(ca_crt, default_backend())
root_key = load_pem_private_key(ca_key, None, default_backend())
cert_key = rsa.generate_private_key(public_exponent=65537, key_size=2048, backend=default_backend())
new_subject = x509.Name(
[
x509.NameAttribute(NameOID.COMMON_NAME, "Heather"),
x509.NameAttribute(NameOID.ORGANIZATION_NAME, "Heather"),
x509.NameAttribute(NameOID.ORGANIZATIONAL_UNIT_NAME, "Heather Blockchain"),
]
)
cert = (
x509.CertificateBuilder()
.subject_name(new_subject)
.issuer_name(root_cert.issuer)
.public_key(cert_key.public_key())
.serial_number(x509.random_serial_number())
.not_valid_before(datetime.datetime.today() - one_day)
.not_valid_after(datetime.datetime(2100, 8, 2))
.add_extension(
x509.SubjectAlternativeName([x509.DNSName("heathernetwork.io")]),
critical=False,
)
.sign(root_key, hashes.SHA256(), default_backend())
)
cert_pem = cert.public_bytes(encoding=serialization.Encoding.PEM)
key_pem = cert_key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption(),
)
write_ssl_cert_and_key(cert_out, cert_pem, key_out, key_pem)
def make_ca_cert(cert_path: Path, key_path: Path):
root_key = rsa.generate_private_key(public_exponent=65537, key_size=2048, backend=default_backend())
subject = issuer = x509.Name(
[
x509.NameAttribute(NameOID.ORGANIZATION_NAME, "Heather"),
x509.NameAttribute(NameOID.COMMON_NAME, "Heather CA"),
x509.NameAttribute(NameOID.ORGANIZATIONAL_UNIT_NAME, "Heather Blockchain"),
]
)
root_cert = (
x509.CertificateBuilder()
.subject_name(subject)
.issuer_name(issuer)
.public_key(root_key.public_key())
.serial_number(x509.random_serial_number())
.not_valid_before(datetime.datetime.utcnow())
.not_valid_after(datetime.datetime.utcnow() + datetime.timedelta(days=7300))
.add_extension(x509.BasicConstraints(ca=True, path_length=None), critical=True)
.sign(root_key, hashes.SHA256(), default_backend())
)
cert_pem = root_cert.public_bytes(encoding=serialization.Encoding.PEM)
key_pem = root_key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption(),
)
write_ssl_cert_and_key(cert_path, cert_pem, key_path, key_pem)
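# Illustrative sketch (the helper name and output paths are assumptions, not part of the
# original module): create a CA with make_ca_cert, then issue a certificate signed by it
# via generate_ca_signed_cert.
def _example_issue_node_cert(out_dir: Path = Path(".")) -> None:
    ca_crt_path = out_dir / "heather_ca.crt"
    ca_key_path = out_dir / "heather_ca.key"
    make_ca_cert(ca_crt_path, ca_key_path)
    generate_ca_signed_cert(
        ca_crt_path.read_bytes(),
        ca_key_path.read_bytes(),
        out_dir / "node.crt",
        out_dir / "node.key",
    )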
def main():
return make_ca_cert(Path("./heather_ca.crt"), Path("./heather_ca.key"))
if __name__ == "__main__":
main()
|
the-stack_0_25152
|
"""Helper functions and classes for users.
They should not be used in skorch directly.
"""
from collections.abc import Sequence
from functools import partial
import numpy as np
from skorch.cli import parse_args
from skorch.utils import _make_split
from skorch.utils import is_torch_data_type
class SliceDict(dict):
"""Wrapper for Python dict that makes it sliceable across values.
Use this if your input data is a dictionary and you have problems
with sklearn not being able to slice it. Wrap your dict with
SliceDict and it should usually work.
Note:
* SliceDict cannot be indexed by integers, if you want one row,
say row 3, use `[3:4]`.
* SliceDict accepts numpy arrays and torch tensors as values.
Examples
--------
>>> X = {'key0': val0, 'key1': val1}
>>> search = GridSearchCV(net, params, ...)
>>> search.fit(X, y) # raises error
>>> Xs = SliceDict(key0=val0, key1=val1) # or Xs = SliceDict(**X)
>>> search.fit(Xs, y) # works
"""
def __init__(self, **kwargs):
lengths = [value.shape[0] for value in kwargs.values()]
lengths_set = set(lengths)
if lengths_set and (len(lengths_set) != 1):
raise ValueError(
"Initialized with items of different lengths: {}"
"".format(', '.join(map(str, sorted(lengths_set)))))
if not lengths:
self._len = 0
else:
self._len = lengths[0]
super(SliceDict, self).__init__(**kwargs)
def __len__(self):
return self._len
def __getitem__(self, sl):
if isinstance(sl, int):
# Indexing with integers is not well-defined because that
            # reduces the dimension of arrays by one, messing up
# lengths and shapes.
raise ValueError("SliceDict cannot be indexed by integers.")
if isinstance(sl, str):
return super(SliceDict, self).__getitem__(sl)
return SliceDict(**{k: v[sl] for k, v in self.items()})
def __setitem__(self, key, value):
if not isinstance(key, str):
raise TypeError("Key must be str, not {}.".format(type(key)))
length = value.shape[0]
if not self.keys():
self._len = length
if self._len != length:
raise ValueError(
"Cannot set array with shape[0] != {}"
"".format(self._len))
super(SliceDict, self).__setitem__(key, value)
def update(self, kwargs):
for key, value in kwargs.items():
self.__setitem__(key, value)
def __repr__(self):
out = super(SliceDict, self).__repr__()
return "SliceDict(**{})".format(out)
@property
def shape(self):
return (self._len,)
def copy(self):
return type(self)(**self)
def fromkeys(self, *args, **kwargs):
"""fromkeys method makes no sense with SliceDict and is thus not
supported."""
raise TypeError("SliceDict does not support fromkeys.")
def __eq__(self, other):
if self.keys() != other.keys():
return False
for key, val in self.items():
val_other = other[key]
# torch tensors
if is_torch_data_type(val):
if not is_torch_data_type(val_other):
return False
if not (val == val_other).all():
return False
continue
# numpy arrays
if isinstance(val, np.ndarray):
if not isinstance(val_other, np.ndarray):
return False
if not (val == val_other).all():
return False
continue
# rest
if val != val_other:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
# This class must be an instance of Sequence and have an ndim
# attribute because sklearn will test this.
class SliceDataset(Sequence):
# pylint: disable=anomalous-backslash-in-string
"""Helper class that wraps a torch dataset to make it work with
sklearn.
Sometimes, sklearn will touch the input data, e.g. when splitting
the data for a grid search. This will fail when the input data is
a torch dataset. To prevent this, use this wrapper class for your
dataset.
Note: This class will only return the X value by default (i.e. the
first value returned by indexing the original dataset). Sklearn,
and hence skorch, always require 2 values, X and y. Therefore, you
still need to provide the y data separately.
Note: This class behaves similarly to a PyTorch
:class:`~torch.utils.data.Subset` when it is indexed by a slice or
numpy array: It will return another ``SliceDataset`` that
references the subset instead of the actual values. Only when it
is indexed by an int does it return the actual values. The reason
for this is to avoid loading all data into memory when sklearn,
for instance, creates a train/validation split on the
dataset. Data will only be loaded in batches during the fit loop.
Examples
--------
>>> X = MyCustomDataset()
>>> search = GridSearchCV(net, params, ...)
>>> search.fit(X, y) # raises error
>>> ds = SliceDataset(X)
>>> search.fit(ds, y) # works
Parameters
----------
dataset : torch.utils.data.Dataset
A valid torch dataset.
idx : int (default=0)
Indicates which element of the dataset should be
returned. Typically, the dataset returns both X and y
values. SliceDataset can only return 1 value. If you want to
get X, choose idx=0 (default), if you want y, choose idx=1.
indices : list, np.ndarray, or None (default=None)
If you only want to return a subset of the dataset, indicate
which subset that is by passing this argument. Typically, this
can be left to be None, which returns all the data. See also
:class:`~torch.utils.data.Subset`.
"""
def __init__(self, dataset, idx=0, indices=None):
self.dataset = dataset
self.idx = idx
self.indices = indices
self.indices_ = (self.indices if self.indices is not None
else np.arange(len(self.dataset)))
self.ndim = 1
def __len__(self):
return len(self.indices_)
@property
def shape(self):
return (len(self),)
def transform(self, data):
"""Additional transformations on ``data``.
        Note: If you use this in conjunction with PyTorch
:class:`~torch.utils.data.DataLoader`, the latter will call
the dataset for each row separately, which means that the
        incoming ``data`` is a single row.
"""
return data
def _select_item(self, Xn):
# Raise a custom error message when accessing out of
# bounds. However, this will only trigger as soon as this is
# indexed by an integer.
try:
return Xn[self.idx]
except IndexError:
name = self.__class__.__name__
msg = ("{} is trying to access element {} but there are only "
"{} elements.".format(name, self.idx, len(Xn)))
raise IndexError(msg)
def __getitem__(self, i):
if isinstance(i, (int, np.integer)):
Xn = self.dataset[self.indices_[i]]
Xi = self._select_item(Xn)
return self.transform(Xi)
if isinstance(i, slice):
return SliceDataset(self.dataset, idx=self.idx, indices=self.indices_[i])
if isinstance(i, np.ndarray):
if i.ndim != 1:
raise IndexError("SliceDataset only supports slicing with 1 "
"dimensional arrays, got {} dimensions instead."
"".format(i.ndim))
            if i.dtype == bool:
i = np.flatnonzero(i)
return SliceDataset(self.dataset, idx=self.idx, indices=self.indices_[i])
def predefined_split(dataset):
"""Uses ``dataset`` for validiation in ``NeutralNet``.
Examples
--------
>>> valid_ds = skorch.Dataset(X, y)
    >>> net = NeuralNet(..., train_split=predefined_split(valid_ds))
Parameters
----------
dataset: torch Dataset
        Validation dataset
"""
return partial(_make_split, valid_ds=dataset)
|
the-stack_0_25153
|
# Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Global parameters for the VGGish model.
See vggish_slim.py for more information.
"""
# Architectural constants.
NUM_FRAMES = 96 # Frames in input mel-spectrogram patch.
NUM_BANDS = 64 # Frequency bands in input mel-spectrogram patch.
EMBEDDING_SIZE = 128 # Size of embedding layer.
# Hyperparameters used in feature and example generation.
SAMPLE_RATE = 16000
STFT_WINDOW_LENGTH_SECONDS = 0.025
STFT_HOP_LENGTH_SECONDS = 0.010
NUM_MEL_BINS = NUM_BANDS
MEL_MIN_HZ = 125
MEL_MAX_HZ = 7500
LOG_OFFSET = 0.01 # Offset used for stabilized log of input mel-spectrogram.
EXAMPLE_WINDOW_SECONDS = 0.96 # Each example contains 96 10ms frames
EXAMPLE_HOP_SECONDS = 0.96 # with zero overlap.
# Parameters used for embedding postprocessing.
PCA_EIGEN_VECTORS_NAME = 'pca_eigen_vectors'
PCA_MEANS_NAME = 'pca_means'
QUANTIZE_MIN_VAL = -2.0
QUANTIZE_MAX_VAL = +2.0
# Hyperparameters used in training.
INIT_STDDEV = 0.01 # Standard deviation used to initialize weights.
LEARNING_RATE = 1e-4 # Learning rate for the Adam optimizer.
ADAM_EPSILON = 1e-8 # Epsilon for the Adam optimizer.
# Names of ops, tensors, and features.
INPUT_OP_NAME = 'vggish/input_features'
INPUT_TENSOR_NAME = INPUT_OP_NAME + ':0'
OUTPUT_OP_NAME = 'vggish/embedding'
OUTPUT_TENSOR_NAME = OUTPUT_OP_NAME + ':0'
AUDIO_EMBEDDING_FEATURE_NAME = 'audio_embedding'
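# Derived quantities (illustrative only, computed from the constants above):
#     window_length_samples = int(round(SAMPLE_RATE * STFT_WINDOW_LENGTH_SECONDS))          # 400
#     hop_length_samples    = int(round(SAMPLE_RATE * STFT_HOP_LENGTH_SECONDS))             # 160
#     frames_per_example    = int(round(EXAMPLE_WINDOW_SECONDS / STFT_HOP_LENGTH_SECONDS))  # 96 == NUM_FRAMES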
|
the-stack_0_25156
|
BASE_URL = "https://hazlitt.net/longreads?page="
N_ARTICLE_LINK_PAGES = 17
OUTPUT_FILE = '../article-lists/hazlitt-article-urls.json'
WORKER_THREADS = 16
import json
import datetime
import dateutil.parser
from dataclasses import dataclass
from dataclasses_json import dataclass_json
from datetime import datetime
from newspaper import Article
from bs4 import BeautifulSoup
from typing import List
from queue import Queue
from threading import Thread
from requests import get
@dataclass_json
@dataclass
class AeonArticleUrl:
url: str
title: str
class WriteThread(Thread):
def __init__(self, queue: Queue, *args, **kwargs):
super().__init__(*args, **kwargs)
self.queue = queue
def run(self):
with open(OUTPUT_FILE, 'a') as output_file:
output_file.write("[\n")
first_entry = True
while True:
article = self.queue.get()
if article is None:
output_file.write("\n]")
break
article_json = article.to_json(indent=4)
if first_entry:
first_entry = False
else:
output_file.write(",\n")
output_file.write(article_json)
class ScrapeThread(Thread):
def __init__(self, chunk, queue: Queue, *args, **kwargs):
super().__init__(*args, **kwargs)
self.chunk = chunk
self.queue = queue
def run(self):
for i in self.chunk:
try:
print(f'Getting articles from list page {i}')
article_list_page = get(f"{BASE_URL}{i}")
soup = BeautifulSoup(article_list_page.text, "html5lib")
articles = soup.find_all('div', {'class': 'views-row'})
for article in articles:
stuff = article.find('div', {'class': 'article-title'})
                    title = stuff.find('a')
                    if title is None or title.string is None:
                        continue
                    print(title.string.strip())
article_url = AeonArticleUrl(url="https://hazlitt.net" + title['href'], title=str(title.string.strip()) or '')
self.queue.put(article_url)
except Exception as e:
print(f'Something went wrong when scraping: {e}')
print("------------------------------------------")
if __name__ == '__main__':
queue = Queue()
write_thread = WriteThread(queue)
write_thread.start()
worker_threads = []
chunk_size = N_ARTICLE_LINK_PAGES // WORKER_THREADS
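    # Note: with N_ARTICLE_LINK_PAGES = 17 and 16 workers, chunk_size is 1, so the loop
    # below spawns one single-page worker per page (pages 0 through 17), not 16 workers.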
for i in range(0, N_ARTICLE_LINK_PAGES+1, chunk_size):
chunk = range(i,i+chunk_size)
worker_threads.append(ScrapeThread(chunk, queue))
for thread in worker_threads:
thread.start()
for thread in worker_threads:
thread.join()
# Signal end of jobs to write thread
queue.put(None)
print('Done.')
write_thread.join()
|