ext (stringclasses, 9 values) | sha (stringlengths, 40 to 40) | content (stringlengths, 3 to 1.04M)
---|---|---|
py | 1a372df8fc870c0e15ca2ae31c172774214d7abe | """
WSGI config for website project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'website.settings')
application = get_wsgi_application()
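# Usage sketch (not part of the original file, assuming the standard Django layout where this
# module is website/wsgi.py): any WSGI server can load the callable above by module path, e.g.
#   gunicorn website.wsgi:application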
|
py | 1a372e87ea9a38ef8fdfc9ad85d5dfa405326c1d | import csv
from dateutil import tz
from dateutil.parser import parse
import json
import os
import requests
import time
import rt
import sys
sys.path.append('/Users/juliangautier/dataverse-scripts/dataverse_repository_curation_assistant')
from dataverse_repository_curation_assistant_functions import *
# From the user, get the installation URL, API key, and the directory to save the CSV file
installationUrl = ''
apiKey = ''
directoryPath = ''
# To search RT system for emails that locked dataset owners have sent to Dataverse support,
# include your RT username and password
rtUserLogin = ''
rtUserPassword = ''
# List lock types. See https://guides.dataverse.org/en/5.10/api/native-api.html?highlight=locks#list-locks-across-all-datasets
lockTypesList = ['Ingest', 'finalizePublication']
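# Note: the parsing below assumes the locks endpoint returns JSON roughly shaped like this
# (an illustrative sketch inferred from how this script reads the response, not copied from the API docs):
# {
#   "status": "OK",
#   "data": [
#     {"lockType": "Ingest", "date": "...", "user": "...", "dataset": "doi:..."},
#     ...
#   ]
# }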
currentTime = time.strftime('%Y.%m.%d_%H.%M.%S')
datasetPids = []
# Get dataset PIDs of datasets that have any of the lock types in lockTypesList
for lockType in lockTypesList:
    datasetLocksApiEndpoint = f'{installationUrl}/api/datasets/locks?type={lockType}'
    response = requests.get(
        datasetLocksApiEndpoint,
        headers={'X-Dataverse-key': apiKey})
    data = response.json()
    if data['status'] == 'OK':
        for lock in data['data']:
            datasetPid = lock['dataset']
            datasetPids.append(datasetPid)

# Use set function to deduplicate datasetPids list and convert set to a list again
datasetPids = list(set(datasetPids))

total = len(datasetPids)

if total == 0:
    print('No locked datasets found.')

elif total > 0:
    # If RT credentials are provided, log in to RT to search for support emails
    # from the dataset depositors
    if rtUserLogin != '' and rtUserPassword != '':
        tracker = rt.Rt('https://help.hmdc.harvard.edu/REST/1.0/', rtUserLogin, rtUserPassword)
        tracker.login()
        print('Logged into RT support email system')

    count = 0

    # Create CSV file and header row
    csvOutputFile = f'dataset_locked_status_{currentTime}.csv'
    csvOutputFilePath = os.path.join(directoryPath, csvOutputFile)

    with open(csvOutputFilePath, mode='w', newline='') as f:
        f = csv.writer(f, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
        f.writerow(['dataset_pid', 'dataset_url', 'lock_reason', 'locked_date', 'user_name', 'contact_email', 'rtticket_urls'])

        # For each dataset, write to the CSV file info about each lock the dataset has
        for datasetPid in datasetPids:

            # Get the dataset's metadata, then the contact email addresses of the dataset
            datasetMetadata = get_dataset_metadata_export(
                installationUrl=installationUrl, datasetPid=datasetPid,
                exportFormat='dataverse_json', header={}, apiKey=apiKey)

            contactEmailsList = []
            for field in datasetMetadata['data']['latestVersion']['metadataBlocks']['citation']['fields']:
                if field['typeName'] == 'datasetContact':
                    for contact in field['value']:
                        contactEmail = contact['datasetContactEmail']['value']
                        contactEmailsList.append(contactEmail)
            contactEmailsString = list_to_string(contactEmailsList)

            # If an RT username and password are provided, use the contact email addresses to
            # search for support emails from the dataset owner
            if rtUserLogin != '' and rtUserPassword != '':
                rtTicketUrlsList = []
                for contactEmail in contactEmailsList:
                    # Search RT system for emails sent from the contact email address
                    searchResults = tracker.search(
                        Queue='dataverse_support',
                        raw_query=f'Requestor.EmailAddress="{contactEmail}"')

                    # If there are any RT tickets found, save the ticket URL
                    if len(searchResults) > 0:
                        for rtTicket in searchResults:
                            rtTicketID = rtTicket['numerical_id']
                            rtTicketUrl = f'https://help.hmdc.harvard.edu/Ticket/Display.html?id={rtTicketID}'
                            rtTicketUrlsList.append(rtTicketUrl)

                # Use set function to deduplicate rtTicketUrlsList list and convert set to a list again
                rtTicketUrlsList = list(set(rtTicketUrlsList))

                # Convert list of ticket URLs to a string (to add to CSV file later)
                rtTicketUrlsString = list_to_string(rtTicketUrlsList)

            # If no RT username or password is provided...
            else:
                rtTicketUrlsString = 'Not logged into RT. Provide RT username and password.'

            # Get all data about locks on the dataset
            url = f'{installationUrl}/api/datasets/:persistentId/locks?persistentId={datasetPid}'
            allLockData = requests.get(url).json()

            count += 1
            for lock in allLockData['data']:
                datasetUrl = f'{installationUrl}/dataset.xhtml?persistentId={datasetPid}'
                reason = lock['lockType']
                lockedDate = convert_to_local_tz(lock['date'], shortDate=True)
                userName = lock['user']
                f.writerow([datasetPid, datasetUrl, reason, lockedDate, userName, contactEmailsString, rtTicketUrlsString])

            print(f'Recording information about {count} of {total} datasets: {datasetPid}')
|
py | 1a372ebd27377083f6d60683297eb387b64361c9 | import scrapy # noqa: F401
import snoop
import isort # noqa: F401
from itertools import zip_longest
class SPIDER_1984(scrapy.Spider):
    name = 'spider_1984'
    start_urls = ["https://www.reddit.com/r/linux/comments/sjlu6l/version_0345_pipewire/"]

    @snoop
    def parse(self, response):
        srch_titles = response.xpath('//h1/text()').getall()
        srch_links = response.xpath('//a[@href]').getall()
        srch_content = response.xpath("//p/text()").getall()
        for item in zip_longest(srch_titles, srch_links, srch_content, fillvalue='missing'):
            results = {
                "title": item[0],
                "links": item[1],
                "content": item[2],
            }
            yield results
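# Usage sketch (the file name below is hypothetical; runspider and -o are standard Scrapy CLI options):
#   scrapy runspider spider_1984.py -o results.json
# Each dict yielded by parse() becomes one record in results.json.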
|
py | 1a372ee26fa1d7c9210f04df430c29c7a663adf5 | import sys
sys.path.insert(0, '../../')
import pyrosim
def send_point_mass_example( sim ):
    fixed_box = sim.send_box( position = ( -2, 0, 1 ) )
    sim.send_slider_joint( -1, fixed_box, joint_range = 0 )

    free_box = sim.send_box( position = ( -2.5, 0, 1 ) )

    sim.send_point_mass_spring_joint( fixed_box, free_box,
                                      resting_length = 0.5,
                                      stiffness = 1.0 )

def send_hinge_example( sim ):
    fixed_box = sim.send_box( position = ( 0.5, 0, 1 ),
                              color = ( 1, 0, 0 ) )
    sim.send_slider_joint( -1, fixed_box, joint_range = 0 )

    free_box = sim.send_box( position = ( -0.5, 0, 1 ),
                             color = ( 1, 0, 0 ) )

    sim.send_hinge_spring_joint( fixed_box, free_box,
                                 stiffness = 0.5,
                                 axis1 = ( 0, 1, 0 ),
                                 axis2 = ( 0, 0, 1 ),
                                 damping = 0.01 )

def send_linear_example( sim ):
    box1 = sim.send_box( position = ( 2, 0, 1 ),
                         color = ( 0, 1, 0 ) )
    box2 = sim.send_box( position = ( 2.5, 0, 1 ),
                         color = ( 0, 1, 0 ) )

    sim.send_linear_spring_joint( box1, box2,
                                  stiffness = 1.0,
                                  resting_length = 0.75,
                                  damping = 0.01 )
sim = pyrosim.Simulator( eval_steps = -1, play_paused = True, draw_joints = True )
sim.set_friction( mu = 0 )
sim.set_current_collision_group( 'springs' )
send_point_mass_example( sim )
send_linear_example( sim )
send_hinge_example( sim )
sim.set_current_collision_group( 'environment' )
# send env box
env_box = sim.send_box( position = ( -0.6, 0, 4 ),
color = ( 0, 0, 0 ) )
sim.assign_collision( 'springs', 'environment' )
sim.start()
sim.wait_to_finish()
print(sim._raw_cerr) |
py | 1a372f394ce8ef68be0d085bd31ee5e3e530dcc0 | #!/usr/bin/env python3
import json
import sys
pastafile = "./plugins/copypasta/copypastas.json"
def commandlist(obj):
    commands = ""
    for key in obj:
        commands += str(key) + " "
    cmds = "`"+commands.strip().replace(" ", ", ")+"`"
    return cmds

with open(pastafile) as pf:
    try:
        obj = json.load(pf)
    except ValueError:
        print('Error loading JSON from file')

if (len(sys.argv) < 3):
    cmds = commandlist(obj)
    reply = "Missing argument. Current available copypasta are: " + cmds
else:
    pasta = ""
    for key in obj:
        if (sys.argv[2] == key):
            pasta = obj[key]
    if (pasta == ""):
        cmds = commandlist(obj)
        reply = "Invalid argument. Current available copypasta are: " + cmds
    else:
        reply = pasta
print(reply)
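# Usage sketch (hypothetical invocation; the calling bot framework is assumed to pass the command
# word as sys.argv[1] and the copypasta name as sys.argv[2]):
#   python3 copypasta.py !pasta navy
# With copypastas.json containing {"navy": "...", "bee": "..."}, commandlist(obj) returns
# "`navy, bee`", which is what the "available copypasta" replies above interpolate.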
|
py | 1a372f714f6aa22ae8ee5e2611389e9190520dea | #!/usr/bin/env python
r"""
See help text for details.
"""
import sys
import subprocess
import re
save_dir_path = sys.path.pop(0)
modules = ['gen_arg', 'gen_print', 'gen_valid', 'gen_misc', 'gen_cmd', 'var_funcs']
for module in modules:
    exec("from " + module + " import *")
sys.path.insert(0, save_dir_path)
parser = argparse.ArgumentParser(
usage='%(prog)s [OPTIONS]',
description="%(prog)s will create a status file path name adhering to the"
+ " following pattern: <status dir path>/<prefix>.yymmdd."
+ "hhmmss.status. It will then run the command string and"
+ " direct its stdout/stderr to the status file and optionally"
+ " to stdout. This dual output streaming will be"
+ " accomplished using either the \"script\" or the \"tee\""
+ " program. %(prog)s will also set and export environment"
+ " variable \"AUTO_STATUS_FILE_PATH\" for the benefit of"
+ " child programs.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prefix_chars='-+')
parser.add_argument(
'--status_dir_path',
default='',
help="The path to the directory where the status file will be created."
+ "%(default)s The default value is obtained from environment"
+ " variable \"${STATUS_DIR_PATH}\", if set or from \"${HOME}/"
+ "status/\".")
parser.add_argument(
'--prefix',
default='',
help="The prefix for the generated file name.%(default)s The default value"
+ " is the command portion (i.e. the first token) of the command"
+ " string.")
parser.add_argument(
'--status_file_name',
default='',
help="This allows the user to explicitly specify the status file name. If"
+ " this argument is not used, %(prog)s composes a status file name."
+ " If this argument is specified, the \"--prefix\" argument is"
+ " ignored.")
parser.add_argument(
'--stdout',
default=1,
type=int,
choices=[1, 0],
help="Indicates that stdout/stderr from the command string execution"
+ " should be written to stdout as well as to the status file.")
parser.add_argument(
'--tee',
default=1,
type=int,
choices=[1, 0],
help="Indicates that \"tee\" rather than \"script\" should be used.")
parser.add_argument(
'--show_url',
default=0,
type=int,
choices=[1, 0],
help="Indicates that the status file path shown should be shown in the"
+ " form of a url. If the output is to be viewed from a browser,"
+ " this may well become a clickable link. Note that the"
+ " get_file_path_url.py program must be found in the \"PATH\""
+ " environment variable for this argument to be effective.")
parser.add_argument(
'command_string',
default='',
nargs='*',
help="The command string to be run.%(default)s")
# Populate stock_list with options we want.
stock_list = [("test_mode", 0), ("quiet", 1), ("debug", 0)]
def validate_parms():
    r"""
    Validate program parameters, etc.
    """

    global status_dir_path
    global command_string

    # Convert command_string from list to string.
    command_string = " ".join(command_string)
    set_pgm_arg(command_string)
    valid_value(command_string)

    if status_dir_path == "":
        status_dir_path = \
            os.environ.get("STATUS_DIR_PATH",
                           os.environ.get("HOME") + "/status/")
    status_dir_path = add_trailing_slash(status_dir_path)
    set_pgm_arg(status_dir_path)
    valid_dir_path(status_dir_path)

    global prefix
    global status_file_name
    if status_file_name == "":
        if prefix == "":
            prefix = command_string.split(" ")[0]
            # File extensions (e.g. ".sh", ".py", etc.) look clumsy in status file names.
            extension_regex = "\\.[a-zA-Z0-9]{1,3}$"
            prefix = re.sub(extension_regex, "", prefix)
            set_pgm_arg(prefix)
        status_file_name = prefix + "." + file_date_time_stamp() + ".status"
        set_pgm_arg(status_file_name)

    global status_file_path
    status_file_path = status_dir_path + status_file_name
    # Set environment variable for the benefit of child programs.
    os.environ['AUTO_STATUS_FILE_PATH'] = status_file_path
    # Set deprecated but still used AUTOSCRIPT_STATUS_FILE_PATH value.
    os.environ['AUTOSCRIPT_STATUS_FILE_PATH'] = status_file_path

def script_func(command_string, status_file_path):
    r"""
    Run the command string producing both stdout and file output via the script command and return the
    shell_rc.

    Description of argument(s):
    command_string      The command string to be run.
    status_file_path    The path to the status file which is to contain a copy of all stdout.
    """

    cmd_buf = "script -a -q -f " + status_file_path + " -c '" \
        + escape_bash_quotes(command_string) + " ; printf \"\\n" \
        + sprint_varx(ret_code_str, "${?}").rstrip("\n") + "\\n\"'"
    qprint_issuing(cmd_buf)
    sub_proc = subprocess.Popen(cmd_buf, shell=True)
    sub_proc.communicate()
    shell_rc = sub_proc.returncode

    # Retrieve return code by examining ret_code_str output statement from status file.
    # Example text to be analyzed.
    # auto_status_file_ret_code:  127
    cmd_buf = "tail -n 10 " + status_file_path + " | egrep -a \"" \
        + ret_code_str + ":[ ]+\""
    rc, output = shell_cmd(cmd_buf)
    key, value = parse_key_value(output)
    shell_rc = int(value)

    return shell_rc
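# Illustration (not part of the original script): with command_string "hostname" and a ret_code_str of
# "auto_status_file_ret_code", the cmd_buf built in script_func would look roughly like
#   script -a -q -f <status_file_path> -c 'hostname ; printf "\nauto_status_file_ret_code:  ${?}\n"'
# so the command's return code is written into the status file, where the tail/egrep step recovers it.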
def tee_func(command_string, status_file_path):
    r"""
    Run the command string producing both stdout and file output via the tee command and return the shell_rc.

    Description of argument(s):
    command_string      The command string to be run.
    status_file_path    The path to the status file which is to contain a copy of all stdout.
    """

    cmd_buf = "set -o pipefail ; " + command_string + " 2>&1 | tee -a " \
        + status_file_path
    qprint_issuing(cmd_buf)
    sub_proc = subprocess.Popen(cmd_buf, shell=True)
    sub_proc.communicate()
    shell_rc = sub_proc.returncode

    print()
    print_varx(ret_code_str, shell_rc)
    with open(status_file_path, "a") as status_file:
        # Append ret code string and status_file_path to end of status file.
        status_file.write("\n" + sprint_varx(ret_code_str, shell_rc))

    return shell_rc
def main():

    gen_setup()

    set_term_options(term_requests={'pgm_names': [command_string.split(" ")[0]]})

    global ret_code_str
    ret_code_str = re.sub("\\.py$", "", pgm_name) + "_ret_code"

    global show_url
    if show_url:
        shell_rc, output = shell_cmd("which get_file_path_url.py", show_err=0)
        if shell_rc != 0:
            show_url = 0
            set_pgm_arg(show_url)
        else:
            shell_rc, status_file_url = shell_cmd("get_file_path_url.py "
                                                  + status_file_path)
            status_file_url = status_file_url.rstrip("\n")

    # Print status file path/url to stdout and to status file.
    with open(status_file_path, "w+") as status_file:
        if show_url:
            print_var(status_file_url)
            status_file.write(sprint_var(status_file_url))
        else:
            print_var(status_file_path)
            status_file.write(sprint_var(status_file_path))

    if stdout:
        if tee:
            shell_rc = tee_func(command_string, status_file_path)
        else:
            shell_rc = script_func(command_string, status_file_path)
        if show_url:
            print_var(status_file_url)
        else:
            print_var(status_file_path)
    else:
        cmd_buf = command_string + " >> " + status_file_path + " 2>&1"
        shell_rc, output = shell_cmd(cmd_buf, show_err=0)
        with open(status_file_path, "a") as status_file:
            # Append ret code string and status_file_path to end of status
            # file.
            status_file.write("\n" + sprint_varx(ret_code_str, shell_rc))

    # Append status_file_path print statement to end of status file.
    with open(status_file_path, "a") as status_file:
        if show_url:
            status_file.write(sprint_var(status_file_url))
        else:
            status_file.write(sprint_var(status_file_path))

    exit(shell_rc)

main()
|
py | 1a372fe70d4712a1703548e4496e067ea8ffc522 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import cstr
import mimetypes, json
from werkzeug.wrappers import Response
from frappe.website.context import get_context
from frappe.website.utils import scrub_relative_urls, get_home_page, can_cache, delete_page_cache
from frappe.website.router import clear_sitemap
class PageNotFoundError(Exception): pass
def render(path, http_status_code=None):
"""render html page"""
path = resolve_path(path.strip("/ "))
frappe.local.path = path
try:
data = render_page(path)
except frappe.DoesNotExistError, e:
doctype, name = get_doctype_from_path(path)
if doctype and name:
path = "print"
frappe.local.form_dict.doctype = doctype
frappe.local.form_dict.name = name
elif doctype:
path = "list"
frappe.local.form_dict.doctype = doctype
else:
path = "404"
http_status_code = e.http_status_code
try:
data = render_page(path)
except frappe.PermissionError, e:
data, http_status_code = render_403(e, path)
except frappe.PermissionError, e:
data, http_status_code = render_403(e, path)
except frappe.Redirect, e:
return build_response(path, "", 301, {
"Location": frappe.flags.redirect_location,
"Cache-Control": "no-store, no-cache, must-revalidate"
})
except Exception:
path = "error"
data = render_page(path)
http_status_code = 500
return build_response(path, data, http_status_code or 200)
def render_403(e, pathname):
path = "message"
frappe.local.message = """<p><strong>{error}</strong></p>
<p>
<a href="/login?redirect-to=/{pathname}" class="btn btn-primary">{login}</a>
</p>""".format(error=cstr(e), login=_("Login"), pathname=frappe.local.path)
frappe.local.message_title = _("Not Permitted")
return render_page(path), e.http_status_code
def get_doctype_from_path(path):
doctypes = frappe.db.sql_list("select name from tabDocType")
parts = path.split("/")
doctype = parts[0]
name = parts[1] if len(parts) > 1 else None
if doctype in doctypes:
return doctype, name
# try scrubbed
doctype = doctype.replace("_", " ").title()
if doctype in doctypes:
return doctype, name
return None, None
def build_response(path, data, http_status_code, headers=None):
# build response
response = Response()
response.data = set_content_type(response, data, path)
response.status_code = http_status_code
response.headers[b"X-Page-Name"] = path.encode("utf-8")
response.headers[b"X-From-Cache"] = frappe.local.response.from_cache or False
if headers:
for key, val in headers.iteritems():
response.headers[bytes(key)] = val.encode("utf-8")
return response
def render_page(path):
"""get page html"""
cache_key = ("page_context:{}" if is_ajax() else "page:{}").format(path)
out = None
# try memcache
if can_cache():
out = frappe.cache().get_value(cache_key)
if out and is_ajax():
out = out.get("data")
if out:
frappe.local.response.from_cache = True
return out
return build(path)
def build(path):
if not frappe.db:
frappe.connect()
build_method = (build_json if is_ajax() else build_page)
try:
return build_method(path)
except frappe.DoesNotExistError:
hooks = frappe.get_hooks()
if hooks.website_catch_all:
path = hooks.website_catch_all[0]
return build_method(path)
else:
raise
def build_json(path):
return get_context(path).data
def build_page(path):
context = get_context(path)
html = frappe.get_template(context.base_template_path).render(context)
html = scrub_relative_urls(html)
if can_cache(context.no_cache):
frappe.cache().set_value("page:" + path, html)
return html
def is_ajax():
return getattr(frappe.local, "is_ajax", False)
def resolve_path(path):
if not path:
path = "index"
if path.endswith('.html'):
path = path[:-5]
if path == "index":
path = get_home_page()
return path
def set_content_type(response, data, path):
if isinstance(data, dict):
response.headers[b"Content-Type"] = b"application/json; charset: utf-8"
data = json.dumps(data)
return data
response.headers[b"Content-Type"] = b"text/html; charset: utf-8"
if "." in path:
content_type, encoding = mimetypes.guess_type(path)
if not content_type:
content_type = "text/html; charset: utf-8"
response.headers[b"Content-Type"] = content_type.encode("utf-8")
return data
def clear_cache(path=None):
if path:
delete_page_cache(path)
else:
clear_sitemap()
frappe.clear_cache("Guest")
frappe.cache().delete_value("_website_pages")
for method in frappe.get_hooks("website_clear_cache"):
frappe.get_attr(method)(path)
|
py | 1a3730ea7a142d83ec4c1441e808be3b0c84a7af | class Game:
"""
@brief A Game object, used along with the Games data source.
This is a convenience class provided for users who wish to use this
data source as part of their application. It provides an API that makes
it easy to access the attributes of this data set.
Each game record has title, platform on which it can be played,
rating, and a list of genres.
This object is generally not created by the user; to see how it's created, check
out bridges::data_src_dependent::data_source::get_game_data()
@sa For an example, check out https://bridgesuncc.github.io/tutorials/Data_IGN_Games.html
@author Matthew Mcquaigue, Kalpathi Subramanian
@date 2/1/17, 12/29/20, 1/6/21
"""
def __init__(self, title: str = "", platform: str = "", rating: float = 0.0, genre: str = ""):
"""
@brief Constructor
Args:
title: game title
platform: game platform
rating: game rating
genre: game's genres
"""
self._title = title
self._platform = platform
self._rating = rating
self._genre = genre
@property
def title(self):
"""
@brief get game title
Returns:
game title
"""
return self._title
@title.setter
def title(self, t):
"""
@brief Set game title
Args:
t: game title to set
"""
self._title = t
@property
def platform(self):
"""
@brief get game platform
Returns:
game platform
"""
return self._platform
@platform.setter
def platform(self, p):
"""
@brief Set game platform
Args:
p: game platform to set
"""
self._platform = p
@property
def rating(self):
"""
@brief get game rating
Returns:
game rating
"""
return self._rating
@rating.setter
def rating(self, r):
"""
@brief Set game rating
Args:
r: game rating to set
"""
self._rating = r
@property
def genre(self):
"""
@brief get game genres
Returns:
game genres (list of strings)
"""
return self._genre
@genre.setter
def genre(self, g):
"""
@brief Set game genres
Args:
g: game genres to set
"""
self._genre = g
|
py | 1a373100f833540102ea9bea89bd9108c592524d | #!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with this
work for additional information regarding copyright ownership. The ASF
licenses this file to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance with the
License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
The code in this file was developed at Harvard University (2018) and
modified at ChemOS Inc. (2019) as stated in the NOTICE file.
'''
__author__ = 'Florian Hase'
#=========================================================================
from utilities.decorators import safe_execute
from utilities.defaults import default_general_configurations
from utilities.defaults import default_database_configurations
from utilities.exceptions import PhoenicsParseError
from utilities.exceptions import PhoenicsModuleError
from utilities.exceptions import PhoenicsNotFoundError
from utilities.exceptions import PhoenicsUnknownSettingsError
from utilities.exceptions import PhoenicsValueError
from utilities.exceptions import PhoenicsVersionError
from utilities.logger import Logger
from utilities.json_parser import ParserJSON
from utilities.pickle_parser import ParserPickle
from utilities.config_parser import ConfigParser
|
py | 1a373270c18c5cbf872dc3a5401dca8ebb38c100 | import os
import copy
import regex
import asyncio
import logging
import contextlib
import collections
from collections.abc import Mapping
import synapse
import synapse.exc as s_exc
import synapse.axon as s_axon
import synapse.common as s_common
import synapse.telepath as s_telepath
import synapse.datamodel as s_datamodel
import synapse.lib.base as s_base
import synapse.lib.cell as s_cell
import synapse.lib.chop as s_chop
import synapse.lib.coro as s_coro
import synapse.lib.hive as s_hive
import synapse.lib.view as s_view
import synapse.lib.cache as s_cache
import synapse.lib.layer as s_layer
import synapse.lib.nexus as s_nexus
import synapse.lib.queue as s_queue
import synapse.lib.scope as s_scope
import synapse.lib.storm as s_storm
import synapse.lib.agenda as s_agenda
import synapse.lib.config as s_config
import synapse.lib.parser as s_parser
import synapse.lib.dyndeps as s_dyndeps
import synapse.lib.grammar as s_grammar
import synapse.lib.httpapi as s_httpapi
import synapse.lib.modules as s_modules
import synapse.lib.spooled as s_spooled
import synapse.lib.version as s_version
import synapse.lib.modelrev as s_modelrev
import synapse.lib.stormsvc as s_stormsvc
import synapse.lib.lmdbslab as s_lmdbslab
# Importing these registers their commands
import synapse.lib.stormhttp as s_stormhttp # NOQA
import synapse.lib.stormwhois as s_stormwhois # NOQA
import synapse.lib.provenance as s_provenance
import synapse.lib.stormtypes as s_stormtypes
import synapse.lib.stormlib.json as s_stormlib_json # NOQA
import synapse.lib.stormlib.stix as s_stormlib_stix
import synapse.lib.stormlib.macro as s_stormlib_macro
import synapse.lib.stormlib.model as s_stormlib_model
import synapse.lib.stormlib.backup as s_stormlib_backup # NOQA
import synapse.lib.stormlib.infosec as s_stormlib_infosec # NOQA
import synapse.lib.stormlib.project as s_stormlib_project # NOQA
import synapse.lib.stormlib.version as s_stormlib_version # NOQA
import synapse.lib.stormlib.modelext as s_stormlib_modelext # NOQA
logger = logging.getLogger(__name__)
stormlogger = logging.getLogger('synapse.storm')
'''
A Cortex implements the synapse hypergraph object.
'''
reqver = '>=0.2.0,<3.0.0'
# Constants returned in results from syncLayersEvents and syncIndexEvents
SYNC_NODEEDITS = 0 # A nodeedits: (<offs>, 0, <etyp>, (<etype args>), {<meta>})
SYNC_NODEEDIT = 1 # A nodeedit: (<offs>, 0, <etyp>, (<etype args>))
SYNC_LAYR_ADD = 3 # A layer was added
SYNC_LAYR_DEL = 4 # A layer was deleted
# push/pull def
reqValidPush = s_config.getJsValidator({
'type': 'object',
'properties': {
'url': {'type': 'string'},
'time': {'type': 'number'},
'iden': {'type': 'string', 'pattern': s_config.re_iden},
'user': {'type': 'string', 'pattern': s_config.re_iden},
},
'additionalProperties': True,
'required': ['iden', 'url', 'user', 'time'],
})
reqValidPull = reqValidPush
reqValidTagModel = s_config.getJsValidator({
'type': 'object',
'properties': {
'prune': {'type': 'number', 'minimum': 1},
'regex': {'type': 'array', 'items': {'type': ['string', 'null']}},
},
'additionalProperties': False,
'required': [],
})
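# Illustrative example (not from the source): a tag model dict accepted by reqValidTagModel, matching
# the setTagModel() docstring further below, could be:
#   {'prune': 1, 'regex': [None, None, '[0-9]{4}', '[0-9]{5}']}
# i.e. no regex constraint on the first two tag levels, then a 4-digit and a 5-digit level.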
def cmprkey_indx(x):
return x[1]
def cmprkey_buid(x):
return x[1][1]
async def wrap_liftgenr(iden, genr):
async for indx, buid, sode in genr:
yield iden, (indx, buid), sode
class CoreApi(s_cell.CellApi):
'''
The CoreApi is exposed when connecting to a Cortex over Telepath.
Many CoreApi methods operate on packed nodes consisting of primitive data structures
which can be serialized with msgpack/json.
An example of a packed Node::
( (<form>, <valu>), {
"props": {
<name>: <valu>,
...
},
"tags": {
"foo": <time>,
"foo.bar": <time>,
},
})
'''
@s_cell.adminapi()
def getCoreMods(self):
return self.cell.getCoreMods()
def stat(self):
self.user.confirm(('status',))
s_common.deprecated('stat')
return self.cell.stat()
async def getModelDict(self):
'''
Return a dictionary which describes the data model.
Returns:
(dict): A model description dictionary.
'''
return await self.cell.getModelDict()
async def getModelDefs(self):
return await self.cell.getModelDefs()
def getCoreInfo(self):
'''
Return static generic information about the cortex including model definition
'''
return self.cell.getCoreInfo()
async def getCoreInfoV2(self):
'''
Return static generic information about the cortex including model definition
'''
return await self.cell.getCoreInfoV2()
def _reqValidStormOpts(self, opts):
if opts is None:
opts = {}
opts.setdefault('user', self.user.iden)
if opts.get('user') != self.user.iden:
self.user.confirm(('impersonate',))
return opts
async def callStorm(self, text, opts=None):
'''
Return the value expressed in a return() statement within storm.
'''
opts = self._reqValidStormOpts(opts)
return await self.cell.callStorm(text, opts=opts)
async def exportStorm(self, text, opts=None):
'''
Execute a storm query and package nodes for export/import.
NOTE: This API yields nodes after an initial complete lift
in order to limit exported edges.
'''
opts = self._reqValidStormOpts(opts)
async for pode in self.cell.exportStorm(text, opts=opts):
yield pode
async def feedFromAxon(self, sha256, opts=None):
'''
Import a msgpack .nodes file from the axon.
'''
opts = self._reqValidStormOpts(opts)
return await self.cell.feedFromAxon(sha256, opts=opts)
async def addCronJob(self, cdef):
'''
Add a cron job to the cortex
A cron job is a persistently-stored item that causes storm queries to be run in the future. The specification
for the times that the queries run can be one-shot or recurring.
Args:
query (str): The storm query to execute in the future
reqs (Union[Dict[str, Union[int, List[int]]], List[Dict[...]]]):
Either a dict of the fixed time fields or a list of such dicts. The keys are in the set ('year',
'month', 'dayofmonth', 'dayofweek', 'hour', 'minute'. The values must be positive integers, except for
the key of 'dayofmonth' in which it may also be a negative integer which represents the number of days
from the end of the month with -1 representing the last day of the month. All values may also be lists
of valid values.
incunit (Optional[str]):
A member of the same set as above, with an additional member 'day'. If is None (default), then the
appointment is one-shot and will not recur.
incvals (Union[int, List[int]):
An integer or a list of integers of the number of units
Returns (bytes):
An iden that can be used to later modify, query, and delete the job.
Notes:
reqs must have fields present or incunit must not be None (or both)
The incunit, if not None, must be larger in unit size than all the keys in all reqs elements.
'''
cdef['creator'] = self.user.iden
s_common.deprecated('addCronJob')
self.user.confirm(('cron', 'add'), gateiden='cortex')
return await self.cell.addCronJob(cdef)
async def delCronJob(self, iden):
'''
Delete a cron job
Args:
iden (bytes): The iden of the cron job to be deleted
'''
s_common.deprecated('delCronJob')
self.user.confirm(('cron', 'del'), gateiden=iden)
await self.cell.delCronJob(iden)
async def updateCronJob(self, iden, query):
'''
Change an existing cron job's query
Args:
iden (bytes): The iden of the cron job to be changed
'''
s_common.deprecated('updateCronJob')
self.user.confirm(('cron', 'set'), gateiden=iden)
await self.cell.updateCronJob(iden, query)
async def enableCronJob(self, iden):
'''
Enable a cron job
Args:
iden (bytes): The iden of the cron job to be changed
'''
s_common.deprecated('enableCronJob')
self.user.confirm(('cron', 'set'), gateiden=iden)
await self.cell.enableCronJob(iden)
async def disableCronJob(self, iden):
'''
Enable a cron job
Args:
iden (bytes): The iden of the cron job to be changed
'''
s_common.deprecated('disableCronJob')
self.user.confirm(('cron', 'set'), gateiden=iden)
await self.cell.disableCronJob(iden)
async def listCronJobs(self):
'''
Get information about all the cron jobs accessible to the current user
'''
s_common.deprecated('listCronJobs')
crons = []
for cron in await self.cell.listCronJobs():
if not self.user.allowed(('cron', 'get'), gateiden=cron.get('iden')):
continue
crons.append(cron)
return crons
async def editCronJob(self, iden, name, valu):
'''
Update a value in a cron definition.
'''
iden = str(iden)
name = str(name)
self.user.confirm(('cron', 'set', name), gateiden=iden)
return await self.cell.editCronJob(iden, name, valu)
async def setStormCmd(self, cdef):
'''
Set the definition of a pure storm command in the cortex.
'''
self.user.confirm(('admin', 'cmds'))
return await self.cell.setStormCmd(cdef)
async def delStormCmd(self, name):
'''
Remove a pure storm command from the cortex.
'''
self.user.confirm(('admin', 'cmds'))
return await self.cell.delStormCmd(name)
async def _reqDefLayerAllowed(self, perms):
view = self.cell.getView()
wlyr = view.layers[0]
self.user.confirm(perms, gateiden=wlyr.iden)
async def addNodeTag(self, iden, tag, valu=(None, None)):
'''
Add a tag to a node specified by iden.
Args:
iden (str): A hex encoded node BUID.
tag (str): A tag string.
valu (tuple): A time interval tuple or (None, None).
'''
s_common.deprecated('addNodeTag')
await self._reqDefLayerAllowed(('node', 'tag', 'add', *tag.split('.')))
return await self.cell.addNodeTag(self.user, iden, tag, valu)
async def delNodeTag(self, iden, tag):
'''
Delete a tag from the node specified by iden. Deprecated in 2.0.0.
Args:
iden (str): A hex encoded node BUID.
tag (str): A tag string.
'''
s_common.deprecated('delNodeTag')
await self._reqDefLayerAllowed(('node', 'tag', 'del', *tag.split('.')))
return await self.cell.delNodeTag(self.user, iden, tag)
async def setNodeProp(self, iden, name, valu):
'''
Set a property on a single node. Deprecated in 2.0.0.
'''
s_common.deprecated('setNodeProp')
buid = s_common.uhex(iden)
async with await self.cell.snap(user=self.user) as snap:
with s_provenance.claim('coreapi', meth='prop:set', user=snap.user.iden):
node = await snap.getNodeByBuid(buid)
if node is None:
raise s_exc.NoSuchIden(iden=iden)
prop = node.form.props.get(name)
self.user.confirm(('node', 'prop', 'set', prop.full), gateiden=snap.wlyr.iden)
await node.set(name, valu)
return node.pack()
async def delNodeProp(self, iden, name):
'''
Delete a property from a single node. Deprecated in 2.0.0.
'''
s_common.deprecated('delNodeProp')
buid = s_common.uhex(iden)
async with await self.cell.snap(user=self.user) as snap:
with s_provenance.claim('coreapi', meth='prop:del', user=snap.user.iden):
node = await snap.getNodeByBuid(buid)
if node is None:
raise s_exc.NoSuchIden(iden=iden)
prop = node.form.props.get(name)
self.user.confirm(('node', 'prop', 'del', prop.full), gateiden=snap.wlyr.iden)
await node.pop(name)
return node.pack()
async def addNode(self, form, valu, props=None):
'''
Deprecated in 2.0.0.
'''
s_common.deprecated('addNode')
async with await self.cell.snap(user=self.user) as snap:
self.user.confirm(('node', 'add', form), gateiden=snap.wlyr.iden)
with s_provenance.claim('coreapi', meth='node:add', user=snap.user.iden):
node = await snap.addNode(form, valu, props=props)
return node.pack()
async def addNodes(self, nodes):
'''
Add a list of packed nodes to the cortex.
Args:
nodes (list): [ ( (form, valu), {'props':{}, 'tags':{}}), ... ]
Yields:
(tuple): Packed node tuples ((form,valu), {'props': {}, 'tags':{}})
Deprecated in 2.0.0
'''
s_common.deprecated('addNodes')
# First check that that user may add each form
done = {}
for node in nodes:
formname = node[0][0]
if done.get(formname):
continue
await self._reqDefLayerAllowed(('node', 'add', formname))
done[formname] = True
async with await self.cell.snap(user=self.user) as snap:
with s_provenance.claim('coreapi', meth='node:add', user=snap.user.iden):
snap.strict = False
async for node in snap.addNodes(nodes):
if node is not None:
node = node.pack()
yield node
async def getFeedFuncs(self):
'''
Get a list of Cortex feed functions.
Notes:
Each feed dictionary has the name of the feed function, the
full docstring for the feed function, and the first line of
the docstring broken out in their own keys for easy use.
Returns:
tuple: A tuple of dictionaries.
'''
return await self.cell.getFeedFuncs()
async def addFeedData(self, name, items, *, viewiden=None):
view = self.cell.getView(viewiden, user=self.user)
if view is None:
raise s_exc.NoSuchView(iden=viewiden)
wlyr = view.layers[0]
parts = name.split('.')
self.user.confirm(('feed:data', *parts), gateiden=wlyr.iden)
await self.cell.boss.promote('feeddata',
user=self.user,
info={'name': name,
'view': view.iden,
'nitems': len(items),
})
async with await self.cell.snap(user=self.user, view=view) as snap:
with s_provenance.claim('feed:data', name=name, user=snap.user.iden):
snap.strict = False
await snap.addFeedData(name, items)
async def count(self, text, opts=None):
'''
Count the number of nodes which result from a storm query.
Args:
text (str): Storm query text.
opts (dict): Storm query options.
Returns:
(int): The number of nodes resulting from the query.
'''
opts = self._reqValidStormOpts(opts)
return await self.cell.count(text, opts=opts)
async def eval(self, text, opts=None):
'''
Evaluate a storm query and yield packed nodes.
NOTE: This API is deprecated as of 2.0.0 and will be removed in 3.0.0
'''
s_common.deprecated('eval')
opts = self._reqValidStormOpts(opts)
view = self.cell._viewFromOpts(opts)
async for pode in view.iterStormPodes(text, opts=opts):
yield pode
async def storm(self, text, opts=None):
'''
Evaluate a storm query and yield result messages.
Yields:
((str,dict)): Storm messages.
'''
opts = self._reqValidStormOpts(opts)
async for mesg in self.cell.storm(text, opts=opts):
yield mesg
async def reqValidStorm(self, text, opts=None):
'''
Parse a Storm query to validate it.
Args:
text (str): The text of the Storm query to parse.
opts (dict): A Storm options dictionary.
Returns:
True: If the query is valid.
Raises:
BadSyntaxError: If the query is invalid.
'''
return await self.cell.reqValidStorm(text, opts)
async def watch(self, wdef):
'''
Hook cortex/view/layer watch points based on a specified watch definition.
Example:
wdef = { 'tags': [ 'foo.bar', 'baz.*' ] }
async for mesg in core.watch(wdef):
dostuff(mesg)
'''
s_common.deprecated('watch')
iden = wdef.get('view', self.cell.view.iden)
self.user.confirm(('watch',), gateiden=iden)
async for mesg in self.cell.watch(wdef):
yield mesg
async def syncLayerNodeEdits(self, offs, layriden=None, wait=True):
'''
Yield (indx, mesg) nodeedit sets for the given layer beginning at offset.
Once caught up, this API will begin yielding nodeedits in real-time.
The generator will only terminate on network disconnect or if the
consumer falls behind the max window size of 10,000 nodeedit messages.
'''
layr = self.cell.getLayer(layriden)
if layr is None:
raise s_exc.NoSuchLayer(iden=layriden)
self.user.confirm(('sync',), gateiden=layr.iden)
async for item in self.cell.syncLayerNodeEdits(layr.iden, offs, wait=wait):
yield item
@s_cell.adminapi()
async def splices(self, offs=None, size=None, layriden=None):
'''
Return the list of splices at the given offset.
'''
s_common.deprecated('splices')
layr = self.cell.getLayer(layriden)
count = 0
async for mesg in layr.splices(offs=offs, size=size):
count += 1
if not count % 1000:
await asyncio.sleep(0)
yield mesg
@s_cell.adminapi()
async def splicesBack(self, offs=None, size=None):
'''
Return the list of splices backwards from the given offset.
'''
s_common.deprecated('splicesBack')
count = 0
async for mesg in self.cell.view.layers[0].splicesBack(offs=offs, size=size):
count += 1
if not count % 1000: # pragma: no cover
await asyncio.sleep(0)
yield mesg
async def spliceHistory(self):
'''
Yield splices backwards from the end of the splice log.
Will only return the user's own splices unless they are an admin.
'''
s_common.deprecated('spliceHistory')
async for splice in self.cell.spliceHistory(self.user):
yield splice
@s_cell.adminapi()
async def provStacks(self, offs, size):
'''
Return stream of (iden, provenance stack) tuples at the given offset.
'''
count = 0
for iden, stack in self.cell.provstor.provStacks(offs, size):
count += 1
if not count % 1000:
await asyncio.sleep(0)
yield s_common.ehex(iden), stack
@s_cell.adminapi()
async def getProvStack(self, iden: str):
'''
Return the provenance stack associated with the given iden.
Args:
iden (str): the iden of the provenance stack
Note: the iden appears on each splice entry as the 'prov' property
'''
if iden is None:
return None
return self.cell.provstor.getProvStack(s_common.uhex(iden))
async def getPropNorm(self, prop, valu):
'''
Get the normalized property value based on the Cortex data model.
Args:
prop (str): The property to normalize.
valu: The value to normalize.
Returns:
(tuple): A two item tuple, containing the normed value and the info dictionary.
Raises:
s_exc.NoSuchProp: If the prop does not exist.
s_exc.BadTypeValu: If the value fails to normalize.
'''
return await self.cell.getPropNorm(prop, valu)
async def getTypeNorm(self, name, valu):
'''
Get the normalized type value based on the Cortex data model.
Args:
name (str): The type to normalize.
valu: The value to normalize.
Returns:
(tuple): A two item tuple, containing the normed value and the info dictionary.
Raises:
s_exc.NoSuchType: If the type does not exist.
s_exc.BadTypeValu: If the value fails to normalize.
'''
return await self.cell.getTypeNorm(name, valu)
async def addForm(self, formname, basetype, typeopts, typeinfo):
'''
Add an extended form to the data model.
Extended forms *must* begin with _
'''
self.user.confirm(('model', 'form', 'add', formname))
return await self.cell.addForm(formname, basetype, typeopts, typeinfo)
async def delForm(self, formname):
'''
Remove an extended form from the data model.
'''
self.user.confirm(('model', 'form', 'del', formname))
return await self.cell.delForm(formname)
async def addFormProp(self, form, prop, tdef, info):
'''
Add an extended property to the given form.
Extended properties *must* begin with _
'''
self.user.confirm(('model', 'prop', 'add', form))
return await self.cell.addFormProp(form, prop, tdef, info)
async def delFormProp(self, form, name):
'''
Remove an extended property from the given form.
'''
self.user.confirm(('model', 'prop', 'del', form))
return await self.cell.delFormProp(form, name)
async def addUnivProp(self, name, tdef, info):
'''
Add an extended universal property.
Extended properties *must* begin with _
'''
self.user.confirm(('model', 'univ', 'add'))
return await self.cell.addUnivProp(name, tdef, info)
async def delUnivProp(self, name):
'''
Remove an extended universal property.
'''
self.user.confirm(('model', 'univ', 'del'))
return await self.cell.delUnivProp(name)
async def addTagProp(self, name, tdef, info):
'''
Add a tag property to record data about tags on nodes.
'''
self.user.confirm(('model', 'tagprop', 'add'))
return await self.cell.addTagProp(name, tdef, info)
async def delTagProp(self, name):
'''
Remove a previously added tag property.
'''
self.user.confirm(('model', 'tagprop', 'del'))
return await self.cell.delTagProp(name)
async def addStormPkg(self, pkgdef):
self.user.confirm(('pkg', 'add'))
return await self.cell.addStormPkg(pkgdef)
async def delStormPkg(self, iden):
self.user.confirm(('pkg', 'del'))
return await self.cell.delStormPkg(iden)
@s_cell.adminapi()
async def getStormPkgs(self):
return await self.cell.getStormPkgs()
@s_cell.adminapi()
async def getStormPkg(self, name):
return await self.cell.getStormPkg(name)
@s_cell.adminapi()
async def addStormDmon(self, ddef):
return await self.cell.addStormDmon(ddef)
@s_cell.adminapi()
async def getStormDmons(self):
return await self.cell.getStormDmons()
@s_cell.adminapi()
async def getStormDmonLog(self, iden):
return await self.cell.getStormDmonLog(iden)
@s_cell.adminapi()
async def getStormDmon(self, iden):
return await self.cell.getStormDmon(iden)
@s_cell.adminapi()
async def bumpStormDmon(self, iden):
return await self.cell.bumpStormDmon(iden)
@s_cell.adminapi()
async def disableStormDmon(self, iden):
return await self.cell.disableStormDmon(iden)
@s_cell.adminapi()
async def enableStormDmon(self, iden):
return await self.cell.enableStormDmon(iden)
@s_cell.adminapi()
async def delStormDmon(self, iden):
return await self.cell.delStormDmon(iden)
@s_cell.adminapi(log=True)
async def enableMigrationMode(self):
await self.cell._enableMigrationMode()
@s_cell.adminapi(log=True)
async def disableMigrationMode(self):
await self.cell._disableMigrationMode()
@s_cell.adminapi()
async def cloneLayer(self, iden, ldef=None):
ldef = ldef or {}
ldef['creator'] = self.user.iden
return await self.cell.cloneLayer(iden, ldef)
async def getStormVar(self, name, default=None):
self.user.confirm(('globals', 'get', name))
return await self.cell.getStormVar(name, default=default)
async def popStormVar(self, name, default=None):
self.user.confirm(('globals', 'pop', name))
return await self.cell.popStormVar(name, default=default)
async def setStormVar(self, name, valu):
self.user.confirm(('globals', 'set', name))
return await self.cell.setStormVar(name, valu)
async def syncLayersEvents(self, offsdict=None, wait=True):
self.user.confirm(('sync',))
async for item in self.cell.syncLayersEvents(offsdict=offsdict, wait=wait):
yield item
async def syncIndexEvents(self, matchdef, offsdict=None, wait=True):
self.user.confirm(('sync',))
async for item in self.cell.syncIndexEvents(matchdef, offsdict=offsdict, wait=wait):
yield item
async def iterFormRows(self, layriden, form, stortype=None, startvalu=None):
'''
Yields buid, valu tuples of nodes of a single form, optionally (re)starting at startvalue
Args:
layriden (str): Iden of the layer to retrieve the nodes
form(str): A form name
stortype (Optional[int]): a STOR_TYPE_* integer representing the type of form:prop
startvalu (Any): The value to start at. May only be not None if stortype is not None.
Returns:
AsyncIterator[Tuple(buid, valu)]
'''
self.user.confirm(('layer', 'lift', layriden))
async for item in self.cell.iterFormRows(layriden, form, stortype=stortype, startvalu=startvalu):
yield item
async def iterPropRows(self, layriden, form, prop, stortype=None, startvalu=None):
'''
Yields buid, valu tuples of nodes with a particular secondary property, optionally (re)starting at startvalue
Args:
layriden (str): Iden of the layer to retrieve the nodes
form(str): A form name.
prop (str): A secondary property name.
stortype (Optional[int]): a STOR_TYPE_* integer representing the type of form:prop
startvalu (Any): The value to start at. May only be not None if stortype is not None.
Returns:
AsyncIterator[Tuple(buid, valu)]
'''
self.user.confirm(('layer', 'lift', layriden))
async for item in self.cell.iterPropRows(layriden, form, prop, stortype=stortype, startvalu=startvalu):
yield item
async def iterUnivRows(self, layriden, prop, stortype=None, startvalu=None):
'''
Yields buid, valu tuples of nodes with a particular universal property, optionally (re)starting at startvalue
Args:
layriden (str): Iden of the layer to retrieve the nodes
prop (str): A universal property name.
stortype (Optional[int]): a STOR_TYPE_* integer representing the type of form:prop
startvalu (Any): The value to start at. May only be not None if stortype is not None.
Returns:
AsyncIterator[Tuple(buid, valu)]
'''
self.user.confirm(('layer', 'lift', layriden))
async for item in self.cell.iterUnivRows(layriden, prop, stortype=stortype, startvalu=startvalu):
yield item
async def iterTagRows(self, layriden, tag, form=None, starttupl=None):
'''
Yields (buid, (valu, form)) values that match a tag and optional form, optionally (re)starting at starttupl.
Args:
layriden (str): Iden of the layer to retrieve the nodes
tag (str): the tag to match
form (Optional[str]): if present, only yields buids of nodes that match the form.
starttupl (Optional[Tuple[buid, form]]): if present, (re)starts the stream of values there.
Returns:
AsyncIterator[Tuple(buid, (valu, form))]
Note:
This yields (buid, (tagvalu, form)) instead of just buid, valu in order to allow resuming an interrupted
call by feeding the last value retrieved into starttupl
'''
self.user.confirm(('layer', 'lift', layriden))
async for item in self.cell.iterTagRows(layriden, tag, form=form, starttupl=starttupl):
yield item
async def iterTagPropRows(self, layriden, tag, prop, form=None, stortype=None, startvalu=None):
'''
Yields (buid, valu) that match a tag:prop, optionally (re)starting at startvalu.
Args:
layriden (str): Iden of the layer to retrieve the nodes
tag (str): tag name
prop (str): prop name
form (Optional[str]): optional form name
stortype (Optional[int]): a STOR_TYPE_* integer representing the type of form:prop
startvalu (Any): The value to start at. May only be not None if stortype is not None.
Returns:
AsyncIterator[Tuple(buid, valu)]
'''
self.user.confirm(('layer', 'lift', layriden))
async for item in self.cell.iterTagPropRows(layriden, tag, prop, form=form, stortype=stortype,
startvalu=startvalu):
yield item
class Cortex(s_cell.Cell): # type: ignore
'''
A Cortex implements the synapse hypergraph.
The bulk of the Cortex API lives on the Snap() object which can
be obtained by calling Cortex.snap() in a with block. This allows
callers to manage transaction boundaries explicitly and dramatically
increases performance.
'''
# For the cortex, nexslog:en defaults to True
confbase = copy.deepcopy(s_cell.Cell.confbase)
confbase['nexslog:en']['default'] = True # type: ignore
confdefs = {
'axon': {
'description': 'A telepath URL for a remote axon.',
'type': 'string'
},
'cron:enable': {
'default': True,
'description': 'Enable cron jobs running.',
'type': 'boolean'
},
'trigger:enable': {
'default': True,
'description': 'Enable triggers running.',
'type': 'boolean'
},
'layer:lmdb:map_async': {
'default': True,
'description': 'Set the default lmdb:map_async value in LMDB layers.',
'type': 'boolean'
},
'layers:lockmemory': {
'default': False,
'description': 'Should new layers lock memory for performance by default.',
'type': 'boolean'
},
'layers:logedits': {
'default': True,
'description': 'Whether nodeedits are logged in each layer.',
'type': 'boolean'
},
'provenance:en': {
'default': False,
'description': 'Enable provenance tracking for all writes.',
'type': 'boolean'
},
'max:nodes': {
'description': 'Maximum number of nodes which are allowed to be stored in a Cortex.',
'type': 'integer',
'minimum': 1,
'hidecmdl': True,
},
'modules': {
'default': [],
'description': 'A list of module classes to load.',
'type': 'array'
},
'storm:log': {
'default': False,
'description': 'Log storm queries via system logger.',
'type': 'boolean'
},
'storm:log:level': {
'default': 30,
'description': 'Logging log level to emit storm logs at.',
'type': 'integer'
},
'http:proxy': {
'description': 'An aiohttp-socks compatible proxy URL to use storm HTTP API.',
'type': 'string',
},
}
cellapi = CoreApi
viewapi = s_view.ViewApi
layerapi = s_layer.LayerApi
hiveapi = s_hive.HiveApi
viewctor = s_view.View.anit
layrctor = s_layer.Layer.anit
# phase 2 - service storage
async def initServiceStorage(self):
# NOTE: we may not make *any* nexus actions in this method
if self.inaugural:
await self.cellinfo.set('cortex:version', s_version.version)
corevers = self.cellinfo.get('cortex:version')
s_version.reqVersion(corevers, reqver, exc=s_exc.BadStorageVersion,
mesg='cortex version in storage is incompatible with running software')
self.views = {}
self.layers = {}
self.modules = {}
self.splicers = {}
self.feedfuncs = {}
self.stormcmds = {}
self.maxnodes = self.conf.get('max:nodes')
self.nodecount = 0
self.stormmods = {} # name: mdef
self.stormpkgs = {} # name: pkgdef
self.stormvars = None # type: s_hive.HiveDict
self.svcsbyiden = {}
self.svcsbyname = {}
self.svcsbysvcname = {} # remote name, not local name
self._propSetHooks = {}
self._runtLiftFuncs = {}
self._runtPropSetFuncs = {}
self._runtPropDelFuncs = {}
self.ontagadds = collections.defaultdict(list)
self.ontagdels = collections.defaultdict(list)
self.ontagaddglobs = s_cache.TagGlobs()
self.ontagdelglobs = s_cache.TagGlobs()
self.tagvalid = s_cache.FixedCache(self._isTagValid, size=1000)
self.tagprune = s_cache.FixedCache(self._getTagPrune, size=1000)
self.libroot = (None, {}, {})
self.bldgbuids = {} # buid -> (Node, Event) Nodes under construction
self.axon = None # type: s_axon.AxonApi
self.axready = asyncio.Event()
self.view = None # The default/main view
proven = self.conf.get('provenance:en')
self.provstor = await s_provenance.ProvStor.anit(self.dirn, proven=proven)
self.onfini(self.provstor.fini)
# generic fini handler for the Cortex
self.onfini(self._onCoreFini)
await self._initCoreHive()
self._initSplicers()
self._initStormLibs()
self._initFeedFuncs()
self._initCortexHttpApi()
self.model = s_datamodel.Model()
# Perform module loading
await self._loadCoreMods()
await self._loadExtModel()
await self._initStormCmds()
# Initialize our storage and views
await self._initCoreAxon()
await self._initCoreLayers()
await self._initCoreViews()
self.onfini(self._finiStor)
await self._initCoreQueues()
self.addHealthFunc(self._cortexHealth)
self.stormdmons = await s_storm.DmonManager.anit(self)
self.onfini(self.stormdmons)
self.agenda = await s_agenda.Agenda.anit(self)
self.onfini(self.agenda)
await self._initStormDmons()
self.trigson = self.conf.get('trigger:enable')
await self._initRuntFuncs()
taghive = await self.hive.open(('cortex', 'tagmeta'))
cmdhive = await self.hive.open(('cortex', 'storm', 'cmds'))
pkghive = await self.hive.open(('cortex', 'storm', 'packages'))
svchive = await self.hive.open(('cortex', 'storm', 'services'))
self.taghive = await taghive.dict()
self.cmdhive = await cmdhive.dict()
self.pkghive = await pkghive.dict()
self.svchive = await svchive.dict()
self.deprlocks = await self.hive.get(('cortex', 'model', 'deprlocks'), {})
# TODO: 3.0.0 conversion will truncate this hive key
for name, locked in self.deprlocks.items():
form = self.model.form(name)
if form is not None:
form.locked = locked
prop = self.model.prop(name)
if prop is not None:
prop.locked = locked
_type = self.model.type(name)
if _type is not None:
_type.locked = locked
# Finalize coremodule loading & give svchive a shot to load
await self._initPureStormCmds()
self.dynitems.update({
'cron': self.agenda,
'cortex': self,
'multiqueue': self.multiqueue,
})
await self.auth.addAuthGate('cortex', 'cortex')
def _setPropSetHook(self, name, hook):
self._propSetHooks[name] = hook
async def _callPropSetHook(self, node, prop, norm):
hook = self._propSetHooks.get(prop.full)
if hook is None:
return
await hook(node, prop, norm)
async def _execCellUpdates(self):
await self._bumpCellVers('cortex:defaults', (
(1, self._addAllLayrRead),
))
async def _addAllLayrRead(self):
layriden = self.getView().layers[0].iden
role = await self.auth.getRoleByName('all')
await role.addRule((True, ('layer', 'read')), gateiden=layriden)
async def initServiceRuntime(self):
# do any post-nexus initialization here...
if self.isactive:
await self._checkNexsIndx()
await self._checkLayerModels()
await self._initCoreMods()
await self._initStormSvcs()
# share ourself via the cell dmon as "cortex"
# for potential default remote use
self.dmon.share('cortex', self)
async def initServiceActive(self):
if self.conf.get('cron:enable'):
await self.agenda.start()
await self.stormdmons.start()
async def initServicePassive(self):
await self.agenda.stop()
await self.stormdmons.stop()
@s_nexus.Pusher.onPushAuto('model:depr:lock')
async def setDeprLock(self, name, locked):
todo = []
prop = self.model.prop(name)
if prop is not None and prop.deprecated:
todo.append(prop)
_type = self.model.type(name)
if _type is not None and _type.deprecated:
todo.append(_type)
if not todo:
mesg = 'setDeprLock() called on non-existent or non-deprecated form, property, or type.'
raise s_exc.NoSuchProp(name=name, mesg=mesg)
self.deprlocks[name] = locked
await self.hive.set(('cortex', 'model', 'deprlocks'), self.deprlocks)
for elem in todo:
elem.locked = locked
async def getDeprLocks(self):
'''
Return a dictionary of deprecated properties and their lock status.
'''
retn = {}
for prop in self.model.props.values():
if not prop.deprecated:
continue
retn[prop.full] = prop.locked
return retn
async def addCoreQueue(self, name, info):
if self.multiqueue.exists(name):
mesg = f'Queue named {name} already exists!'
raise s_exc.DupName(mesg=mesg)
await self._push('queue:add', name, info)
@s_nexus.Pusher.onPush('queue:add')
async def _addCoreQueue(self, name, info):
if self.multiqueue.exists(name):
return
await self.auth.addAuthGate(f'queue:{name}', 'queue')
creator = info.get('creator')
if creator is not None:
user = await self.auth.reqUser(creator)
await user.setAdmin(True, gateiden=f'queue:{name}', logged=False)
await self.multiqueue.add(name, info)
async def listCoreQueues(self):
return self.multiqueue.list()
async def getCoreQueue(self, name):
return self.multiqueue.status(name)
async def delCoreQueue(self, name):
if not self.multiqueue.exists(name):
mesg = f'No queue named {name} exists!'
raise s_exc.NoSuchName(mesg=mesg)
await self._push('queue:del', name)
await self.auth.delAuthGate(f'queue:{name}')
@s_nexus.Pusher.onPush('queue:del')
async def _delCoreQueue(self, name):
if not self.multiqueue.exists(name):
return
await self.multiqueue.rem(name)
async def coreQueueGet(self, name, offs=0, cull=True, wait=False):
if offs and cull:
await self.coreQueueCull(name, offs - 1)
async for item in self.multiqueue.gets(name, offs, cull=False, wait=wait):
return item
async def coreQueueGets(self, name, offs=0, cull=True, wait=False, size=None):
if offs and cull:
await self.coreQueueCull(name, offs - 1)
count = 0
async for item in self.multiqueue.gets(name, offs, cull=False, wait=wait):
yield item
count += 1
if size is not None and count >= size:
return
async def coreQueuePuts(self, name, items):
await self._push('queue:puts', name, items)
@s_nexus.Pusher.onPush('queue:puts', passitem=True)
async def _coreQueuePuts(self, name, items, nexsitem):
nexsoff, nexsmesg = nexsitem
await self.multiqueue.puts(name, items, reqid=nexsoff)
@s_nexus.Pusher.onPushAuto('queue:cull')
async def coreQueueCull(self, name, offs):
await self.multiqueue.cull(name, offs)
@s_nexus.Pusher.onPushAuto('queue:pop')
async def coreQueuePop(self, name, offs):
return await self.multiqueue.pop(name, offs)
async def coreQueueSize(self, name):
return self.multiqueue.size(name)
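# Illustrative sketch of the core queue APIs above; ``core`` is assumed to be a
# Cortex instance and the queue name / creator iden values are hypothetical.
#
#     await core.addCoreQueue('example:queue', {'creator': rootiden})
#     await core.coreQueuePuts('example:queue', ('item0', 'item1'))
#     async for item in core.coreQueueGets('example:queue', offs=0, wait=False):
#         ...
#     await core.coreQueueCull('example:queue', 1)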
@s_nexus.Pusher.onPushAuto('tag:model:set')
async def setTagModel(self, tagname, name, valu):
'''
Set a model specification property for a tag.
Arguments:
tagname (str): The name of the tag.
name (str): The name of the property.
valu (object): The value of the property.
Tag Model Properties:
regex - A list of None or regular expression strings to match each tag level.
prune - A number that determines how many levels of pruning are desired.
Examples:
await core.setTagModel("cno.cve", "regex", (None, None, "[0-9]{4}", "[0-9]{5}"))
'''
meta = self.taghive.get(tagname)
if meta is None:
meta = {}
meta[name] = valu
reqValidTagModel(meta)
await self.taghive.set(tagname, meta)
# clear cached entries
if name == 'regex':
self.tagvalid.clear()
elif name == 'prune':
self.tagprune.clear()
@s_nexus.Pusher.onPushAuto('tag:model:del')
async def delTagModel(self, tagname):
'''
Delete all the model specification properties for a tag.
Arguments:
tagname (str): The name of the tag.
'''
await self.taghive.pop(tagname)
self.tagvalid.clear()
self.tagprune.clear()
@s_nexus.Pusher.onPushAuto('tag:model:pop')
async def popTagModel(self, tagname, name):
'''
Pop a property from the model specification of a tag.
Arguments:
tagname (str): The name of the tag.
name (str): The name of the specification property.
Returns:
(object): The current value of the property.
'''
meta = self.taghive.get(tagname)
if meta is None:
return None
retn = meta.pop(name, None)
# persist the updated model under the tag name (not the popped property name)
await self.taghive.set(tagname, meta)
if name == 'regex':
self.tagvalid.clear()
elif name == 'prune':
self.tagprune.clear()
return retn
async def isTagValid(self, tagname):
'''
Check if a tag name is valid according to tag model regular expressions.
Returns:
(bool): True if the tag is valid.
'''
return self.tagvalid.get(tagname)
def _isTagValid(self, tagname):
parts = s_chop.tagpath(tagname)
for tag in s_chop.tags(tagname):
meta = self.taghive.get(tag)
if meta is None:
continue
regx = meta.get('regex')
if regx is None:
continue
for i in range(min(len(regx), len(parts))):
if regx[i] is None:
continue
if not regex.fullmatch(regx[i], parts[i]):
return False
return True
async def getTagPrune(self, tagname):
return self.tagprune.get(tagname)
def _getTagPrune(self, tagname):
prune = []
pruning = 0
for tag in s_chop.tags(tagname):
if pruning:
pruning -= 1
prune.append(tag)
continue
meta = self.taghive.get(tag)
if meta is None:
continue
pruning = meta.get('prune', 0)
if pruning:
pruning -= 1
prune.append(tag)
# if we don't reach the final tag for pruning, skip it.
if prune and not prune[-1] == tagname:
return ()
return tuple(prune)
async def getTagModel(self, tagname):
'''
Retrieve the tag model specification for a tag.
Returns:
(dict): The tag model specification or None.
'''
retn = self.taghive.get(tagname)
if retn is not None:
return dict(retn)
async def listTagModel(self):
'''
Retrieve a list of the tag model specifications.
Returns:
([(str, dict), ...]): A list of tag model specification tuples.
'''
return list(self.taghive.items())
async def _finiStor(self):
await asyncio.gather(*[view.fini() for view in self.views.values()])
await asyncio.gather(*[layr.fini() for layr in self.layers.values()])
async def _initRuntFuncs(self):
async def onSetTrigDoc(node, prop, valu):
valu = str(valu)
iden = node.ndef[1]
trig = node.snap.view.triggers.get(iden)
node.snap.user.confirm(('trigger', 'set', 'doc'), gateiden=iden)
await trig.set('doc', valu)
node.props[prop.name] = valu
async def onSetTrigName(node, prop, valu):
valu = str(valu)
iden = node.ndef[1]
trig = node.snap.view.triggers.get(iden)
node.snap.user.confirm(('trigger', 'set', 'name'), gateiden=iden)
await trig.set('name', valu)
node.props[prop.name] = valu
async def onSetCronDoc(node, prop, valu):
valu = str(valu)
iden = node.ndef[1]
appt = await self.agenda.get(iden)
node.snap.user.confirm(('cron', 'set', 'doc'), gateiden=iden)
await appt.setDoc(valu, nexs=True)
node.props[prop.name] = valu
async def onSetCronName(node, prop, valu):
valu = str(valu)
iden = node.ndef[1]
appt = await self.agenda.get(iden)
node.snap.user.confirm(('cron', 'set', 'name'), gateiden=iden)
await appt.setName(valu, nexs=True)
node.props[prop.name] = valu
self.addRuntPropSet('syn:cron:doc', onSetCronDoc)
self.addRuntPropSet('syn:cron:name', onSetCronName)
self.addRuntPropSet('syn:trigger:doc', onSetTrigDoc)
self.addRuntPropSet('syn:trigger:name', onSetTrigName)
async def _initStormDmons(self):
node = await self.hive.open(('cortex', 'storm', 'dmons'))
self.stormdmonhive = await node.dict()
for iden, ddef in self.stormdmonhive.items():
try:
await self.runStormDmon(iden, ddef)
except asyncio.CancelledError: # pragma: no cover TODO: remove once >= py 3.8 only
raise
except Exception as e:
logger.warning(f'initStormDmon ({iden}) failed: {e}')
async def _initStormSvcs(self):
for iden, sdef in self.svchive.items():
try:
await self._setStormSvc(sdef)
except asyncio.CancelledError: # pragma: no cover TODO: remove once >= py 3.8 only
raise
except Exception as e:
logger.warning(f'initStormService ({iden}) failed: {e}')
async def _initCoreQueues(self):
path = os.path.join(self.dirn, 'slabs', 'queues.lmdb')
slab = await s_lmdbslab.Slab.anit(path)
self.onfini(slab.fini)
self.multiqueue = await slab.getMultiQueue('cortex:queue', nexsroot=self.nexsroot)
@s_nexus.Pusher.onPushAuto('cmd:set')
async def setStormCmd(self, cdef):
'''
Set pure storm command definition.
Args:
cdef (dict): A Pure Stormcmd definition dictionary.
Notes:
The definition dictionary is formatted like the following::
{
'name': <name>,
'cmdargs': [
(<name>, <opts>),
]
'cmdconf': {
<str>: <valu>
},
'storm': <text>,
}
'''
name = cdef.get('name')
await self._setStormCmd(cdef)
await self.cmdhive.set(name, cdef)
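# Illustrative sketch of a pure storm command definition in the shape described
# by the docstring above; the command name, argument, and storm text are
# hypothetical examples only.
#
#     cdef = {
#         'name': 'example.hello',
#         'cmdargs': (
#             ('--debug', {'default': False, 'action': 'store_true'}),
#         ),
#         'cmdconf': {},
#         'storm': '$lib.print(hello)',
#     }
#     await core.setStormCmd(cdef)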
async def _reqStormCmd(self, cdef):
name = cdef.get('name')
if not s_grammar.isCmdName(name):
raise s_exc.BadCmdName(name=name)
self.getStormQuery(cdef.get('storm'))
async def _getStorNodes(self, buid, layers):
# NOTE: This API lives here to make it easy to optimize
# the cluster case to minimize round trips
return [await layr.getStorNode(buid) for layr in layers]
async def _genSodeList(self, buid, sodes, layers, filtercmpr=None):
sodelist = []
if filtercmpr is not None:
filt = True
for layr in layers[-1::-1]:
sode = sodes.get(layr.iden)
if sode is None:
sode = await layr.getStorNode(buid)
if filt and filtercmpr(sode):
return
else:
filt = False
sodelist.append((layr.iden, sode))
return (buid, sodelist[::-1])
for layr in layers:
sode = sodes.get(layr.iden)
if sode is None:
sode = await layr.getStorNode(buid)
sodelist.append((layr.iden, sode))
return (buid, sodelist)
async def _mergeSodes(self, layers, genrs, cmprkey, filtercmpr=None):
lastbuid = None
sodes = {}
async for layr, (_, buid), sode in s_common.merggenr2(genrs, cmprkey):
if not buid == lastbuid or layr in sodes:
if lastbuid is not None:
sodelist = await self._genSodeList(lastbuid, sodes, layers, filtercmpr)
if sodelist is not None:
yield sodelist
sodes.clear()
lastbuid = buid
sodes[layr] = sode
if lastbuid is not None:
sodelist = await self._genSodeList(lastbuid, sodes, layers, filtercmpr)
if sodelist is not None:
yield sodelist
async def _liftByDataName(self, name, layers):
if len(layers) == 1:
layr = layers[0].iden
async for _, buid, sode in layers[0].liftByDataName(name):
yield (buid, [(layr, sode)])
return
genrs = []
for layr in layers:
genrs.append(wrap_liftgenr(layr.iden, layr.liftByDataName(name)))
async for sodes in self._mergeSodes(layers, genrs, cmprkey_buid):
yield sodes
async def _liftByProp(self, form, prop, layers):
if len(layers) == 1:
layr = layers[0].iden
async for _, buid, sode in layers[0].liftByProp(form, prop):
yield (buid, [(layr, sode)])
return
genrs = []
for layr in layers:
genrs.append(wrap_liftgenr(layr.iden, layr.liftByProp(form, prop)))
async for sodes in self._mergeSodes(layers, genrs, cmprkey_indx):
yield sodes
async def _liftByPropValu(self, form, prop, cmprvals, layers):
if len(layers) == 1:
layr = layers[0].iden
async for _, buid, sode in layers[0].liftByPropValu(form, prop, cmprvals):
yield (buid, [(layr, sode)])
return
def filtercmpr(sode):
props = sode.get('props')
if props is None:
return False
return props.get(prop) is not None
for cval in cmprvals:
genrs = []
for layr in layers:
genrs.append(wrap_liftgenr(layr.iden, layr.liftByPropValu(form, prop, (cval,))))
async for sodes in self._mergeSodes(layers, genrs, cmprkey_indx, filtercmpr):
yield sodes
async def _liftByPropArray(self, form, prop, cmprvals, layers):
if len(layers) == 1:
layr = layers[0].iden
async for _, buid, sode in layers[0].liftByPropArray(form, prop, cmprvals):
yield (buid, [(layr, sode)])
return
if prop is None:
filtercmpr = None
else:
def filtercmpr(sode):
props = sode.get('props')
if props is None:
return False
return props.get(prop) is not None
for cval in cmprvals:
genrs = []
for layr in layers:
genrs.append(wrap_liftgenr(layr.iden, layr.liftByPropArray(form, prop, (cval,))))
async for sodes in self._mergeSodes(layers, genrs, cmprkey_indx, filtercmpr):
yield sodes
async def _liftByFormValu(self, form, cmprvals, layers):
if len(layers) == 1:
layr = layers[0].iden
async for _, buid, sode in layers[0].liftByFormValu(form, cmprvals):
yield (buid, [(layr, sode)])
return
for cval in cmprvals:
genrs = []
for layr in layers:
genrs.append(wrap_liftgenr(layr.iden, layr.liftByFormValu(form, (cval,))))
async for sodes in self._mergeSodes(layers, genrs, cmprkey_indx):
yield sodes
async def _liftByTag(self, tag, form, layers):
if len(layers) == 1:
layr = layers[0].iden
async for _, buid, sode in layers[0].liftByTag(tag, form):
yield (buid, [(layr, sode)])
return
if form is None:
def filtercmpr(sode):
tags = sode.get('tags')
if tags is None:
return False
return tags.get(tag) is not None
else:
filtercmpr = None
genrs = []
for layr in layers:
genrs.append(wrap_liftgenr(layr.iden, layr.liftByTag(tag, form)))
async for sodes in self._mergeSodes(layers, genrs, cmprkey_buid, filtercmpr):
yield sodes
async def _liftByTagValu(self, tag, cmpr, valu, form, layers):
if len(layers) == 1:
layr = layers[0].iden
async for _, buid, sode in layers[0].liftByTagValu(tag, cmpr, valu, form):
yield (buid, [(layr, sode)])
return
def filtercmpr(sode):
tags = sode.get('tags')
if tags is None:
return False
return tags.get(tag) is not None
genrs = []
for layr in layers:
genrs.append(wrap_liftgenr(layr.iden, layr.liftByTagValu(tag, cmpr, valu, form)))
async for sodes in self._mergeSodes(layers, genrs, cmprkey_buid, filtercmpr):
yield sodes
async def _liftByTagProp(self, form, tag, prop, layers):
if len(layers) == 1:
layr = layers[0].iden
async for _, buid, sode in layers[0].liftByTagProp(form, tag, prop):
yield (buid, [(layr, sode)])
return
genrs = []
for layr in layers:
genrs.append(wrap_liftgenr(layr.iden, layr.liftByTagProp(form, tag, prop)))
async for sodes in self._mergeSodes(layers, genrs, cmprkey_indx):
yield sodes
async def _liftByTagPropValu(self, form, tag, prop, cmprvals, layers):
if len(layers) == 1:
layr = layers[0].iden
async for _, buid, sode in layers[0].liftByTagPropValu(form, tag, prop, cmprvals):
yield (buid, [(layr, sode)])
return
def filtercmpr(sode):
tagprops = sode.get('tagprops')
if tagprops is None:
return False
props = tagprops.get(tag)
if not props:
return False
return props.get(prop) is not None
for cval in cmprvals:
genrs = []
for layr in layers:
genrs.append(wrap_liftgenr(layr.iden, layr.liftByTagPropValu(form, tag, prop, (cval,))))
async for sodes in self._mergeSodes(layers, genrs, cmprkey_indx, filtercmpr):
yield sodes
async def _setStormCmd(self, cdef):
'''
Note:
No change control or persistence
'''
await self._reqStormCmd(cdef)
def ctor(runt, runtsafe):
return s_storm.PureCmd(cdef, runt, runtsafe)
# TODO unify class ctors and func ctors vs briefs...
def getCmdBrief():
return cdef.get('descr', 'No description').strip().split('\n')[0]
ctor.getCmdBrief = getCmdBrief
ctor.pkgname = cdef.get('pkgname')
ctor.svciden = cdef.get('cmdconf', {}).get('svciden', '')
ctor.forms = cdef.get('forms', {})
def getStorNode(form):
ndef = (form.name, form.type.norm(cdef.get('name'))[0])
buid = s_common.buid(ndef)
props = {
'doc': ctor.getCmdBrief()
}
inpt = ctor.forms.get('input')
outp = ctor.forms.get('output')
nodedata = ctor.forms.get('nodedata')
if inpt:
props['input'] = tuple(inpt)
if outp:
props['output'] = tuple(outp)
if nodedata:
props['nodedata'] = tuple(nodedata)
if ctor.svciden:
props['svciden'] = ctor.svciden
if ctor.pkgname:
props['package'] = ctor.pkgname
pnorms = {}
for prop, valu in props.items():
formprop = form.props.get(prop)
if formprop is not None and valu is not None:
pnorms[prop] = formprop.type.norm(valu)[0]
return (buid, {
'ndef': ndef,
'props': pnorms,
})
ctor.getStorNode = getStorNode
name = cdef.get('name')
self.stormcmds[name] = ctor
await self.fire('core:cmd:change', cmd=name, act='add')
async def _popStormCmd(self, name):
self.stormcmds.pop(name, None)
await self.fire('core:cmd:change', cmd=name, act='del')
async def delStormCmd(self, name):
'''
Remove a previously set pure storm command.
'''
ctor = self.stormcmds.get(name)
if ctor is None:
mesg = f'No storm command named {name}.'
raise s_exc.NoSuchCmd(name=name, mesg=mesg)
return await self._push('cmd:del', name)
@s_nexus.Pusher.onPush('cmd:del')
async def _delStormCmd(self, name):
ctor = self.stormcmds.get(name)
if ctor is None:
return
cdef = self.cmdhive.get(name)
if cdef is None:
mesg = f'The storm command ({name}) is not dynamic.'
raise s_exc.CantDelCmd(mesg=mesg)
await self.cmdhive.pop(name)
self.stormcmds.pop(name, None)
await self.fire('core:cmd:change', cmd=name, act='del')
@s_nexus.Pusher.onPushAuto('pkg:add')
async def addStormPkg(self, pkgdef):
'''
Add the given storm package to the cortex.
This will store the package for future use.
'''
s_storm.reqValidPkgdef(pkgdef)
name = pkgdef.get('name')
olddef = self.pkghive.get(name, None)
if olddef is not None:
await self._dropStormPkg(olddef)
await self.loadStormPkg(pkgdef)
await self.pkghive.set(name, pkgdef)
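# Illustrative sketch of a minimal storm package definition; the package name,
# version, and storm text are hypothetical and only meant to show the general
# shape validated by reqValidPkgdef().
#
#     pkgdef = {
#         'name': 'examplepkg',
#         'version': (0, 0, 1),
#         'modules': (
#             {'name': 'examplepkg.mod', 'storm': 'function f(x) { return($x) }'},
#         ),
#         'commands': (
#             {'name': 'examplepkg.hello', 'storm': '$lib.print(hello)'},
#         ),
#     }
#     await core.addStormPkg(pkgdef)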
async def delStormPkg(self, name):
pkgdef = self.pkghive.get(name, None)
if pkgdef is None:
mesg = f'No storm package: {name}.'
raise s_exc.NoSuchPkg(mesg=mesg)
return await self._push('pkg:del', name)
@s_nexus.Pusher.onPush('pkg:del')
async def _delStormPkg(self, name):
'''
Delete a storm package by name.
'''
pkgdef = await self.pkghive.pop(name, None)
if pkgdef is None:
return
await self._dropStormPkg(pkgdef)
async def getStormPkg(self, name):
return self.stormpkgs.get(name)
async def getStormPkgs(self):
return list(self.pkghive.values())
async def getStormMods(self):
return self.stormmods
async def getStormMod(self, name):
return self.stormmods.get(name)
def getDataModel(self):
return self.model
async def _tryLoadStormPkg(self, pkgdef):
try:
await self.loadStormPkg(pkgdef)
except asyncio.CancelledError: # pragma: no cover TODO: remove once >= py 3.8 only
raise
except Exception as e:
name = pkgdef.get('name', '')
logger.exception(f'Error loading pkg: {name}, {str(e)}')
async def _confirmStormPkg(self, pkgdef):
'''
Validate a storm package for loading. Raises if invalid.
'''
# Validate package def
s_storm.reqValidPkgdef(pkgdef)
pkgname = pkgdef.get('name')
# Check minimum synapse version
minversion = pkgdef.get('synapse_minversion')
if minversion is not None and tuple(minversion) > s_version.version:
mesg = f'Storm package {pkgname} requires Synapse {minversion} but ' \
f'Cortex is running {s_version.version}'
raise s_exc.BadVersion(mesg=mesg)
# Validate storm contents from modules and commands
mods = pkgdef.get('modules', ())
cmds = pkgdef.get('commands', ())
onload = pkgdef.get('onload')
svciden = pkgdef.get('svciden')
if onload is not None:
self.getStormQuery(onload)
for mdef in mods:
modtext = mdef.get('storm')
self.getStormQuery(modtext)
mdef.setdefault('modconf', {})
if svciden:
mdef['modconf']['svciden'] = svciden
for cdef in cmds:
cdef['pkgname'] = pkgname
cdef.setdefault('cmdconf', {})
if svciden:
cdef['cmdconf']['svciden'] = svciden
cmdtext = cdef.get('storm')
self.getStormQuery(cmdtext)
async def loadStormPkg(self, pkgdef):
'''
Load a storm package into the storm library for this cortex.
NOTE: This will *not* persist the package (allowing service dynamism).
'''
await self._confirmStormPkg(pkgdef)
name = pkgdef.get('name')
mods = pkgdef.get('modules', ())
cmds = pkgdef.get('commands', ())
# now actually load...
self.stormpkgs[name] = pkgdef
# copy the mods dict and smash the ref so
# updates are atomic and don't affect running
# storm queries.
stormmods = self.stormmods.copy()
for mdef in mods:
modname = mdef.get('name')
stormmods[modname] = mdef
self.stormmods = stormmods
for cdef in cmds:
await self._setStormCmd(cdef)
onload = pkgdef.get('onload')
if onload is not None and self.isactive:
async def _onload():
try:
async for mesg in self.storm(onload):
if mesg[0] in ('print', 'warn'):
logger.warning(f'onload output: {mesg}')
await asyncio.sleep(0)
except asyncio.CancelledError: # pragma: no cover
raise
except Exception: # pragma: no cover
logger.warning(f'onload failed for package: {name}')
self.schedCoro(_onload())
async def _dropStormPkg(self, pkgdef):
'''
Reverse the process of loadStormPkg()
'''
for mdef in pkgdef.get('modules', ()):
modname = mdef.get('name')
self.stormmods.pop(modname, None)
for cdef in pkgdef.get('commands', ()):
name = cdef.get('name')
await self._popStormCmd(name)
pkgname = pkgdef.get('name')
self.stormpkgs.pop(pkgname, None)
def getStormSvc(self, name):
ssvc = self.svcsbyiden.get(name)
if ssvc is not None:
return ssvc
ssvc = self.svcsbyname.get(name)
if ssvc is not None:
return ssvc
ssvc = self.svcsbysvcname.get(name)
if ssvc is not None:
return ssvc
async def waitStormSvc(self, name, timeout=None):
ssvc = self.getStormSvc(name)
return await s_coro.event_wait(ssvc.ready, timeout=timeout)
async def addStormSvc(self, sdef):
'''
Add a registered storm service to the cortex.
'''
iden = sdef.get('iden')
if iden is None:
iden = sdef['iden'] = s_common.guid()
if self.svcsbyiden.get(iden) is not None:
mesg = f'Storm service already exists: {iden}'
raise s_exc.DupStormSvc(mesg=mesg)
return await self._push('svc:add', sdef)
@s_nexus.Pusher.onPush('svc:add')
async def _addStormSvc(self, sdef):
iden = sdef.get('iden')
ssvc = self.svcsbyiden.get(iden)
if ssvc is not None:
return ssvc.sdef
ssvc = await self._setStormSvc(sdef)
await self.svchive.set(iden, sdef)
return ssvc.sdef
async def delStormSvc(self, iden):
sdef = self.svchive.get(iden)
if sdef is None:
mesg = f'No storm service with iden: {iden}'
raise s_exc.NoSuchStormSvc(mesg=mesg, iden=iden)
return await self._push('svc:del', iden)
@s_nexus.Pusher.onPush('svc:del')
async def _delStormSvc(self, iden):
'''
Delete a registered storm service from the cortex.
'''
sdef = self.svchive.get(iden)
if sdef is None: # pragma: no cover
return
try:
if self.isactive:
await self.runStormSvcEvent(iden, 'del')
except asyncio.CancelledError: # pragma: no cover TODO: remove once py 3.8 only
raise
except Exception as e:
logger.exception(f'service.del hook for service {iden} failed with error: {e}')
sdef = await self.svchive.pop(iden)
await self._delStormSvcPkgs(iden)
name = sdef.get('name')
if name is not None:
self.svcsbyname.pop(name, None)
ssvc = self.svcsbyiden.pop(iden, None)
if ssvc is not None:
self.svcsbysvcname.pop(ssvc.svcname, None)
await ssvc.fini()
async def _delStormSvcPkgs(self, iden):
'''
Delete storm packages associated with a service.
'''
oldpkgs = []
for _, pdef in self.pkghive.items():
pkgiden = pdef.get('svciden')
if pkgiden and pkgiden == iden:
oldpkgs.append(pdef)
for pkg in oldpkgs:
name = pkg.get('name')
if name:
await self._delStormPkg(name)
async def setStormSvcEvents(self, iden, edef):
'''
Set the event callbacks for a storm service. Extends the sdef dict.
Args:
iden (str): The service iden.
edef (dict): The events definition.
Notes:
The edef is formatted like the following::
{
<name> : {
'storm': <storm>
}
}
where ``name`` is one of the following items:
add
Run the given storm *before* the service is first added (a la service.add), but not on a reconnect.
del
Run the given storm *after* the service is removed (a la service.del), but not on a disconnect.
Returns:
dict: An updated storm service definition dictionary.
'''
sdef = self.svchive.get(iden)
if sdef is None:
mesg = f'No storm service with iden: {iden}'
raise s_exc.NoSuchStormSvc(mesg=mesg)
sdef['evts'] = edef
await self.svchive.set(iden, sdef)
return sdef
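# Illustrative sketch of an events definition (edef) for a storm service, per
# the docstring above; the service iden and storm text are hypothetical.
#
#     edef = {
#         'add': {'storm': '$lib.print("service added")'},
#         'del': {'storm': '$lib.print("service removed")'},
#     }
#     await core.setStormSvcEvents(svciden, edef)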
async def _runStormSvcAdd(self, iden):
sdef = self.svchive.get(iden)
if sdef is None:
mesg = f'No storm service with iden: {iden}'
raise s_exc.NoSuchStormSvc(mesg=mesg)
if sdef.get('added', False):
return
try:
await self.runStormSvcEvent(iden, 'add')
except asyncio.CancelledError: # pragma: no cover TODO: remove once py 3.8 only
raise
except Exception as e:
logger.exception(f'runStormSvcEvent service.add failed with error {e}')
return
sdef['added'] = True
await self.svchive.set(iden, sdef)
async def runStormSvcEvent(self, iden, name):
assert name in ('add', 'del')
sdef = self.svchive.get(iden)
if sdef is None:
mesg = f'No storm service with iden: {iden}'
raise s_exc.NoSuchStormSvc(mesg=mesg)
evnt = sdef.get('evts', {}).get(name, {}).get('storm')
if evnt is None:
return
opts = {'vars': {'cmdconf': {'svciden': iden}}}
coro = s_common.aspin(self.storm(evnt, opts=opts))
if name == 'add':
await coro
else:
self.schedCoro(coro)
async def _setStormSvc(self, sdef):
ssvc = await s_stormsvc.StormSvcClient.anit(self, sdef)
self.onfini(ssvc)
self.svcsbyiden[ssvc.iden] = ssvc
self.svcsbyname[ssvc.name] = ssvc
return ssvc
def getStormSvcs(self):
return list(self.svcsbyiden.values())
# Global stormvars APIs
async def getStormVar(self, name, default=None):
return self.stormvars.get(name, default=default)
@s_nexus.Pusher.onPushAuto('stormvar:pop')
async def popStormVar(self, name, default=None):
return await self.stormvars.pop(name, default=default)
@s_nexus.Pusher.onPushAuto('stormvar:set')
async def setStormVar(self, name, valu):
return await self.stormvars.set(name, valu)
async def itemsStormVar(self):
for item in self.stormvars.items():
yield item
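# Illustrative sketch of the global storm variable APIs above; ``core`` is
# assumed to be a Cortex instance and the variable name is hypothetical.
#
#     await core.setStormVar('example:cursor', 42)
#     valu = await core.getStormVar('example:cursor', default=0)
#     await core.popStormVar('example:cursor')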
async def _cortexHealth(self, health):
health.update('cortex', 'nominal')
async def _loadExtModel(self):
self.extforms = await (await self.hive.open(('cortex', 'model', 'forms'))).dict()
self.extprops = await (await self.hive.open(('cortex', 'model', 'props'))).dict()
self.extunivs = await (await self.hive.open(('cortex', 'model', 'univs'))).dict()
self.exttagprops = await (await self.hive.open(('cortex', 'model', 'tagprops'))).dict()
for formname, basetype, typeopts, typeinfo in self.extforms.values():
try:
self.model.addType(formname, basetype, typeopts, typeinfo)
form = self.model.addForm(formname, {}, ())
except asyncio.CancelledError: # pragma: no cover TODO: remove once >= py 3.8 only
raise
except Exception as e:
logger.warning(f'Extended form ({formname}) error: {e}')
else:
if form.type.deprecated:
mesg = f'The extended form {formname} is using a deprecated type {form.type.name} which will' \
f' be removed in 3.0.0'
logger.warning(mesg)
for form, prop, tdef, info in self.extprops.values():
try:
prop = self.model.addFormProp(form, prop, tdef, info)
except asyncio.CancelledError: # pragma: no cover TODO: remove once >= py 3.8 only
raise
except Exception as e:
logger.warning(f'ext prop ({form}:{prop}) error: {e}')
else:
if prop.type.deprecated:
mesg = f'The extended property {prop.full} is using a deprecated type {prop.type.name} which will' \
f' be removed in 3.0.0'
logger.warning(mesg)
for prop, tdef, info in self.extunivs.values():
try:
self.model.addUnivProp(prop, tdef, info)
except asyncio.CancelledError: # pragma: no cover TODO: remove once >= py 3.8 only
raise
except Exception as e:
logger.warning(f'ext univ ({prop}) error: {e}')
for prop, tdef, info in self.exttagprops.values():
try:
self.model.addTagProp(prop, tdef, info)
except asyncio.CancelledError: # pragma: no cover TODO: remove once >= py 3.8 only
raise
except Exception as e:
logger.warning(f'ext tag prop ({prop}) error: {e}')
@contextlib.asynccontextmanager
async def watcher(self, wdef):
iden = wdef.get('view', self.view.iden)
view = self.views.get(iden)
if view is None:
raise s_exc.NoSuchView(iden=iden)
async with await s_queue.Window.anit(maxsize=10000) as wind:
tags = wdef.get('tags')
if tags is not None:
tglobs = s_cache.TagGlobs()
[tglobs.add(t, True) for t in tags]
async def ontag(mesg):
name = mesg[1].get('tag')
if not tglobs.get(name):
return
await wind.put(mesg)
for layr in self.view.layers:
layr.on('tag:add', ontag, base=wind)
layr.on('tag:del', ontag, base=wind)
yield wind
async def watch(self, wdef):
'''
Hook cortex/view/layer watch points based on a specified watch definition.
( see CoreApi.watch() docs for details )
'''
async with self.watcher(wdef) as wind:
async for mesg in wind:
yield mesg
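# Illustrative sketch of a watch definition consumed by watch(); the view iden
# and tag globs are hypothetical. Each yielded mesg is a (name, info) event tuple.
#
#     wdef = {'view': viewiden, 'tags': ('example.tag', 'example.sub.*')}
#     async for mesg in core.watch(wdef):
#         ...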
@s_nexus.Pusher.onPushAuto('model:univ:add')
async def addUnivProp(self, name, tdef, info):
# the loading function does the actual validation...
if not name.startswith('_'):
mesg = 'ext univ name must start with "_"'
raise s_exc.BadPropDef(name=name, mesg=mesg)
self.model.addUnivProp(name, tdef, info)
await self.extunivs.set(name, (name, tdef, info))
await self.fire('core:extmodel:change', prop=name, act='add', type='univ')
async def addForm(self, formname, basetype, typeopts, typeinfo):
if not formname.startswith('_'):
mesg = 'Extended form must begin with "_"'
raise s_exc.BadFormDef(form=formname, mesg=mesg)
if self.model.form(formname) is not None:
mesg = f'Form name already exists: {formname}'
raise s_exc.DupFormName(mesg=mesg)
return await self._push('model:form:add', formname, basetype, typeopts, typeinfo)
@s_nexus.Pusher.onPush('model:form:add')
async def _addForm(self, formname, basetype, typeopts, typeinfo):
self.model.addType(formname, basetype, typeopts, typeinfo)
self.model.addForm(formname, {}, ())
await self.extforms.set(formname, (formname, basetype, typeopts, typeinfo))
await self.fire('core:extmodel:change', form=formname, act='add', type='form')
async def delForm(self, formname):
if not formname.startswith('_'):
mesg = 'Extended form must begin with "_"'
raise s_exc.BadFormDef(form=formname, mesg=mesg)
if self.model.form(formname) is None:
raise s_exc.NoSuchForm(name=formname)
return await self._push('model:form:del', formname)
@s_nexus.Pusher.onPush('model:form:del')
async def _delForm(self, formname):
for layr in self.layers.values():
async for item in layr.iterFormRows(formname):
mesg = f'Nodes still exist with form: {formname}'
raise s_exc.CantDelForm(mesg=mesg)
self.model.delForm(formname)
self.model.delType(formname)
await self.extforms.pop(formname, None)
await self.fire('core:extmodel:change', form=formname, act='del', type='form')
@s_nexus.Pusher.onPushAuto('model:prop:add')
async def addFormProp(self, form, prop, tdef, info):
if not prop.startswith('_') and not form.startswith('_'):
mesg = 'Extended prop must begin with "_" or be added to an extended form.'
raise s_exc.BadPropDef(prop=prop, mesg=mesg)
_prop = self.model.addFormProp(form, prop, tdef, info)
if _prop.type.deprecated:
mesg = f'The extended property {_prop.full} is using a deprecated type {_prop.type.name} which will' \
f' be removed in 3.0.0'
logger.warning(mesg)
await self.extprops.set(f'{form}:{prop}', (form, prop, tdef, info))
await self.fire('core:extmodel:change', form=form, prop=prop, act='add', type='formprop')
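# Illustrative sketch of extending the data model with a custom form and
# property via the APIs above; the names and type definitions are hypothetical.
#
#     await core.addForm('_acme:thing', 'str', {'lower': True}, {'doc': 'An example form.'})
#     await core.addFormProp('_acme:thing', 'score', ('int', {}), {'doc': 'An example prop.'})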
async def delFormProp(self, form, prop):
full = f'{form}:{prop}'
pdef = self.extprops.get(full)
if pdef is None:
mesg = f'No ext prop named {full}'
raise s_exc.NoSuchProp(form=form, prop=prop, mesg=mesg)
return await self._push('model:prop:del', form, prop)
@s_nexus.Pusher.onPush('model:prop:del')
async def _delFormProp(self, form, prop):
'''
Remove an extended property from the cortex.
'''
full = f'{form}:{prop}'
pdef = self.extprops.get(full)
if pdef is None:
return
for layr in self.layers.values():
async for item in layr.iterPropRows(form, prop):
mesg = f'Nodes still exist with prop: {form}:{prop}'
raise s_exc.CantDelProp(mesg=mesg)
self.model.delFormProp(form, prop)
await self.extprops.pop(full, None)
await self.fire('core:extmodel:change',
form=form, prop=prop, act='del', type='formprop')
async def delUnivProp(self, prop):
udef = self.extunivs.get(prop)
if udef is None:
mesg = f'No ext univ named {prop}'
raise s_exc.NoSuchUniv(name=prop, mesg=mesg)
return await self._push('model:univ:del', prop)
@s_nexus.Pusher.onPush('model:univ:del')
async def _delUnivProp(self, prop):
'''
Remove an extended universal property from the cortex.
'''
udef = self.extunivs.get(prop)
if udef is None:
return
univname = '.' + prop
for layr in self.layers.values():
async for item in layr.iterUnivRows(univname):
mesg = f'Nodes still exist with universal prop: {prop}'
raise s_exc.CantDelUniv(mesg=mesg)
self.model.delUnivProp(prop)
await self.extunivs.pop(prop, None)
await self.fire('core:extmodel:change', name=prop, act='del', type='univ')
async def addTagProp(self, name, tdef, info):
if self.exttagprops.get(name) is not None:
raise s_exc.DupPropName(name=name)
return await self._push('model:tagprop:add', name, tdef, info)
@s_nexus.Pusher.onPush('model:tagprop:add')
async def _addTagProp(self, name, tdef, info):
if self.exttagprops.get(name) is not None:
return
self.model.addTagProp(name, tdef, info)
await self.exttagprops.set(name, (name, tdef, info))
await self.fire('core:tagprop:change', name=name, act='add')
async def delTagProp(self, name):
pdef = self.exttagprops.get(name)
if pdef is None:
mesg = f'No tag prop named {name}'
raise s_exc.NoSuchProp(mesg=mesg, name=name)
return await self._push('model:tagprop:del', name)
@s_nexus.Pusher.onPush('model:tagprop:del')
async def _delTagProp(self, name):
pdef = self.exttagprops.get(name)
if pdef is None:
return
for layr in self.layers.values():
if await layr.hasTagProp(name):
mesg = f'Nodes still exist with tagprop: {name}'
raise s_exc.CantDelProp(mesg=mesg)
self.model.delTagProp(name)
await self.exttagprops.pop(name, None)
await self.fire('core:tagprop:change', name=name, act='del')
async def addNodeTag(self, user, iden, tag, valu=(None, None)):
'''
Add a tag to a node specified by iden.
Args:
iden (str): A hex encoded node BUID.
tag (str): A tag string.
valu (tuple): A time interval tuple or (None, None).
'''
buid = s_common.uhex(iden)
async with await self.snap(user=user) as snap:
with s_provenance.claim('coreapi', meth='tag:add', user=snap.user.iden):
node = await snap.getNodeByBuid(buid)
if node is None:
raise s_exc.NoSuchIden(iden=iden)
await node.addTag(tag, valu=valu)
return node.pack()
async def addNode(self, user, form, valu, props=None):
async with await self.snap(user=user) as snap:
node = await snap.addNode(form, valu, props=props)
return node.pack()
async def delNodeTag(self, user, iden, tag):
'''
Delete a tag from the node specified by iden.
Args:
iden (str): A hex encoded node BUID.
tag (str): A tag string.
'''
buid = s_common.uhex(iden)
async with await self.snap(user=user) as snap:
with s_provenance.claim('coreapi', meth='tag:del', user=snap.user.iden):
node = await snap.getNodeByBuid(buid)
if node is None:
raise s_exc.NoSuchIden(iden=iden)
await node.delTag(tag)
return node.pack()
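# Illustrative sketch of the node helpers above; ``user`` is assumed to be an
# auth user object and ``iden`` a hex encoded node buid, both hypothetical.
#
#     pode = await core.addNode(user, 'inet:ipv4', '1.2.3.4')
#     pode = await core.addNodeTag(user, iden, 'example.tag')
#     pode = await core.delNodeTag(user, iden, 'example.tag')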
async def _onCoreFini(self):
'''
Generic fini handler for cortex components which may change or vary at runtime.
'''
if self.axon:
await self.axon.fini()
async def syncLayerNodeEdits(self, iden, offs, wait=True):
'''
Yield (offs, mesg) tuples for nodeedits in a layer.
'''
layr = self.getLayer(iden)
if layr is None:
raise s_exc.NoSuchLayer(iden=iden)
async for item in layr.syncNodeEdits(offs, wait=wait):
yield item
async def syncLayersEvents(self, offsdict=None, wait=True):
'''
Yield (offs, layriden, STYP, item, meta) tuples for nodeedits for *all* layers, interspersed with add/del
layer messages.
STYP is one of the following constants:
SYNC_NODEEDITS: item is a nodeedits (buid, form, edits)
SYNC_LAYR_ADD: A layer was added (item and meta are empty)
SYNC_LAYR_DEL: A layer was deleted (item and meta are empty)
Args:
offsdict (Optional[Dict[str, int]]): starting nexus/editlog offset by layer iden. Defaults to 0 for
unspecified layers or if offsdict is None.
wait(bool): whether to pend and stream value until this layer is fini'd
'''
async def layrgenr(layr, startoff, endoff=None, newlayer=False):
if newlayer:
yield layr.addoffs, layr.iden, SYNC_LAYR_ADD, (), {}
wait = endoff is None
if not layr.isfini:
async for ioff, item, meta in layr.syncNodeEdits2(startoff, wait=wait):
if endoff is not None and ioff >= endoff: # pragma: no cover
break
yield ioff, layr.iden, SYNC_NODEEDITS, item, meta
if layr.isdeleted:
yield layr.deloffs, layr.iden, SYNC_LAYR_DEL, (), {}
# End of layrgenr
async for item in self._syncNodeEdits(offsdict, layrgenr, wait=wait):
yield item
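# Illustrative sketch of consuming the merged layer event stream yielded above;
# the starting offsets and layer iden are hypothetical.
#
#     offsdict = {layriden: 0}
#     async for offs, layriden, styp, item, meta in core.syncLayersEvents(offsdict, wait=False):
#         ...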
async def syncIndexEvents(self, matchdef, offsdict=None, wait=True):
'''
Yield (offs, layriden, <STYPE>, <item>) tuples from the nodeedit logs of all layers starting
from the given nexus/layer offset (they are synchronized). Only edits that match the filter in matchdef will
be yielded, plus EDIT_PROGRESS (see layer.syncIndexEvents) messages.
The format of the 4th element of the tuple depends on STYPE. STYPE is one of the following constants:
SYNC_LAYR_ADD: item is an empty tuple ()
SYNC_LAYR_DEL: item is an empty tuple ()
SYNC_NODEEDIT: item is (buid, form, ETYPE, VALS, META) or (None, None, s_layer.EDIT_PROGRESS, (), ())
For edits in the past, events are yielded in offset order across all layers. For current data (wait=True),
events across different layers may be emitted slightly out of offset order.
Note:
Will not yield any values from layers created with logedits disabled
Args:
matchdef(Dict[str, Sequence[str]]): a dict describing which events are yielded. See
layer.syncIndexEvents for matchdef specification.
offsdict (Optional[Dict[str, int]]): starting nexus/editlog offset by layer iden. Defaults to 0 for
unspecified layers or if offsdict is None.
wait(bool): whether to pend and stream value until this layer is fini'd
'''
async def layrgenr(layr, startoff, endoff=None, newlayer=False):
''' Yields matching results from a single layer '''
if newlayer:
yield layr.addoffs, layr.iden, SYNC_LAYR_ADD, ()
wait = endoff is None
ioff = startoff
if not layr.isfini:
async for ioff, item in layr.syncIndexEvents(startoff, matchdef, wait=wait):
if endoff is not None and ioff >= endoff: # pragma: no cover
break
yield ioff, layr.iden, SYNC_NODEEDIT, item
if layr.isdeleted:
yield layr.deloffs, layr.iden, SYNC_LAYR_DEL, ()
# End of layrgenr
async for item in self._syncNodeEdits(offsdict, layrgenr, wait=wait):
yield item
async def _syncNodeEdits(self, offsdict, genrfunc, wait=True):
'''
Common guts between syncIndexEvents and syncLayersEvents
First, it streams from the layers up to the current offset, sorted by offset.
Then it streams from all the layers simultaneously.
Args:
offsdict(Dict[str, int]): starting nexus/editlog offset per layer. Defaults to 0 if layer not present
genrfunc(Callable): an async generator function that yields tuples that start with an offset. The input
parameters are:
layr(Layer): a Layer object
startoff(int); the starting offset
endoff(Optional[int]): the ending offset
newlayer(bool): whether to emit a new layer item first
wait(bool): when the end of the log is hit, whether to continue to wait for new entries and yield them
'''
topoffs = await self.getNexsIndx() # The latest offset when this function started
catchingup = True # whether we've caught up to topoffs
layrsadded = {} # layriden -> True. Captures all the layers added while catching up
todo = set() # outstanding futures of active live streaming from layers
layrgenrs = {} # layriden -> genr. maps active layers to that layer's async generator
if offsdict is None:
offsdict = {}
newtodoevent = asyncio.Event()
async with await s_base.Base.anit() as base:
def addlayr(layr, newlayer=False):
'''
A new layer joins the live stream
'''
genr = genrfunc(layr, topoffs, newlayer=newlayer)
layrgenrs[layr.iden] = genr
task = base.schedCoro(genr.__anext__())
task.iden = layr.iden
todo.add(task)
newtodoevent.set()
def onaddlayr(mesg):
etyp, event = mesg
layriden = event['iden']
layr = self.getLayer(layriden)
if catchingup:
layrsadded[layr] = True
return
addlayr(layr, newlayer=True)
self.on('core:layr:add', onaddlayr, base=base)
# First, catch up to what was the current offset when we started, guaranteeing order
genrs = [genrfunc(layr, offsdict.get(layr.iden, 0), endoff=topoffs) for layr in self.layers.values()]
async for item in s_common.merggenr(genrs, lambda x, y: x[0] < y[0]):
yield item
catchingup = False
if not wait:
return
# After we've caught up, read on genrs from all the layers simultaneously
todo.clear()
for layr in self.layers.values():
if layr not in layrsadded:
addlayr(layr)
for layr in layrsadded:
addlayr(layr, newlayer=True)
# Also, wake up if we get fini'd
finitask = base.schedCoro(self.waitfini())
todo.add(finitask)
newtodotask = base.schedCoro(newtodoevent.wait())
todo.add(newtodotask)
while not self.isfini:
newtodoevent.clear()
done, _ = await asyncio.wait(todo, return_when=asyncio.FIRST_COMPLETED)
for donetask in done:
try:
todo.remove(donetask)
if donetask is finitask: # pragma: no cover # We were fini'd
return
if donetask is newtodotask:
newtodotask = base.schedCoro(newtodoevent.wait())
todo.add(newtodotask)
continue
layriden = donetask.iden
result = donetask.result()
yield result
# Re-add a task to wait on the next iteration of the generator
genr = layrgenrs[layriden]
task = base.schedCoro(genr.__anext__())
task.iden = layriden
todo.add(task)
except StopAsyncIteration:
# Help out the garbage collector
del layrgenrs[layriden]
async def spliceHistory(self, user):
'''
Yield splices backwards from the end of the nodeedit log.
Will only return user's own splices unless they are an admin.
'''
layr = self.view.layers[0]
count = 0
async for _, mesg in layr.splicesBack():
count += 1
if not count % 1000: # pragma: no cover
await asyncio.sleep(0)
if user.iden == mesg[1]['user'] or user.isAdmin():
yield mesg
async def _initCoreHive(self):
stormvarsnode = await self.hive.open(('cortex', 'storm', 'vars'))
self.stormvars = await stormvarsnode.dict()
self.onfini(self.stormvars)
async def _initCoreAxon(self):
turl = self.conf.get('axon')
if turl is None:
path = os.path.join(self.dirn, 'axon')
conf = {}
proxyurl = self.conf.get('http:proxy')
if proxyurl is not None:
conf['http:proxy'] = proxyurl
self.axon = await s_axon.Axon.anit(path, conf=conf)
self.axon.onfini(self.axready.clear)
self.dynitems['axon'] = self.axon
self.axready.set()
return
async def teleloop():
self.axready.clear()
while not self.isfini:
try:
self.axon = await s_telepath.openurl(turl)
self.axon.onfini(teleloop)
self.dynitems['axon'] = self.axon
self.axready.set()
return
except asyncio.CancelledError: # TODO: remove once >= py 3.8 only
raise
except Exception as e:
logger.warning('remote axon error: %r' % (e,))
await self.waitfini(1)
self.schedCoro(teleloop())
async def _initStormCmds(self):
'''
Registration for built-in Storm commands.
'''
self.addStormCmd(s_storm.MaxCmd)
self.addStormCmd(s_storm.MinCmd)
self.addStormCmd(s_storm.TeeCmd)
self.addStormCmd(s_storm.TreeCmd)
self.addStormCmd(s_storm.HelpCmd)
self.addStormCmd(s_storm.IdenCmd)
self.addStormCmd(s_storm.SpinCmd)
self.addStormCmd(s_storm.SudoCmd)
self.addStormCmd(s_storm.UniqCmd)
self.addStormCmd(s_storm.CountCmd)
self.addStormCmd(s_storm.GraphCmd)
self.addStormCmd(s_storm.LimitCmd)
self.addStormCmd(s_storm.MergeCmd)
self.addStormCmd(s_storm.SleepCmd)
self.addStormCmd(s_storm.ScrapeCmd)
self.addStormCmd(s_storm.DelNodeCmd)
self.addStormCmd(s_storm.LiftByVerb)
self.addStormCmd(s_storm.MoveTagCmd)
self.addStormCmd(s_storm.ReIndexCmd)
self.addStormCmd(s_storm.EdgesDelCmd)
self.addStormCmd(s_storm.ParallelCmd)
self.addStormCmd(s_storm.TagPruneCmd)
self.addStormCmd(s_storm.ViewExecCmd)
self.addStormCmd(s_storm.BackgroundCmd)
self.addStormCmd(s_storm.SpliceListCmd)
self.addStormCmd(s_storm.SpliceUndoCmd)
self.addStormCmd(s_stormlib_macro.MacroExecCmd)
for cdef in s_stormsvc.stormcmds:
await self._trySetStormCmd(cdef.get('name'), cdef)
for cdef in s_storm.stormcmds:
await self._trySetStormCmd(cdef.get('name'), cdef)
for cdef in s_stormlib_macro.stormcmds:
await self._trySetStormCmd(cdef.get('name'), cdef)
for cdef in s_stormlib_model.stormcmds:
await self._trySetStormCmd(cdef.get('name'), cdef)
async def _initPureStormCmds(self):
oldcmds = []
for name, cdef in self.cmdhive.items():
cmdiden = cdef.get('cmdconf', {}).get('svciden')
if cmdiden and self.svchive.get(cmdiden) is None:
oldcmds.append(name)
else:
await self._trySetStormCmd(name, cdef)
for name in oldcmds:
logger.warning(f'Removing old command: [{name}]')
await self.cmdhive.pop(name)
for pkgdef in self.pkghive.values():
await self._tryLoadStormPkg(pkgdef)
async def _trySetStormCmd(self, name, cdef):
try:
await self._setStormCmd(cdef)
except (asyncio.CancelledError, Exception):
logger.exception(f'Storm command load failed: {name}')
def _initStormLibs(self):
'''
Registration for built-in Storm Libraries
'''
for path, ctor in s_stormtypes.registry.iterLibs():
# Skip libbase which is registered as a default ctor in the storm Runtime
if path:
self.addStormLib(path, ctor)
def _initSplicers(self):
'''
Registration for splice handlers.
'''
splicers = {
'tag:add': self._onFeedTagAdd,
'tag:del': self._onFeedTagDel,
'node:add': self._onFeedNodeAdd,
'node:del': self._onFeedNodeDel,
'prop:set': self._onFeedPropSet,
'prop:del': self._onFeedPropDel,
'tag:prop:set': self._onFeedTagPropSet,
'tag:prop:del': self._onFeedTagPropDel,
}
self.splicers.update(**splicers)
def _initFeedFuncs(self):
'''
Registration for built-in Cortex feed functions.
'''
self.setFeedFunc('syn.nodes', self._addSynNodes)
self.setFeedFunc('syn.splice', self._addSynSplice)
self.setFeedFunc('syn.nodeedits', self._addSynNodeEdits)
def _initCortexHttpApi(self):
'''
Registration for built-in Cortex httpapi endpoints
'''
self.addHttpApi('/api/v1/storm', s_httpapi.StormV1, {'cell': self})
self.addHttpApi('/api/v1/watch', s_httpapi.WatchSockV1, {'cell': self})
self.addHttpApi('/api/v1/storm/call', s_httpapi.StormCallV1, {'cell': self})
self.addHttpApi('/api/v1/storm/nodes', s_httpapi.StormNodesV1, {'cell': self})
self.addHttpApi('/api/v1/storm/export', s_httpapi.StormExportV1, {'cell': self})
self.addHttpApi('/api/v1/reqvalidstorm', s_httpapi.ReqValidStormV1, {'cell': self})
self.addHttpApi('/api/v1/storm/vars/set', s_httpapi.StormVarsSetV1, {'cell': self})
self.addHttpApi('/api/v1/storm/vars/get', s_httpapi.StormVarsGetV1, {'cell': self})
self.addHttpApi('/api/v1/storm/vars/pop', s_httpapi.StormVarsPopV1, {'cell': self})
self.addHttpApi('/api/v1/model', s_httpapi.ModelV1, {'cell': self})
self.addHttpApi('/api/v1/model/norm', s_httpapi.ModelNormV1, {'cell': self})
self.addHttpApi('/api/v1/core/info', s_httpapi.CoreInfoV1, {'cell': self})
async def getCellApi(self, link, user, path):
if not path:
return await self.cellapi.anit(self, link, user)
# allow an admin to directly open the cortex hive
# (perhaps this should be a Cell() level pattern)
if path[0] == 'hive' and user.isAdmin():
return await self.hiveapi.anit(self.hive, user)
if path[0] == 'layer':
if len(path) == 1:
# get the top layer for the default view
layr = self.getLayer()
return await self.layerapi.anit(self, link, user, layr)
if len(path) == 2:
layr = self.getLayer(path[1])
if layr is None:
raise s_exc.NoSuchLayer(iden=path[1])
return await self.layerapi.anit(self, link, user, layr)
if path[0] == 'view':
view = None
if len(path) == 1:
view = self.getView(user=user)
elif len(path) == 2:
view = self.getView(path[1], user=user)
if view is not None:
return await self.viewapi.anit(self, link, user, view)
raise s_exc.NoSuchPath(path=path)
async def getModelDict(self):
return self.model.getModelDict()
async def getModelDefs(self):
return self.model.getModelDefs()
async def getFormCounts(self):
'''
Return total form counts for all existing layers
'''
counts = collections.defaultdict(int)
for layr in self.layers.values():
layrcounts = await layr.getFormCounts()
for name, valu in layrcounts.items():
counts[name] += valu
return dict(counts)
def onTagAdd(self, name, func):
'''
Register a callback for tag addition.
Args:
name (str): The name of the tag or tag glob.
func (function): The callback func(node, tagname, tagval).
'''
# TODO allow name wild cards
if '*' in name:
self.ontagaddglobs.add(name, func)
else:
self.ontagadds[name].append(func)
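# Illustrative sketch of registering a tag-add callback; the tag glob and the
# callback body are hypothetical.
#
#     async def ontagadd(node, tagname, tagval):
#         ...
#
#     core.onTagAdd('example.*', ontagadd)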
def offTagAdd(self, name, func):
'''
Unregister a callback for tag addition.
Args:
name (str): The name of the tag or tag glob.
func (function): The callback func(node, tagname, tagval).
'''
if '*' in name:
self.ontagaddglobs.rem(name, func)
return
cblist = self.ontagadds.get(name)
if cblist is None:
return
try:
cblist.remove(func)
except ValueError:
pass
def onTagDel(self, name, func):
'''
Register a callback for tag deletion.
Args:
name (str): The name of the tag or tag glob.
func (function): The callback func(node, tagname, tagval).
'''
if '*' in name:
self.ontagdelglobs.add(name, func)
else:
self.ontagdels[name].append(func)
def offTagDel(self, name, func):
'''
Unregister a callback for tag deletion.
Args:
name (str): The name of the tag or tag glob.
func (function): The callback func(node, tagname, tagval).
'''
if '*' in name:
self.ontagdelglobs.rem(name, func)
return
cblist = self.ontagdels.get(name)
if cblist is None:
return
try:
cblist.remove(func)
except ValueError:
pass
def addRuntLift(self, prop, func):
'''
Register a runt lift helper for a given prop.
Args:
prop (str): Full property name for the prop to register the helper for.
func:
Returns:
None: None.
'''
self._runtLiftFuncs[prop] = func
async def runRuntLift(self, full, valu=None, cmpr=None):
'''
Execute a runt lift function.
Args:
full (str): Property to lift by.
valu:
cmpr:
Returns:
bytes, list: Yields bytes, list tuples where the list contains a series of
key/value pairs which are used to construct a Node object.
'''
func = self._runtLiftFuncs.get(full)
if func is not None:
async for pode in func(full, valu, cmpr):
yield pode
def addRuntPropSet(self, full, func):
'''
Register a prop set helper for a runt form
'''
self._runtPropSetFuncs[full] = func
async def runRuntPropSet(self, node, prop, valu):
func = self._runtPropSetFuncs.get(prop.full)
if func is None:
raise s_exc.IsRuntForm(mesg='No prop:set func set for runt property.',
prop=prop.full, valu=valu, ndef=node.ndef)
ret = await s_coro.ornot(func, node, prop, valu)
return ret
def addRuntPropDel(self, full, func):
'''
Register a prop set helper for a runt form
'''
self._runtPropDelFuncs[full] = func
async def runRuntPropDel(self, node, prop):
func = self._runtPropDelFuncs.get(prop.full)
if func is None:
raise s_exc.IsRuntForm(mesg='No prop:del func set for runt property.',
prop=prop.full, ndef=node.ndef)
ret = await s_coro.ornot(func, node, prop)
return ret
async def _checkLayerModels(self):
mrev = s_modelrev.ModelRev(self)
await mrev.revCoreLayers()
async def _loadView(self, node):
view = await self.viewctor(self, node)
self.views[view.iden] = view
self.dynitems[view.iden] = view
async def fini():
self.views.pop(view.iden, None)
self.dynitems.pop(view.iden, None)
view.onfini(fini)
return view
async def _initCoreViews(self):
defiden = self.cellinfo.get('defaultview')
for iden, node in await self.hive.open(('cortex', 'views')):
view = await self._loadView(node)
if iden == defiden:
self.view = view
for view in self.views.values():
view.init2()
# if we have no views, we are initializing. Add a default main view and layer.
if not self.views:
assert self.inaugural, 'Cortex initialization failed: there are no views.'
ldef = {'name': 'default'}
ldef = await self.addLayer(ldef=ldef, nexs=False)
layriden = ldef.get('iden')
role = await self.auth.getRoleByName('all')
await role.addRule((True, ('layer', 'read')), gateiden=layriden, nexs=False)
vdef = {
'name': 'default',
'layers': (layriden,),
'worldreadable': True,
}
vdef = await self.addView(vdef, nexs=False)
iden = vdef.get('iden')
await self.cellinfo.set('defaultview', iden)
self.view = self.getView(iden)
async def addView(self, vdef, nexs=True):
vdef['iden'] = s_common.guid()
vdef.setdefault('parent', None)
vdef.setdefault('worldreadable', False)
vdef.setdefault('creator', self.auth.rootuser.iden)
s_view.reqValidVdef(vdef)
if nexs:
return await self._push('view:add', vdef)
else:
return await self._addView(vdef)
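# Illustrative sketch of adding a view on top of an existing layer; the layer
# iden is hypothetical.
#
#     vdef = {'name': 'example view', 'layers': (layriden,), 'worldreadable': False}
#     vdef = await core.addView(vdef)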
@s_nexus.Pusher.onPush('view:add')
async def _addView(self, vdef):
s_view.reqValidVdef(vdef)
iden = vdef['iden']
if iden in self.views:
return
for lyriden in vdef['layers']:
if lyriden not in self.layers:
raise s_exc.NoSuchLayer(iden=lyriden)
creator = vdef.get('creator', self.auth.rootuser.iden)
user = await self.auth.reqUser(creator)
await self.auth.addAuthGate(iden, 'view')
await user.setAdmin(True, gateiden=iden, logged=False)
# worldreadable is not persisted with the view; the state ends up in perms
worldread = vdef.pop('worldreadable', False)
if worldread:
role = await self.auth.getRoleByName('all')
await role.addRule((True, ('view', 'read')), gateiden=iden, nexs=False)
node = await self.hive.open(('cortex', 'views', iden))
info = await node.dict()
for name, valu in vdef.items():
await info.set(name, valu)
view = await self._loadView(node)
view.init2()
return await view.pack()
async def delView(self, iden):
view = self.views.get(iden)
if view is None:
raise s_exc.NoSuchView(iden=iden)
return await self._push('view:del', iden)
@s_nexus.Pusher.onPush('view:del')
async def _delView(self, iden):
'''
Delete a cortex view by iden.
Note:
This does not delete any of the view's layers
'''
view = self.views.get(iden, None)
if view is None:
return
if iden == self.view.iden:
raise s_exc.SynErr(mesg='Cannot delete the main view')
for cview in self.views.values():
if cview.parent is not None and cview.parent.iden == iden:
raise s_exc.SynErr(mesg='Cannot delete a view that has children')
await self.hive.pop(('cortex', 'views', iden))
await view.delete()
await self.auth.delAuthGate(iden)
async def delLayer(self, iden):
layr = self.layers.get(iden, None)
if layr is None:
raise s_exc.NoSuchLayer(iden=iden)
return await self._push('layer:del', iden)
@s_nexus.Pusher.onPush('layer:del', passitem=True)
async def _delLayer(self, iden, nexsitem):
layr = self.layers.get(iden, None)
if layr is None:
return
for view in self.views.values():
if layr in view.layers:
raise s_exc.LayerInUse(iden=iden)
del self.layers[iden]
for pdef in layr.layrinfo.get('pushs', {}).values():
await self.delActiveCoro(pdef.get('iden'))
for pdef in layr.layrinfo.get('pulls', {}).values():
await self.delActiveCoro(pdef.get('iden'))
await self.auth.delAuthGate(iden)
self.dynitems.pop(iden)
await self.hive.pop(('cortex', 'layers', iden))
await layr.delete()
layr.deloffs = nexsitem[0]
async def setViewLayers(self, layers, iden=None):
'''
Args:
layers ([str]): A top-down list of layer guids
iden (str): The view iden (defaults to default view).
'''
view = self.getView(iden)
if view is None:
raise s_exc.NoSuchView(iden=iden)
await view.setLayers(layers)
def getLayer(self, iden=None):
'''
Get a Layer object.
Args:
iden (str): The layer iden to retrieve.
Returns:
Layer: A Layer object.
'''
if iden is None:
return self.view.layers[0]
# For backwards compatibility, resolve references to old layer iden == cortex.iden to the main layer
# TODO: due to our migration policy, remove in 3.0.0
if iden == self.iden:
return self.view.layers[0]
return self.layers.get(iden)
def listLayers(self):
return self.layers.values()
async def getLayerDef(self, iden=None):
layr = self.getLayer(iden)
if layr is not None:
return await layr.pack()
async def getLayerDefs(self):
return [await lyr.pack() for lyr in list(self.layers.values())]
def getView(self, iden=None, user=None):
'''
Get a View object.
Args:
iden (str): The View iden to retrieve.
Returns:
View: A View object.
'''
if iden is None:
if user is not None:
iden = user.profile.get('cortex:view')
if iden is None:
iden = self.view.iden
# For backwards compatibility, resolve references to old view iden == cortex.iden to the main view
# TODO: due to our migration policy, remove in 3.0.0
if iden == self.iden:
iden = self.view.iden
view = self.views.get(iden)
if view is None:
return None
if user is not None:
user.confirm(('view', 'read'), gateiden=iden)
return view
def listViews(self):
return list(self.views.values())
async def getViewDef(self, iden):
view = self.getView(iden=iden)
if view is not None:
return await view.pack()
async def getViewDefs(self):
return [await v.pack() for v in list(self.views.values())]
async def addLayer(self, ldef=None, nexs=True):
'''
Add a Layer to the cortex.
Args:
ldef (Optional[Dict]): layer configuration
nexs (bool): whether to record a nexus transaction (internal use only)
'''
ldef = ldef or {}
ldef['iden'] = s_common.guid()
ldef.setdefault('creator', self.auth.rootuser.iden)
ldef.setdefault('lockmemory', self.conf.get('layers:lockmemory'))
ldef.setdefault('logedits', self.conf.get('layers:logedits'))
ldef.setdefault('readonly', False)
s_layer.reqValidLdef(ldef)
if nexs:
return await self._push('layer:add', ldef)
else:
return await self._addLayer(ldef, (None, None))
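# Illustrative sketch of adding a layer; the option values shown are
# hypothetical and fall back to the defaults set above when omitted.
#
#     ldef = {'name': 'example layer', 'logedits': True}
#     ldef = await core.addLayer(ldef)
#     layriden = ldef.get('iden')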
@s_nexus.Pusher.onPush('layer:add', passitem=True)
async def _addLayer(self, ldef, nexsitem):
s_layer.reqValidLdef(ldef)
iden = ldef.get('iden')
if iden in self.layers:
return
layr = self.layers.get(iden)
if layr is not None:
return await layr.pack()
creator = ldef.get('creator')
user = await self.auth.reqUser(creator)
node = await self.hive.open(('cortex', 'layers', iden))
layrinfo = await node.dict()
for name, valu in ldef.items():
await layrinfo.set(name, valu)
layr = await self._initLayr(layrinfo, nexsoffs=nexsitem[0])
await user.setAdmin(True, gateiden=iden, logged=False)
# forward wind the new layer to the current model version
await layr.setModelVers(s_modelrev.maxvers)
return await layr.pack()
async def _initLayr(self, layrinfo, nexsoffs=None):
'''
Instantiate a Layer() instance via the provided layer info HiveDict.
'''
layr = await self._ctorLayr(layrinfo)
layr.addoffs = nexsoffs
self.layers[layr.iden] = layr
self.dynitems[layr.iden] = layr
if self.maxnodes:
counts = await layr.getFormCounts()
self.nodecount += sum(counts.values())
def onadd():
self.nodecount += 1
def ondel():
self.nodecount -= 1
layr.nodeAddHook = onadd
layr.nodeDelHook = ondel
await self.auth.addAuthGate(layr.iden, 'layer')
for pdef in layrinfo.get('pushs', {}).values():
await self.runLayrPush(layr, pdef)
for pdef in layrinfo.get('pulls', {}).values():
await self.runLayrPull(layr, pdef)
await self.fire('core:layr:add', iden=layr.iden)
return layr
async def _ctorLayr(self, layrinfo):
'''
Actually construct the Layer instance for the given HiveDict.
'''
iden = layrinfo.get('iden')
path = s_common.gendir(self.dirn, 'layers', iden)
# If we're a mirror follower and have a downstream layer, disable upstream sync
# TODO allow_upstream needs to be separated out
mirror = self.conf.get('mirror')
return await s_layer.Layer.anit(layrinfo, path, nexsroot=self.nexsroot, allow_upstream=not mirror)
async def _initCoreLayers(self):
node = await self.hive.open(('cortex', 'layers'))
for _, node in node:
layrinfo = await node.dict()
await self._initLayr(layrinfo)
@s_nexus.Pusher.onPushAuto('layer:push:add')
async def addLayrPush(self, layriden, pdef):
reqValidPush(pdef)
iden = pdef.get('iden')
layr = self.layers.get(layriden)
if layr is None:
return
pushs = layr.layrinfo.get('pushs')
if pushs is None:
pushs = {}
# handle last-message replay
if pushs.get(iden) is not None:
return
pushs[iden] = pdef
await layr.layrinfo.set('pushs', pushs)
await self.runLayrPush(layr, pdef)
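# Illustrative sketch of configuring a layer push; the exact required keys are
# defined by reqValidPush(), and the telepath URL, idens, and user value below
# are hypothetical placeholders only.
#
#     pdef = {
#         'iden': s_common.guid(),
#         'url': 'tcp://user:pass@remotehost:27492/layer',
#         'user': useriden,
#         'time': s_common.now(),
#     }
#     await core.addLayrPush(layriden, pdef)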
@s_nexus.Pusher.onPushAuto('layer:push:del')
async def delLayrPush(self, layriden, pushiden):
layr = self.layers.get(layriden)
if layr is None:
return
pushs = layr.layrinfo.get('pushs')
if pushs is None:
return
pdef = pushs.pop(pushiden, None)
if pdef is None:
return
await layr.layrinfo.set('pushs', pushs)
await self.delActiveCoro(pushiden)
@s_nexus.Pusher.onPushAuto('layer:pull:add')
async def addLayrPull(self, layriden, pdef):
reqValidPull(pdef)
iden = pdef.get('iden')
layr = self.layers.get(layriden)
if layr is None:
return
pulls = layr.layrinfo.get('pulls')
if pulls is None:
pulls = {}
# handle last-message replay
if pulls.get(iden) is not None:
return
pulls[iden] = pdef
await layr.layrinfo.set('pulls', pulls)
await self.runLayrPull(layr, pdef)
@s_nexus.Pusher.onPushAuto('layer:pull:del')
async def delLayrPull(self, layriden, pulliden):
layr = self.layers.get(layriden)
if layr is None:
return
pulls = layr.layrinfo.get('pulls')
if pulls is None:
return
pdef = pulls.pop(pulliden, None)
if pdef is None:
return
await layr.layrinfo.set('pulls', pulls)
await self.delActiveCoro(pulliden)
async def runLayrPush(self, layr, pdef):
url = pdef.get('url')
iden = pdef.get('iden')
# push() will refire as needed
async def push():
async with await self.boss.promote(f'layer push: {layr.iden} {iden}', self.auth.rootuser):
async with await s_telepath.openurl(url) as proxy:
await self._pushBulkEdits(layr, proxy, pdef)
self.addActiveCoro(push, iden=iden)
async def runLayrPull(self, layr, pdef):
url = pdef.get('url')
iden = pdef.get('iden')
# pull() will refire as needed
async def pull():
async with await self.boss.promote(f'layer pull: {layr.iden} {iden}', self.auth.rootuser):
async with await s_telepath.openurl(url) as proxy:
await self._pushBulkEdits(proxy, layr, pdef)
self.addActiveCoro(pull, iden=iden)
async def _pushBulkEdits(self, layr0, layr1, pdef):
iden = pdef.get('iden')
user = pdef.get('user')
gvar = f'push:{iden}'
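# The last synced offset is kept in a storm variable so an interrupted
# push/pull can resume from where it left off.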
async with await s_base.Base.anit() as base:
queue = s_queue.Queue(maxsize=10000)
async def fill():
try:
filloffs = await self.getStormVar(gvar, -1)
async for item in layr0.syncNodeEdits(filloffs + 1, wait=True):
await queue.put(item)
await queue.close()
except asyncio.CancelledError: # pragma: no cover
raise
except Exception as e:
logger.exception(f'pushBulkEdits fill() error: {e}')
await queue.close()
base.schedCoro(fill())
async for chunk in queue.slices():
meta = {'time': s_common.now(), 'user': user}
alledits = []
for offs, edits in chunk:
# prevent push->push->push nodeedits growth
alledits.extend(edits)
if len(alledits) > 1000:
await layr1.storNodeEdits(alledits, meta)
await self.setStormVar(gvar, offs)
alledits.clear()
if alledits:
await layr1.storNodeEdits(alledits, meta)
await self.setStormVar(gvar, offs)
async def _checkNexsIndx(self):
layroffs = [await layr.getEditIndx() for layr in list(self.layers.values())]
if layroffs:
maxindx = max(layroffs)
if maxindx > await self.getNexsIndx():
await self.setNexsIndx(maxindx)
async def cloneLayer(self, iden, ldef=None):
'''
Make a copy of a Layer in the cortex.
Args:
iden (str): Layer iden to clone
ldef (Optional[Dict]): Layer configuration overrides
Note:
This should only be called with a reasonably static Cortex
due to possible races.
'''
layr = self.layers.get(iden, None)
if layr is None:
raise s_exc.NoSuchLayer(iden=iden)
ldef = ldef or {}
ldef['iden'] = s_common.guid()
ldef.setdefault('creator', self.auth.rootuser.iden)
return await self._push('layer:clone', iden, ldef)
@s_nexus.Pusher.onPush('layer:clone', passitem=True)
async def _cloneLayer(self, iden, ldef, nexsitem):
layr = self.layers.get(iden)
if layr is None:
return
newiden = ldef.get('iden')
if newiden in self.layers:
return
newpath = s_common.gendir(self.dirn, 'layers', newiden)
await layr.clone(newpath)
node = await self.hive.open(('cortex', 'layers', iden))
copynode = await self.hive.open(('cortex', 'layers', newiden))
layrinfo = await node.dict()
copyinfo = await copynode.dict()
for name, valu in layrinfo.items():
await copyinfo.set(name, valu)
for name, valu in ldef.items():
await copyinfo.set(name, valu)
copylayr = await self._initLayr(copyinfo, nexsoffs=nexsitem[0])
creator = copyinfo.get('creator')
user = await self.auth.reqUser(creator)
await user.setAdmin(True, gateiden=newiden, logged=False)
return await copylayr.pack()
def addStormCmd(self, ctor):
'''
Add a synapse.lib.storm.Cmd class to the cortex.
'''
if not s_grammar.isCmdName(ctor.name):
raise s_exc.BadCmdName(name=ctor.name)
self.stormcmds[ctor.name] = ctor
async def addStormDmon(self, ddef):
'''
Add a storm dmon task.
'''
iden = s_common.guid()
ddef['iden'] = iden
return await self._push('storm:dmon:add', ddef)
@s_nexus.Pusher.onPushAuto('storm:dmon:bump')
async def bumpStormDmon(self, iden):
ddef = self.stormdmonhive.get(iden)
if ddef is None:
return False
if self.isactive:
dmon = self.stormdmons.getDmon(iden)
if dmon is not None:
await dmon.bump()
return True
async def _bumpUserDmons(self, iden):
'''
Bump all the Dmons for a given user.
Args:
iden (str): User iden.
'''
for dmoniden, ddef in list(self.stormdmonhive.items()):
if ddef.get('user') == iden:
await self.bumpStormDmon(dmoniden)
@s_nexus.Pusher.onPushAuto('storm:dmon:enable')
async def enableStormDmon(self, iden):
ddef = self.stormdmonhive.get(iden)
if ddef is None:
return False
curv = ddef.get('enabled')
ddef['enabled'] = True
await self.stormdmonhive.set(iden, ddef)
if self.isactive and not curv:
dmon = self.stormdmons.getDmon(iden)
await dmon.run()
return True
@s_nexus.Pusher.onPushAuto('storm:dmon:disable')
async def disableStormDmon(self, iden):
ddef = self.stormdmonhive.get(iden)
if ddef is None:
return False
curv = ddef.get('enabled')
ddef['enabled'] = False
await self.stormdmonhive.set(iden, ddef)
if self.isactive and curv:
dmon = self.stormdmons.getDmon(iden)
await dmon.stop()
return True
@s_nexus.Pusher.onPush('storm:dmon:add')
async def _onAddStormDmon(self, ddef):
iden = ddef['iden']
dmon = self.stormdmons.getDmon(iden)
if dmon is not None:
return dmon.pack()
if ddef.get('user') is None:
user = await self.auth.getUserByName('root')
ddef['user'] = user.iden
dmon = await self.runStormDmon(iden, ddef)
await self.stormdmonhive.set(iden, ddef)
return dmon.pack()
async def delStormDmon(self, iden):
'''
Stop and remove a storm dmon.
'''
ddef = self.stormdmonhive.get(iden)
if ddef is None:
mesg = f'No storm daemon exists with iden {iden}.'
raise s_exc.NoSuchIden(mesg=mesg)
return await self._push('storm:dmon:del', iden)
@s_nexus.Pusher.onPush('storm:dmon:del')
async def _delStormDmon(self, iden):
ddef = await self.stormdmonhive.pop(iden)
if ddef is None: # pragma: no cover
return
await self.stormdmons.popDmon(iden)
def getStormCmd(self, name):
return self.stormcmds.get(name)
async def runStormDmon(self, iden, ddef):
# validate ddef before firing task
s_storm.reqValidDdef(ddef)
dmon = self.stormdmons.getDmon(iden)
if dmon is not None:
return dmon
await self.auth.reqUser(ddef['user'])
# raises if parser failure
self.getStormQuery(ddef.get('storm'))
dmon = await self.stormdmons.addDmon(iden, ddef)
return dmon
async def getStormDmon(self, iden):
return self.stormdmons.getDmonDef(iden)
async def getStormDmons(self):
return self.stormdmons.getDmonDefs()
async def getStormDmonLog(self, iden):
return self.stormdmons.getDmonRunlog(iden)
def addStormLib(self, path, ctor):
root = self.libroot
# (name, {kids}, {funcs})
for name in path:
step = root[1].get(name)
if step is None:
step = (name, {}, {})
root[1][name] = step
root = step
root[2]['ctor'] = ctor
def getStormLib(self, path):
root = self.libroot
for name in path:
step = root[1].get(name)
if step is None:
return None
root = step
return root
def getStormCmds(self):
return list(self.stormcmds.items())
async def getAxon(self):
await self.axready.wait()
return self.axon.iden
def setFeedFunc(self, name, func):
'''
Set a data ingest function.
def func(snap, items):
loaditems...
'''
self.feedfuncs[name] = func
def getFeedFunc(self, name):
'''
Get a data ingest function.
'''
return self.feedfuncs.get(name)
async def getFeedFuncs(self):
ret = []
for name, ctor in self.feedfuncs.items():
# TODO - Future support for feed functions defined via Storm.
doc = getattr(ctor, '__doc__', None)
if doc is None:
doc = 'No feed docstring'
doc = doc.strip()
desc = doc.split('\n')[0]
ret.append({'name': name,
'desc': desc,
'fulldoc': doc,
})
return tuple(ret)
async def _addSynNodes(self, snap, items):
'''
Add nodes to the Cortex via the packed node format.
'''
async for node in snap.addNodes(items):
yield node
async def _addSynSplice(self, snap, items):
for item in items:
func = self.splicers.get(item[0])
if func is None:
await snap.warn(f'no such splice: {item!r}')
continue
try:
await func(snap, item)
except asyncio.CancelledError: # pragma: no cover TODO: remove once >= py 3.8 only
raise
except Exception as e:
logger.exception('splice error')
await snap.warn(f'splice error: {e}')
async def _onFeedNodeAdd(self, snap, mesg):
ndef = mesg[1].get('ndef')
if ndef is None:
await snap.warn(f'Invalid Splice: {mesg!r}')
return
await snap.addNode(*ndef)
async def _onFeedNodeDel(self, snap, mesg):
ndef = mesg[1].get('ndef')
node = await snap.getNodeByNdef(ndef)
if node is None:
return
await node.delete()
async def _onFeedPropSet(self, snap, mesg):
ndef = mesg[1].get('ndef')
name = mesg[1].get('prop')
valu = mesg[1].get('valu')
node = await snap.getNodeByNdef(ndef)
if node is None:
return
await node.set(name, valu)
async def _onFeedPropDel(self, snap, mesg):
ndef = mesg[1].get('ndef')
name = mesg[1].get('prop')
node = await snap.getNodeByNdef(ndef)
if node is None:
return
await node.pop(name)
async def _onFeedTagAdd(self, snap, mesg):
ndef = mesg[1].get('ndef')
tag = mesg[1].get('tag')
valu = mesg[1].get('valu')
node = await snap.getNodeByNdef(ndef)
if node is None:
return
await node.addTag(tag, valu=valu)
async def _onFeedTagDel(self, snap, mesg):
ndef = mesg[1].get('ndef')
tag = mesg[1].get('tag')
node = await snap.getNodeByNdef(ndef)
if node is None:
return
await node.delTag(tag)
async def _onFeedTagPropSet(self, snap, mesg):
tag = mesg[1].get('tag')
prop = mesg[1].get('prop')
ndef = mesg[1].get('ndef')
valu = mesg[1].get('valu')
node = await snap.getNodeByNdef(ndef)
if node is not None:
await node.setTagProp(tag, prop, valu)
async def _onFeedTagPropDel(self, snap, mesg):
tag = mesg[1].get('tag')
prop = mesg[1].get('prop')
ndef = mesg[1].get('ndef')
node = await snap.getNodeByNdef(ndef)
if node is not None:
await node.delTagProp(tag, prop)
async def _addSynNodeEdits(self, snap, items):
for item in items:
item = s_common.unjsonsafe_nodeedits(item)
await snap.applyNodeEdits(item)
async def setUserLocked(self, iden, locked):
retn = await s_cell.Cell.setUserLocked(self, iden, locked)
await self._bumpUserDmons(iden)
return retn
def getCoreMod(self, name):
return self.modules.get(name)
def getCoreMods(self):
ret = []
for modname, mod in self.modules.items():
ret.append((modname, mod.conf))
return ret
def _initStormOpts(self, opts):
if opts is None:
opts = {}
opts.setdefault('user', self.auth.rootuser.iden)
return opts
def _viewFromOpts(self, opts):
user = self._userFromOpts(opts)
viewiden = opts.get('view')
if viewiden is None:
viewiden = user.profile.get('cortex:view')
if viewiden is None:
viewiden = self.view.iden
# For backwards compatibility, resolve references to old view iden == cortex.iden to the main view
# TODO: due to our migration policy, remove in 3.0.0
if viewiden == self.iden: # pragma: no cover
viewiden = self.view.iden
view = self.views.get(viewiden)
if view is None:
raise s_exc.NoSuchView(iden=viewiden)
user.confirm(('view', 'read'), gateiden=viewiden)
return view
def _userFromOpts(self, opts):
if opts is None:
return self.auth.rootuser
useriden = opts.get('user')
if useriden is None:
return self.auth.rootuser
user = self.auth.user(useriden)
if user is None:
mesg = f'No user found with iden: {useriden}'
raise s_exc.NoSuchUser(mesg, iden=useriden)
return user
async def count(self, text, opts=None):
opts = self._initStormOpts(opts)
view = self._viewFromOpts(opts)
i = 0
async for _ in view.eval(text, opts=opts):
i += 1
return i
async def storm(self, text, opts=None):
'''
Evaluate a storm query and yield result messages.
'''
opts = self._initStormOpts(opts)
view = self._viewFromOpts(opts)
async for mesg in view.storm(text, opts=opts):
yield mesg
async def callStorm(self, text, opts=None):
opts = self._initStormOpts(opts)
view = self._viewFromOpts(opts)
return await view.callStorm(text, opts=opts)
async def exportStorm(self, text, opts=None):
opts = self._initStormOpts(opts)
user = self._userFromOpts(opts)
view = self._viewFromOpts(opts)
await self.boss.promote('storm:export', user=user, info={'query': text})
spooldict = await s_spooled.Dict.anit()
async with await self.snap(user=user, view=view) as snap:
async for pode in snap.iterStormPodes(text, opts=opts):
await spooldict.set(pode[1]['iden'], pode)
await asyncio.sleep(0)
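# Second pass: attach only edges whose destination node is also part of
# the export, so the emitted podes remain self contained.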
for iden, pode in spooldict.items():
await asyncio.sleep(0)
edges = []
async for verb, n2iden in snap.iterNodeEdgesN1(s_common.uhex(iden)):
await asyncio.sleep(0)
if not spooldict.has(n2iden):
continue
edges.append((verb, n2iden))
if edges:
pode[1]['edges'] = edges
yield pode
async def feedFromAxon(self, sha256, opts=None):
opts = self._initStormOpts(opts)
user = self._userFromOpts(opts)
view = self._viewFromOpts(opts)
await self.boss.promote('feeddata', user=user, info={'name': 'syn.nodes', 'sha256': sha256})
# ensure that the user can make all node edits in the layer
user.confirm(('node',), gateiden=view.layers[0].iden)
q = s_queue.Queue(maxsize=10000)
feedexc = None
async with await s_base.Base.anit() as base:
async def fill():
nonlocal feedexc
try:
async for item in self.axon.iterMpkFile(sha256):
await q.put(item)
except Exception as e:
logger.exception(f'feedFromAxon.fill(): {e}')
feedexc = e
finally:
await q.close()
base.schedCoro(fill())
count = 0
async with await self.snap(user=user, view=view) as snap:
# feed the items directly to syn.nodes
async for items in q.slices(size=100):
async for node in self._addSynNodes(snap, items):
count += 1
if feedexc is not None:
raise feedexc
return count
async def nodes(self, text, opts=None):
'''
A simple non-streaming way to return a list of nodes.
'''
if self.isfini: # pragma: no cover
raise s_exc.IsFini()
opts = self._initStormOpts(opts)
view = self._viewFromOpts(opts)
return await view.nodes(text, opts=opts)
async def eval(self, text, opts=None):
'''
Evaluate a storm query and yield packed nodes.
NOTE: This API is deprecated as of 2.0.0 and will be removed in 3.0.0
'''
s_common.deprecated('eval')
opts = self._initStormOpts(opts)
view = self._viewFromOpts(opts)
async for node in view.eval(text, opts=opts):
yield node
async def stormlist(self, text, opts=None):
return [m async for m in self.storm(text, opts=opts)]
@s_cache.memoizemethod(size=10000)
def getStormQuery(self, text, mode='storm'):
'''
Parse storm query text and return a Query object.
'''
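# The underlying parse results may be cached/shared, so deepcopy the parsed
# query before init() binds it to this cortex.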
query = copy.deepcopy(s_parser.parseQuery(text, mode=mode))
query.init(self)
return query
@contextlib.asynccontextmanager
async def getStormRuntime(self, query, opts=None):
opts = self._initStormOpts(opts)
view = self._viewFromOpts(opts)
user = self._userFromOpts(opts)
async with await self.snap(user=user, view=view) as snap:
async with snap.getStormRuntime(query, opts=opts, user=user) as runt:
yield runt
async def reqValidStorm(self, text, opts=None):
'''
Parse a storm query to validate it.
Args:
text (str): The text of the Storm query to parse.
opts (dict): A Storm options dictionary.
Returns:
True: If the query is valid.
Raises:
BadSyntaxError: If the query is invalid.
'''
if opts is None:
opts = {}
mode = opts.get('mode', 'storm')
self.getStormQuery(text, mode)
return True
def _logStormQuery(self, text, user):
'''
Log a storm query.
'''
if self.conf.get('storm:log'):
lvl = self.conf.get('storm:log:level')
stormlogger.log(lvl, 'Executing storm query {%s} as [%s]', text, user.name,
extra={'synapse': {'text': text, 'username': user.name, 'user': user.iden}})
async def getNodeByNdef(self, ndef, view=None):
'''
Return a single Node() instance by (form,valu) tuple.
'''
name, valu = ndef
form = self.model.forms.get(name)
if form is None:
raise s_exc.NoSuchForm(name=name)
norm, info = form.type.norm(valu)
buid = s_common.buid((form.name, norm))
async with await self.snap(view=view) as snap:
return await snap.getNodeByBuid(buid)
def getCoreInfo(self):
s_common.deprecated('Cortex.getCoreInfo')
return {
'version': synapse.version,
'modeldef': self.model.getModelDefs(),
'stormcmds': {cmd: {} for cmd in self.stormcmds.keys()},
}
async def getCoreInfoV2(self):
return {
'version': synapse.version,
'modeldict': await self.getModelDict(),
'stormdocs': await self.getStormDocs(),
}
async def getStormDocs(self):
'''
Get a struct containing the Storm Types documentation.
Returns:
dict: A Dictionary of storm documentation information.
'''
ret = {
'libraries': s_stormtypes.registry.getLibDocs(),
'types': s_stormtypes.registry.getTypeDocs(),
# 'cmds': ... # TODO - support cmd docs
# 'packages': ... # TODO - Support inline information for packages?
}
return ret
async def addNodes(self, nodedefs, view=None):
'''
Quickly add/modify a list of nodes from node definition tuples.
This API is the simplest/fastest way to add nodes, set node props,
and add tags to nodes remotely.
Args:
nodedefs (list): A list of node definition tuples. See below.
A node definition tuple is defined as:
((form, valu), {'props': {}, 'tags': {}})
The "props" or "tags" keys may be omitted.
'''
async with await self.snap(view=view) as snap:
snap.strict = False
async for node in snap.addNodes(nodedefs):
yield node
async def addFeedData(self, name, items, *, viewiden=None):
'''
Add data using a feed/parser function.
Args:
name (str): The name of the feed record format.
items (list): A list of items to ingest.
viewiden (str): The iden of a view to use.
If a view is not specified, the default view is used.
'''
view = self.getView(viewiden)
if view is None:
raise s_exc.NoSuchView(iden=viewiden)
async with await self.snap(view=view) as snap:
snap.strict = False
await snap.addFeedData(name, items)
async def snap(self, user=None, view=None):
'''
Return a transaction object for the given view (the default view if not specified).
Args:
user (str): The user to get the snap for.
view (View): View object to use when making the snap.
Notes:
This must be used as an asynchronous context manager.
Returns:
s_snap.Snap: A Snap object for the view.
'''
if view is None:
view = self.view
if user is None:
user = await self.auth.getUserByName('root')
snap = await view.snap(user)
return snap
async def loadCoreModule(self, ctor, conf=None):
'''
Load a single cortex module with the given ctor and conf.
Args:
ctor (str): The python module class path
conf (dict): Config dictionary for the module.
'''
if conf is None:
conf = {}
modu = self._loadCoreModule(ctor, conf=conf)
try:
await s_coro.ornot(modu.preCoreModule)
except asyncio.CancelledError: # pragma: no cover TODO: remove once >= py 3.8 only
raise
except Exception:
logger.exception(f'module preCoreModule failed: {ctor}')
self.modules.pop(ctor, None)
return
mdefs = modu.getModelDefs()
self.model.addDataModels(mdefs)
cmds = modu.getStormCmds()
[self.addStormCmd(c) for c in cmds]
try:
await s_coro.ornot(modu.initCoreModule)
except asyncio.CancelledError: # pragma: no cover TODO: remove once >= py 3.8 only
raise
except Exception:
logger.exception(f'module initCoreModule failed: {ctor}')
self.modules.pop(ctor, None)
return
await self.fire('core:module:load', module=ctor)
return modu
async def _loadCoreMods(self):
mods = []
cmds = []
mdefs = []
for ctor in list(s_modules.coremods):
await self._preLoadCoreModule(ctor, mods, cmds, mdefs)
for ctor in self.conf.get('modules'):
await self._preLoadCoreModule(ctor, mods, cmds, mdefs, custom=True)
self.model.addDataModels(mdefs)
[self.addStormCmd(c) for c in cmds]
async def _preLoadCoreModule(self, ctor, mods, cmds, mdefs, custom=False):
conf = None
# allow module entry to be (ctor, conf) tuple
if isinstance(ctor, (list, tuple)):
ctor, conf = ctor
modu = self._loadCoreModule(ctor, conf=conf)
if modu is None:
return
mods.append(modu)
try:
await s_coro.ornot(modu.preCoreModule)
except asyncio.CancelledError: # pragma: no cover TODO: remove once >= py 3.8 only
raise
except Exception:
logger.exception(f'module preCoreModule failed: {ctor}')
self.modules.pop(ctor, None)
return
cmds.extend(modu.getStormCmds())
model_defs = modu.getModelDefs()
if custom:
for _mdef, mnfo in model_defs:
mnfo['custom'] = True
mdefs.extend(model_defs)
async def _initCoreMods(self):
with s_provenance.claim('init', meth='_initCoreMods'):
for ctor, modu in list(self.modules.items()):
try:
await s_coro.ornot(modu.initCoreModule)
except asyncio.CancelledError: # pragma: no cover TODO: remove once >= py 3.8 only
raise
except Exception:
logger.exception(f'module initCoreModule failed: {ctor}')
self.modules.pop(ctor, None)
def _loadCoreModule(self, ctor, conf=None):
if ctor in self.modules:
raise s_exc.ModAlreadyLoaded(mesg=f'{ctor} already loaded')
try:
modu = s_dyndeps.tryDynFunc(ctor, self, conf=conf)
self.modules[ctor] = modu
return modu
except Exception:
logger.exception('mod load fail: %s' % (ctor,))
return None
async def stat(self):
stats = {
'iden': self.iden,
'layer': await self.getLayer().stat(),
'formcounts': await self.getFormCounts(),
}
return stats
async def getPropNorm(self, prop, valu):
'''
Get the normalized property value based on the Cortex data model.
Args:
prop (str): The property to normalize.
valu: The value to normalize.
Returns:
(tuple): A two item tuple, containing the normed value and the info dictionary.
Raises:
s_exc.NoSuchProp: If the prop does not exist.
s_exc.BadTypeValu: If the value fails to normalize.
'''
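# e.g. norm, info = await core.getPropNorm('inet:ipv4', '1.2.3.4') returns the
# model-normalized value plus an info dict; the prop name and the "core" handle
# here are only illustrative.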
pobj = self.model.prop(prop)
if pobj is None:
raise s_exc.NoSuchProp(mesg=f'The property {prop} does not exist.',
prop=prop)
norm, info = pobj.type.norm(valu)
return norm, info
async def getTypeNorm(self, name, valu):
'''
Get the normalized type value based on the Cortex data model.
Args:
name (str): The type to normalize.
valu: The value to normalize.
Returns:
(tuple): A two item tuple, containing the normed value and the info dictionary.
Raises:
s_exc.NoSuchType: If the type does not exist.
s_exc.BadTypeValu: If the value fails to normalize.
'''
tobj = self.model.type(name)
if tobj is None:
raise s_exc.NoSuchType(mesg=f'The type {name} does not exist.',
name=name)
norm, info = tobj.norm(valu)
return norm, info
@staticmethod
def _convert_reqdict(reqdict):
return {s_agenda.TimeUnit.fromString(k): v for (k, v) in reqdict.items()}
async def addCronJob(self, cdef):
'''
Add a cron job to the cortex. Convenience wrapper around agenda.add
A cron job is a persistently-stored item that causes storm queries to be run in the future. The specification
for the times that the queries run can be one-shot or recurring.
Args:
query (str): The storm query to execute in the future
reqs (Union[Dict[str, Union[int, List[int]]], List[Dict[...]]]):
Either a dict of the fixed time fields or a list of such dicts. The keys are in the set ('year',
'month', 'dayofmonth', 'dayofweek', 'hour', 'minute'). The values must be positive integers, except for
'dayofmonth', which may also be a negative integer representing the number of days
from the end of the month, with -1 representing the last day of the month. All values may also be lists
of valid values.
incunit (Optional[str]):
A member of the same set as above, with an additional member 'day'. If it is None (default), then the
appointment is one-shot and will not recur.
incvals (Union[int, List[int]]):
An integer or a list of integers giving the number of units of the recurrence
Returns (bytes):
An iden that can be used to later modify, query, and delete the job.
Notes:
reqs must have fields present or incunit must not be None (or both)
If incunit is not None, it must be larger in unit size than all the keys in all reqs elements.
Non-recurring jobs may also have a req of 'now' which will cause the job to also execute immediately.
'''
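# Illustrative sketch (values are hypothetical): a job that should run every day
# at 09:30 could pass reqs={'hour': 9, 'minute': 30}, incunit='day', incvals=1
# in its cdef, alongside the creator and query fields described above.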
s_agenda.reqValidCdef(cdef)
incunit = cdef.get('incunit')
reqs = cdef.get('reqs')
try:
if incunit is not None:
if isinstance(incunit, (list, tuple)):
incunit = [s_agenda.TimeUnit.fromString(i) for i in incunit]
else:
incunit = s_agenda.TimeUnit.fromString(incunit)
cdef['incunit'] = incunit
if isinstance(reqs, Mapping):
reqs = self._convert_reqdict(reqs)
else:
reqs = [self._convert_reqdict(req) for req in reqs]
if incunit is not None and s_agenda.TimeUnit.NOW in reqs:
mesg = "Recurring jobs may not be scheduled to run 'now'"
raise s_exc.BadConfValu(mesg)
cdef['reqs'] = reqs
except KeyError:
raise s_exc.BadConfValu('Unrecognized time unit')
cdef['iden'] = s_common.guid()
return await self._push('cron:add', cdef)
@s_nexus.Pusher.onPush('cron:add')
async def _onAddCronJob(self, cdef):
iden = cdef['iden']
appt = self.agenda.appts.get(iden)
if appt is not None:
return appt.pack()
user = await self.auth.reqUser(cdef['creator'])
cdef = await self.agenda.add(cdef)
await self.auth.addAuthGate(iden, 'cronjob')
await user.setAdmin(True, gateiden=iden, logged=False)
return cdef
@s_nexus.Pusher.onPushAuto('cron:del')
async def delCronJob(self, iden):
'''
Delete a cron job
Args:
iden (bytes): The iden of the cron job to be deleted
'''
try:
await self.agenda.delete(iden)
except s_exc.NoSuchIden:
return
await self.auth.delAuthGate(iden)
@s_nexus.Pusher.onPushAuto('cron:mod')
async def updateCronJob(self, iden, query):
'''
Change an existing cron job's query
Args:
iden (bytes): The iden of the cron job to be changed
query (str): The new storm query for the cron job
'''
await self.agenda.mod(iden, query)
@s_nexus.Pusher.onPushAuto('cron:enable')
async def enableCronJob(self, iden):
'''
Enable a cron job
Args:
iden (bytes): The iden of the cron job to be changed
'''
await self.agenda.enable(iden)
@s_nexus.Pusher.onPushAuto('cron:disable')
async def disableCronJob(self, iden):
'''
Disable a cron job
Args:
iden (bytes): The iden of the cron job to be changed
'''
await self.agenda.disable(iden)
async def listCronJobs(self):
'''
Get information about all the cron jobs accessible to the current user
'''
crons = []
for _, cron in self.agenda.list():
info = cron.pack()
user = self.auth.user(cron.creator)
info['username'] = user.name
crons.append(info)
return crons
@s_nexus.Pusher.onPushAuto('cron:edit')
async def editCronJob(self, iden, name, valu):
'''
Modify a cron job definition.
'''
appt = await self.agenda.get(iden)
# TODO make this generic and check cdef
if name == 'name':
await appt.setName(str(valu))
return appt.pack()
if name == 'doc':
await appt.setDoc(str(valu))
return appt.pack()
mesg = f'editCronJob name {name} is not supported for editing.'
raise s_exc.BadArg(mesg=mesg)
async def _enableMigrationMode(self):
'''
Prevents cron jobs and triggers from running
'''
self.agenda.enabled = False
self.trigson = False
async def _disableMigrationMode(self):
'''
Allows cron jobs and triggers to run
'''
if self.conf.get('cron:enable'):
self.agenda.enabled = True
if self.conf.get('trigger:enable'):
self.trigson = True
async def iterFormRows(self, layriden, form, stortype=None, startvalu=None):
'''
Yields buid, valu tuples of nodes of a single form, optionally (re)starting at startvalu.
Args:
layriden (str): Iden of the layer to retrieve the nodes
form (str): A form name.
stortype (Optional[int]): a STOR_TYPE_* integer representing the type of form:prop
startvalu (Any): The value to start at. May only be not None if stortype is not None.
Returns:
AsyncIterator[Tuple(buid, valu)]
'''
layr = self.getLayer(layriden)
if layr is None:
raise s_exc.NoSuchLayer(iden=layriden)
async for item in layr.iterFormRows(form, stortype=stortype, startvalu=startvalu):
yield item
async def iterPropRows(self, layriden, form, prop, stortype=None, startvalu=None):
'''
Yields buid, valu tuples of nodes with a particular secondary property, optionally (re)starting at startvalu.
Args:
layriden (str): Iden of the layer to retrieve the nodes
form (str): A form name.
prop (str): A secondary property name.
stortype (Optional[int]): a STOR_TYPE_* integer representing the type of form:prop
startvalu (Any): The value to start at. May only be not None if stortype is not None.
Returns:
AsyncIterator[Tuple(buid, valu)]
'''
layr = self.getLayer(layriden)
if layr is None:
raise s_exc.NoSuchLayer(iden=layriden)
async for item in layr.iterPropRows(form, prop, stortype=stortype, startvalu=startvalu):
yield item
async def iterUnivRows(self, layriden, prop, stortype=None, startvalu=None):
'''
Yields buid, valu tuples of nodes with a particular universal property, optionally (re)starting at startvalu.
Args:
layriden (str): Iden of the layer to retrieve the nodes
prop (str): A universal property name.
stortype (Optional[int]): a STOR_TYPE_* integer representing the type of form:prop
startvalu (Any): The value to start at. May only be not None if stortype is not None.
Returns:
AsyncIterator[Tuple(buid, valu)]
'''
layr = self.getLayer(layriden)
if layr is None:
raise s_exc.NoSuchLayer(iden=layriden)
async for item in layr.iterUnivRows(prop, stortype=stortype, startvalu=startvalu):
yield item
async def iterTagRows(self, layriden, tag, form=None, starttupl=None):
'''
Yields (buid, (valu, form)) values that match a tag and optional form, optionally (re)starting at starttupl.
Args:
layriden (str): Iden of the layer to retrieve the nodes
tag (str): the tag to match
form (Optional[str]): if present, only yields buids of nodes that match the form.
starttupl (Optional[Tuple[buid, form]]): if present, (re)starts the stream of values there.
Returns:
AsyncIterator[Tuple(buid, (valu, form))]
Note:
This yields (buid, (tagvalu, form)) instead of just buid, valu in order to allow resuming an interrupted
call by feeding the last value retrieved into starttupl
'''
layr = self.getLayer(layriden)
if layr is None:
raise s_exc.NoSuchLayer(iden=layriden)
async for item in layr.iterTagRows(tag, form=form, starttupl=starttupl):
yield item
async def iterTagPropRows(self, layriden, tag, prop, form=None, stortype=None, startvalu=None):
'''
Yields (buid, valu) that match a tag:prop, optionally (re)starting at startvalu.
Args:
layriden (str): Iden of the layer to retrieve the nodes
tag (str): tag name
prop (str): prop name
form (Optional[str]): optional form name
stortype (Optional[int]): a STOR_TYPE_* integer representing the type of form:prop
startvalu (Any): The value to start at. May only be not None if stortype is not None.
Returns:
AsyncIterator[Tuple(buid, valu)]
'''
layr = self.getLayer(layriden)
if layr is None:
raise s_exc.NoSuchLayer(iden=layriden)
async for item in layr.iterTagPropRows(tag, prop, form=form, stortype=stortype, startvalu=startvalu):
yield item
@contextlib.asynccontextmanager
async def getTempCortex(mods=None):
'''
Get a proxy to a cortex backed by a temporary directory.
Args:
mods (list): A list of modules which are loaded into the cortex.
Notes:
The cortex and temporary directory are torn down on exit.
This should only be called from synchronous code.
Returns:
Proxy to the cortex.
'''
with s_common.getTempDir() as dirn:
async with await Cortex.anit(dirn) as core:
if mods:
for mod in mods:
await core.loadCoreModule(mod)
async with core.getLocalProxy() as prox:
yield prox
|
py | 1a37345db9048c4cddbfbfb2300ae6d340f778ce | from argparse import Action
from enum import Enum
class EnumNameAction(Action):
"""
Argparse action for handling Enums
Adapted from:
https://stackoverflow.com/questions/43968006/support-for-enum-arguments-in-argparse
"""
def __init__(self, **kwargs):
# Pop off the type value
enum = kwargs.pop("type", None)
# Ensure an Enum subclass is provided
if enum is None:
raise ValueError("type must be assigned an Enum when using EnumNameAction")
if not issubclass(enum, Enum):
raise TypeError("type must be an Enum when using EnumNameAction")
# Generate choices from the Enum
kwargs.setdefault("choices", tuple(e.name for e in enum))
super().__init__(**kwargs)
self._enum = enum
def __call__(self, parser, namespace, values, option_string=None):
# Convert value back into an Enum
enum = self._enum[values]
setattr(namespace, self.dest, enum)
class EnumLowerNameAction(Action):
"""
Argparse action for handling Enums, presenting the choices in lower case
but accepting inputs independent of case. It assumes that the Enum
member names are upper case.
Adapted from:
https://stackoverflow.com/questions/43968006/support-for-enum-arguments-in-argparse
https://stackoverflow.com/questions/27616778/case-insensitive-argparse-choices
"""
class cilist(list):
def __contains__(self, other):
# Convert to lower case for the comparison, since
# the list was built in lower case.
return super().__contains__(other.lower())
def __init__(self, **kwargs):
# Pop off the type value
enum = kwargs.pop("type", None)
# Ensure an Enum subclass is provided
if enum is None:
raise ValueError("type must be assigned an Enum when using EnumLowerNameAction")
if not issubclass(enum, Enum):
raise TypeError("type must be an Enum when using EnumLowerNameAction")
# Generate choices from the Enum, converting the names to lower case:
kwargs.setdefault("choices", self.cilist(e.name.lower() for e in enum))
super().__init__(**kwargs)
self._enum = enum
def __call__(self, parser, namespace, values, option_string=None):
# Convert value back into an Enum
enum = self._enum[values.upper()]
setattr(namespace, self.dest, enum)
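# A minimal usage sketch (not part of the original module): the Color enum and
# the parser wiring below are hypothetical, added only to illustrate how these
# actions plug into argparse.
if __name__ == "__main__":
    from argparse import ArgumentParser

    class Color(Enum):
        RED = 1
        GREEN = 2

    parser = ArgumentParser()
    parser.add_argument("--color", type=Color, action=EnumNameAction)
    parser.add_argument("--shade", type=Color, action=EnumLowerNameAction)
    args = parser.parse_args(["--color", "RED", "--shade", "green"])
    # Both string arguments are converted back into Color members.
    assert args.color is Color.RED and args.shade is Color.GREEN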
|
py | 1a37352100ea2cd149c8b3782928ed7a29b98154 | import torch
from safe_explorer.core.config import Config
from safe_explorer.core.net import Net, ConvNet
from safe_explorer.ddpg.utils import init_fan_in_uniform
class Actor(Net):
def __init__(self, observation_dim, action_dim):
config = Config.get().ddpg.actor
super(Actor, self).__init__(observation_dim,
action_dim,
config.layers,
config.init_bound,
init_fan_in_uniform,
torch.tanh) |
py | 1a37388cb2e95b85ed4f9e9b3b91349e81e61f88 | """
A simple implementation of the Caesar cipher.
"""
def cycle_text(text: str, n: int) -> str:
"""
Cyclically shifts the text `text` by the given number of positions `n`.
>>> cycle_text('asdf', 1)
'sdfa'
>>> cycle_text('asdf', -1)
'fasd'
:param text: the text
:param n: the number of positions to shift the text by.
- Greater than zero - shift to the left.
- Less than zero - shift to the right.
:return: the text shifted by the given number of positions.
"""
return text[n:] + text[:n]
class CaesarCipher:
"""
The Caesar cipher.
Encryption is performed by shifting the source alphabet by a given number of
characters and replacing the characters of the source text with the shifted ones.
As the key, the recipient needs to know in which direction to shift the alphabet.
"""
def __init__(self, alphabet: str):
"""
:param alphabet: a string with the alphabet. All letters are written together,
in their standard order.
"""
self.alphabet = alphabet
def encrypt(self, text: str, key: int) -> str:
"""
Encrypts the text `text` with the key `key`.
>>> cc.encrypt('привет', 1)
'рсйгёу'
:param text: the source text to encrypt.
:param key: the numeric key used to shift the alphabet.
:return: the encrypted text.
"""
return self._cipher(text, key)
def decrypt(self, text: str, key: int) -> str:
"""
Decrypts the text `text` with the key `key`. Since `key` is the encryption
key, multiplying it by -1 gives the decryption key.
>>> cc.decrypt('рсйгёу', 1)
'привет'
:param text: the text to decrypt.
:param key: the numeric key used to shift the alphabet.
:return: the decrypted text.
"""
return self._cipher(text, key * -1)
def _cipher(self, text: str, key: int) -> str:
"""
Runs the actual encryption/decryption algorithm for the text `text` by mapping
the source alphabet onto the target alphabet and replacing the characters in the text.
:param text: the text to encrypt/decrypt.
:param key: the numeric key used to shift the alphabet.
:return: the processed text.
"""
target_alphabet = cycle_text(self.alphabet, key)
encrypted_text = ''
for char in text:
i = self.alphabet.find(char)
replacer_char = target_alphabet[i]
encrypted_text += replacer_char
return encrypted_text
if __name__ == '__main__':
import doctest
doctest.testmod(extraglobs={'cc': CaesarCipher('абвгдеёжзийклмнопрстуфхцчшщьыъэюя')})
|
py | 1a373a6cf330a12c050a86489c533105f173fb73 | """
This file offers the methods to automatically retrieve the graph Intestinibacter bartlettii.
The graph is automatically retrieved from the STRING repository.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-02 20:51:07.006324
The undirected graph Intestinibacter bartlettii has 2775 nodes and 214929
weighted edges, of which none are self-loops. The graph is dense as it
has a density of 0.05584 and has 13 connected components, where the component
with most nodes has 2739 nodes and the component with the least nodes has
2 nodes. The graph median node degree is 132, the mean node degree is 154.90,
and the node degree mode is 7. The top 5 most central nodes are 445973.CLOBAR_01516
(degree 1127), 445973.CLOBAR_00656 (degree 955), 445973.CLOBAR_02505 (degree
870), 445973.CLOBAR_00502 (degree 784) and 445973.CLOBAR_01376 (degree
763).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import IntestinibacterBartlettii
# Then load the graph
graph = IntestinibacterBartlettii()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
# You can use an 80/20 split the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
# Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen_graph import EnsmallenGraph # pylint: disable=import-error
def IntestinibacterBartlettii(
directed: bool = False,
verbose: int = 2,
cache_path: str = "graphs/string",
**additional_graph_kwargs: Dict
) -> EnsmallenGraph:
"""Return new instance of the Intestinibacter bartlettii graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False,
Whether to load the graph as directed or undirected.
By default false.
verbose: int = 2,
Whether to show loading bars during the retrieval and building
of the graph.
cache_path: str = "graphs",
Where to store the downloaded graphs.
additional_graph_kwargs: Dict,
Additional graph kwargs.
Returns
-----------------------
Instance of the Intestinibacter bartlettii graph.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-02 20:51:07.006324
The undirected graph Intestinibacter bartlettii has 2775 nodes and 214929
weighted edges, of which none are self-loops. The graph is dense as it
has a density of 0.05584 and has 13 connected components, where the component
with most nodes has 2739 nodes and the component with the least nodes has
2 nodes. The graph median node degree is 132, the mean node degree is 154.90,
and the node degree mode is 7. The top 5 most central nodes are 445973.CLOBAR_01516
(degree 1127), 445973.CLOBAR_00656 (degree 955), 445973.CLOBAR_02505 (degree
870), 445973.CLOBAR_00502 (degree 784) and 445973.CLOBAR_01376 (degree
763).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import IntestinibacterBartlettii
# Then load the graph
graph = IntestinibacterBartlettii()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
# You can use an 80/20 split the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
# Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
return AutomaticallyRetrievedGraph(
graph_name="IntestinibacterBartlettii",
dataset="string",
directed=directed,
verbose=verbose,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
|
py | 1a373aeac2c6c5a3fe20888700bb14898fc46add | import asyncio
import collections
import gc
from contextlib import contextmanager, suppress
import copy
import functools
from glob import glob
import io
import itertools
import logging
import logging.config
import os
import queue
import re
import shutil
import signal
import socket
import subprocess
import sys
import tempfile
import threading
from time import sleep
import uuid
import warnings
import weakref
try:
import ssl
except ImportError:
ssl = None
import pytest
import dask
from tlz import merge, memoize, assoc
from tornado import gen
from tornado.ioloop import IOLoop
from . import system
from .client import default_client, _global_clients, Client
from .compatibility import WINDOWS
from .comm import Comm
from .config import initialize_logging
from .core import connect, rpc, CommClosedError, Status
from .deploy import SpecCluster
from .metrics import time
from .process import _cleanup_dangling
from .proctitle import enable_proctitle_on_children
from .security import Security
from .utils import (
log_errors,
mp_context,
get_ip,
get_ipv6,
DequeHandler,
reset_logger_locks,
sync,
iscoroutinefunction,
thread_state,
_offload_executor,
TimeoutError,
)
from .worker import Worker
from .nanny import Nanny
try:
import dask.array # register config
except ImportError:
pass
logger = logging.getLogger(__name__)
logging_levels = {
name: logger.level
for name, logger in logging.root.manager.loggerDict.items()
if isinstance(logger, logging.Logger)
}
_offload_executor.submit(lambda: None).result() # create thread during import
@pytest.fixture(scope="session")
def valid_python_script(tmpdir_factory):
local_file = tmpdir_factory.mktemp("data").join("file.py")
local_file.write("print('hello world!')")
return local_file
@pytest.fixture(scope="session")
def client_contract_script(tmpdir_factory):
local_file = tmpdir_factory.mktemp("data").join("distributed_script.py")
lines = (
"from distributed import Client",
"e = Client('127.0.0.1:8989')",
"print(e)",
)
local_file.write("\n".join(lines))
return local_file
@pytest.fixture(scope="session")
def invalid_python_script(tmpdir_factory):
local_file = tmpdir_factory.mktemp("data").join("file.py")
local_file.write("a+1")
return local_file
async def cleanup_global_workers():
for worker in Worker._instances:
await worker.close(report=False, executor_wait=False)
@pytest.fixture
def loop():
with check_instances():
with pristine_loop() as loop:
# Monkey-patch IOLoop.start to wait for loop stop
orig_start = loop.start
is_stopped = threading.Event()
is_stopped.set()
def start():
is_stopped.clear()
try:
orig_start()
finally:
is_stopped.set()
loop.start = start
yield loop
# Stop the loop in case it's still running
try:
sync(loop, cleanup_global_workers, callback_timeout=0.500)
loop.add_callback(loop.stop)
except RuntimeError as e:
if not re.match("IOLoop is clos(ed|ing)", str(e)):
raise
except TimeoutError:
pass
else:
is_stopped.wait()
@pytest.fixture
def loop_in_thread():
with pristine_loop() as loop:
thread = threading.Thread(target=loop.start, name="test IOLoop")
thread.daemon = True
thread.start()
loop_started = threading.Event()
loop.add_callback(loop_started.set)
loop_started.wait()
yield loop
loop.add_callback(loop.stop)
thread.join(timeout=5)
@pytest.fixture
def zmq_ctx():
import zmq
ctx = zmq.Context.instance()
yield ctx
ctx.destroy(linger=0)
@contextmanager
def pristine_loop():
IOLoop.clear_instance()
IOLoop.clear_current()
loop = IOLoop()
loop.make_current()
assert IOLoop.current() is loop
try:
yield loop
finally:
try:
loop.close(all_fds=True)
except (KeyError, ValueError):
pass
IOLoop.clear_instance()
IOLoop.clear_current()
@contextmanager
def mock_ipython():
from unittest import mock
from distributed._ipython_utils import remote_magic
ip = mock.Mock()
ip.user_ns = {}
ip.kernel = None
def get_ip():
return ip
with mock.patch("IPython.get_ipython", get_ip), mock.patch(
"distributed._ipython_utils.get_ipython", get_ip
):
yield ip
# cleanup remote_magic client cache
for kc in remote_magic._clients.values():
kc.stop_channels()
remote_magic._clients.clear()
original_config = copy.deepcopy(dask.config.config)
def reset_config():
dask.config.config.clear()
dask.config.config.update(copy.deepcopy(original_config))
def nodebug(func):
"""
A decorator to disable debug facilities during timing-sensitive tests.
Warning: this doesn't affect already created IOLoops.
"""
@functools.wraps(func)
def wrapped(*args, **kwargs):
old_asyncio_debug = os.environ.get("PYTHONASYNCIODEBUG")
if old_asyncio_debug is not None:
del os.environ["PYTHONASYNCIODEBUG"]
try:
return func(*args, **kwargs)
finally:
if old_asyncio_debug is not None:
os.environ["PYTHONASYNCIODEBUG"] = old_asyncio_debug
return wrapped
def nodebug_setup_module(module):
"""
A setup_module() that you can install in a test module to disable
debug facilities.
"""
module._old_asyncio_debug = os.environ.get("PYTHONASYNCIODEBUG")
if module._old_asyncio_debug is not None:
del os.environ["PYTHONASYNCIODEBUG"]
def nodebug_teardown_module(module):
"""
A teardown_module() that you can install in a test module to reenable
debug facilities.
"""
if module._old_asyncio_debug is not None:
os.environ["PYTHONASYNCIODEBUG"] = module._old_asyncio_debug
def inc(x):
return x + 1
def dec(x):
return x - 1
def mul(x, y):
return x * y
def div(x, y):
return x / y
def deep(n):
if n > 0:
return deep(n - 1)
else:
return True
def throws(x):
raise RuntimeError("hello!")
def double(x):
return x * 2
def slowinc(x, delay=0.02):
sleep(delay)
return x + 1
def slowdec(x, delay=0.02):
sleep(delay)
return x - 1
def slowdouble(x, delay=0.02):
sleep(delay)
return 2 * x
def randominc(x, scale=1):
from random import random
sleep(random() * scale)
return x + 1
def slowadd(x, y, delay=0.02):
sleep(delay)
return x + y
def slowsum(seq, delay=0.02):
sleep(delay)
return sum(seq)
def slowidentity(*args, **kwargs):
delay = kwargs.get("delay", 0.02)
sleep(delay)
if len(args) == 1:
return args[0]
else:
return args
def run_for(duration, timer=time):
"""
Burn CPU for *duration* seconds.
"""
deadline = timer() + duration
while timer() <= deadline:
pass
# This dict grows at every varying() invocation
_varying_dict = collections.defaultdict(int)
_varying_key_gen = itertools.count()
class _ModuleSlot:
def __init__(self, modname, slotname):
self.modname = modname
self.slotname = slotname
def get(self):
return getattr(sys.modules[self.modname], self.slotname)
def varying(items):
"""
Return a function that returns a result (or raises an exception)
from *items* at each call.
"""
# cloudpickle would serialize the *values* of all globals
# used by *func* below, so we can't use `global <something>`.
# Instead look up the module by name to get the original namespace
# and not a copy.
slot = _ModuleSlot(__name__, "_varying_dict")
key = next(_varying_key_gen)
def func():
dct = slot.get()
i = dct[key]
if i == len(items):
raise IndexError
else:
x = items[i]
dct[key] = i + 1
if isinstance(x, Exception):
raise x
else:
return x
return func
def map_varying(itemslists):
"""
Like *varying*, but return the full specification for a map() call
on multiple items lists.
"""
def apply(func, *args, **kwargs):
return func(*args, **kwargs)
return apply, list(map(varying, itemslists))
async def geninc(x, delay=0.02):
await asyncio.sleep(delay)
return x + 1
async def asyncinc(x, delay=0.02):
await asyncio.sleep(delay)
return x + 1
_readone_queues = {}
async def readone(comm):
"""
Read one message at a time from a comm that reads lists of
messages.
"""
try:
q = _readone_queues[comm]
except KeyError:
q = _readone_queues[comm] = asyncio.Queue()
async def background_read():
while True:
try:
messages = await comm.read()
except CommClosedError:
break
for msg in messages:
q.put_nowait(msg)
q.put_nowait(None)
del _readone_queues[comm]
background_read()
msg = await q.get()
if msg is None:
raise CommClosedError
else:
return msg
def run_scheduler(q, nputs, port=0, **kwargs):
from distributed import Scheduler
# On Python 2.7 and Unix, fork() is used to spawn child processes,
# so avoid inheriting the parent's IO loop.
with pristine_loop() as loop:
async def _():
scheduler = await Scheduler(
validate=True, host="127.0.0.1", port=port, **kwargs
)
for i in range(nputs):
q.put(scheduler.address)
await scheduler.finished()
try:
loop.run_sync(_)
finally:
loop.close(all_fds=True)
def run_worker(q, scheduler_q, **kwargs):
from distributed import Worker
reset_logger_locks()
with log_errors():
with pristine_loop() as loop:
scheduler_addr = scheduler_q.get()
async def _():
worker = await Worker(scheduler_addr, validate=True, **kwargs)
q.put(worker.address)
await worker.finished()
try:
loop.run_sync(_)
finally:
loop.close(all_fds=True)
def run_nanny(q, scheduler_q, **kwargs):
with log_errors():
with pristine_loop() as loop:
scheduler_addr = scheduler_q.get()
async def _():
worker = await Nanny(scheduler_addr, validate=True, **kwargs)
q.put(worker.address)
await worker.finished()
try:
loop.run_sync(_)
finally:
loop.close(all_fds=True)
@contextmanager
def check_active_rpc(loop, active_rpc_timeout=1):
active_before = set(rpc.active)
yield
# Some streams can take a bit of time to notice their peer
# has closed, and keep a coroutine (*) waiting for a CommClosedError
# before calling close_rpc() after a CommClosedError.
# This would happen especially if a non-localhost address is used,
# as Nanny does.
# (*) (example: gather_from_workers())
def fail():
pytest.fail(
"some RPCs left active by test: %s" % (set(rpc.active) - active_before)
)
async def wait():
await async_wait_for(
lambda: len(set(rpc.active) - active_before) == 0,
timeout=active_rpc_timeout,
fail_func=fail,
)
loop.run_sync(wait)
@pytest.fixture
def cluster_fixture(loop):
with cluster() as (scheduler, workers):
yield (scheduler, workers)
@pytest.fixture
def s(cluster_fixture):
scheduler, workers = cluster_fixture
return scheduler
@pytest.fixture
def a(cluster_fixture):
scheduler, workers = cluster_fixture
return workers[0]
@pytest.fixture
def b(cluster_fixture):
scheduler, workers = cluster_fixture
return workers[1]
@pytest.fixture
def client(loop, cluster_fixture):
scheduler, workers = cluster_fixture
with Client(scheduler["address"], loop=loop) as client:
yield client
@pytest.fixture
def client_secondary(loop, cluster_fixture):
scheduler, workers = cluster_fixture
with Client(scheduler["address"], loop=loop) as client:
yield client
@contextmanager
def tls_cluster_context(
worker_kwargs=None, scheduler_kwargs=None, security=None, **kwargs
):
security = security or tls_only_security()
worker_kwargs = assoc(worker_kwargs or {}, "security", security)
scheduler_kwargs = assoc(scheduler_kwargs or {}, "security", security)
with cluster(
worker_kwargs=worker_kwargs, scheduler_kwargs=scheduler_kwargs, **kwargs
) as (s, workers):
yield s, workers
@pytest.fixture
def tls_cluster(loop, security):
with tls_cluster_context(security=security) as (scheduler, workers):
yield (scheduler, workers)
@pytest.fixture
def tls_client(tls_cluster, loop, security):
s, workers = tls_cluster
with Client(s["address"], security=security, loop=loop) as client:
yield client
@pytest.fixture
def security():
return tls_only_security()
@contextmanager
def cluster(
nworkers=2,
nanny=False,
worker_kwargs={},
active_rpc_timeout=1,
disconnect_timeout=3,
scheduler_kwargs={},
):
ws = weakref.WeakSet()
enable_proctitle_on_children()
with clean(timeout=active_rpc_timeout, threads=False) as loop:
if nanny:
_run_worker = run_nanny
else:
_run_worker = run_worker
# The scheduler queue will receive the scheduler's address
scheduler_q = mp_context.Queue()
# Launch scheduler
scheduler = mp_context.Process(
name="Dask cluster test: Scheduler",
target=run_scheduler,
args=(scheduler_q, nworkers + 1),
kwargs=scheduler_kwargs,
)
ws.add(scheduler)
scheduler.daemon = True
scheduler.start()
# Launch workers
workers = []
for i in range(nworkers):
q = mp_context.Queue()
fn = "_test_worker-%s" % uuid.uuid4()
kwargs = merge(
{
"nthreads": 1,
"local_directory": fn,
"memory_limit": system.MEMORY_LIMIT,
},
worker_kwargs,
)
proc = mp_context.Process(
name="Dask cluster test: Worker",
target=_run_worker,
args=(q, scheduler_q),
kwargs=kwargs,
)
ws.add(proc)
workers.append({"proc": proc, "queue": q, "dir": fn})
for worker in workers:
worker["proc"].start()
try:
for worker in workers:
worker["address"] = worker["queue"].get(timeout=5)
except queue.Empty:
raise pytest.xfail.Exception("Worker failed to start in test")
saddr = scheduler_q.get()
start = time()
try:
try:
security = scheduler_kwargs["security"]
rpc_kwargs = {"connection_args": security.get_connection_args("client")}
except KeyError:
rpc_kwargs = {}
with rpc(saddr, **rpc_kwargs) as s:
while True:
nthreads = loop.run_sync(s.ncores)
if len(nthreads) == nworkers:
break
if time() - start > 5:
raise Exception("Timeout on cluster creation")
# avoid sending processes down to function
yield {"address": saddr}, [
{"address": w["address"], "proc": weakref.ref(w["proc"])}
for w in workers
]
finally:
logger.debug("Closing out test cluster")
loop.run_sync(
lambda: disconnect_all(
[w["address"] for w in workers],
timeout=disconnect_timeout,
rpc_kwargs=rpc_kwargs,
)
)
loop.run_sync(
lambda: disconnect(
saddr, timeout=disconnect_timeout, rpc_kwargs=rpc_kwargs
)
)
scheduler.terminate()
scheduler_q.close()
scheduler_q._reader.close()
scheduler_q._writer.close()
for w in workers:
w["proc"].terminate()
w["queue"].close()
w["queue"]._reader.close()
w["queue"]._writer.close()
scheduler.join(2)
del scheduler
for proc in [w["proc"] for w in workers]:
proc.join(timeout=2)
with suppress(UnboundLocalError):
del worker, w, proc
del workers[:]
for fn in glob("_test_worker-*"):
with suppress(OSError):
shutil.rmtree(fn)
try:
client = default_client()
except ValueError:
pass
else:
client.close()
start = time()
while any(proc.is_alive() for proc in ws):
text = str(list(ws))
sleep(0.2)
assert time() < start + 5, ("Workers still around after five seconds", text)
async def disconnect(addr, timeout=3, rpc_kwargs=None):
rpc_kwargs = rpc_kwargs or {}
async def do_disconnect():
with suppress(EnvironmentError, CommClosedError):
with rpc(addr, **rpc_kwargs) as w:
await w.terminate(close=True)
await asyncio.wait_for(do_disconnect(), timeout=timeout)
async def disconnect_all(addresses, timeout=3, rpc_kwargs=None):
await asyncio.gather(*[disconnect(addr, timeout, rpc_kwargs) for addr in addresses])
def gen_test(timeout=10):
"""Coroutine test
@gen_test(timeout=5)
async def test_foo():
await ... # use tornado coroutines
"""
def _(func):
def test_func():
with clean() as loop:
if iscoroutinefunction(func):
cor = func
else:
cor = gen.coroutine(func)
loop.run_sync(cor, timeout=timeout)
return test_func
return _
from .scheduler import Scheduler
from .worker import Worker
async def start_cluster(
nthreads,
scheduler_addr,
loop,
security=None,
Worker=Worker,
scheduler_kwargs={},
worker_kwargs={},
):
s = await Scheduler(
loop=loop,
validate=True,
security=security,
port=0,
host=scheduler_addr,
**scheduler_kwargs,
)
workers = [
Worker(
s.address,
nthreads=ncore[1],
name=i,
security=security,
loop=loop,
validate=True,
host=ncore[0],
**(merge(worker_kwargs, ncore[2]) if len(ncore) > 2 else worker_kwargs),
)
for i, ncore in enumerate(nthreads)
]
# for w in workers:
# w.rpc = workers[0].rpc
await asyncio.gather(*workers)
start = time()
while len(s.workers) < len(nthreads) or any(
comm.comm is None for comm in s.stream_comms.values()
):
await asyncio.sleep(0.01)
if time() - start > 5:
await asyncio.gather(*[w.close(timeout=1) for w in workers])
await s.close(fast=True)
raise Exception("Cluster creation timeout")
return s, workers
async def end_cluster(s, workers):
logger.debug("Closing out test cluster")
async def end_worker(w):
with suppress(TimeoutError, CommClosedError, EnvironmentError):
await w.close(report=False)
await asyncio.gather(*[end_worker(w) for w in workers])
await s.close() # wait until scheduler stops completely
s.stop()
def gen_cluster(
nthreads=[("127.0.0.1", 1), ("127.0.0.1", 2)],
ncores=None,
scheduler="127.0.0.1",
timeout=10,
security=None,
Worker=Worker,
client=False,
scheduler_kwargs={},
worker_kwargs={},
client_kwargs={},
active_rpc_timeout=1,
config={},
clean_kwargs={},
allow_unclosed=False,
):
from distributed import Client
""" Coroutine test with small cluster
@gen_cluster()
async def test_foo(scheduler, worker1, worker2):
await ... # use tornado coroutines
See also:
start
end
"""
if ncores is not None:
warnings.warn("ncores= has moved to nthreads=", stacklevel=2)
nthreads = ncores
worker_kwargs = merge(
{"memory_limit": system.MEMORY_LIMIT, "death_timeout": 10}, worker_kwargs
)
def _(func):
if not iscoroutinefunction(func):
func = gen.coroutine(func)
def test_func():
result = None
workers = []
with clean(timeout=active_rpc_timeout, **clean_kwargs) as loop:
async def coro():
with dask.config.set(config):
s = False
for i in range(5):
try:
s, ws = await start_cluster(
nthreads,
scheduler,
loop,
security=security,
Worker=Worker,
scheduler_kwargs=scheduler_kwargs,
worker_kwargs=worker_kwargs,
)
except Exception as e:
logger.error(
"Failed to start gen_cluster, retrying",
exc_info=True,
)
await asyncio.sleep(1)
else:
workers[:] = ws
args = [s] + workers
break
if s is False:
raise Exception("Could not start cluster")
if client:
c = await Client(
s.address,
loop=loop,
security=security,
asynchronous=True,
**client_kwargs,
)
args = [c] + args
try:
future = func(*args)
if timeout:
future = asyncio.wait_for(future, timeout)
result = await future
if s.validate:
s.validate_state()
finally:
if client and c.status not in ("closing", "closed"):
await c._close(fast=s.status == Status.closed)
await end_cluster(s, workers)
await asyncio.wait_for(cleanup_global_workers(), 1)
try:
c = await default_client()
except ValueError:
pass
else:
await c._close(fast=True)
def get_unclosed():
return [c for c in Comm._instances if not c.closed()] + [
c
for c in _global_clients.values()
if c.status != "closed"
]
try:
start = time()
while time() < start + 5:
gc.collect()
if not get_unclosed():
break
await asyncio.sleep(0.05)
else:
if allow_unclosed:
print(f"Unclosed Comms: {get_unclosed()}")
else:
raise RuntimeError("Unclosed Comms", get_unclosed())
finally:
Comm._instances.clear()
_global_clients.clear()
return result
result = loop.run_sync(
coro, timeout=timeout * 2 if timeout else timeout
)
for w in workers:
if getattr(w, "data", None):
try:
w.data.clear()
except EnvironmentError:
# zict backends can fail if their storage directory
# was already removed
pass
del w.data
return result
return test_func
return _
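# Hypothetical usage sketch (an editorial illustration, not part of this
# module): with client=True the first argument passed to the test body is a
# Client, followed by the Scheduler and the two default Workers.
#
#     @gen_cluster(client=True)
#     async def test_submit(c, s, a, b):
#         future = c.submit(lambda x: x + 1, 1)
#         assert await future == 2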
def raises(func, exc=Exception):
try:
func()
return False
except exc:
return True
def terminate_process(proc):
if proc.poll() is None:
if sys.platform.startswith("win"):
proc.send_signal(signal.CTRL_BREAK_EVENT)
else:
proc.send_signal(signal.SIGINT)
try:
proc.wait(10)
finally:
# Make sure we don't leave the process lingering around
with suppress(OSError):
proc.kill()
@contextmanager
def popen(args, **kwargs):
kwargs["stdout"] = subprocess.PIPE
kwargs["stderr"] = subprocess.PIPE
if sys.platform.startswith("win"):
# Allow using CTRL_C_EVENT / CTRL_BREAK_EVENT
kwargs["creationflags"] = subprocess.CREATE_NEW_PROCESS_GROUP
dump_stdout = False
args = list(args)
if sys.platform.startswith("win"):
args[0] = os.path.join(sys.prefix, "Scripts", args[0])
else:
args[0] = os.path.join(
os.environ.get("DESTDIR", "") + sys.prefix, "bin", args[0]
)
proc = subprocess.Popen(args, **kwargs)
try:
yield proc
except Exception:
dump_stdout = True
raise
finally:
try:
terminate_process(proc)
finally:
# XXX Also dump stdout if return code != 0 ?
out, err = proc.communicate()
if dump_stdout:
print("\n\nPrint from stderr\n %s\n=================\n" % args[0][0])
print(err.decode())
print("\n\nPrint from stdout\n=================\n")
print(out.decode())
def wait_for_port(address, timeout=5):
assert isinstance(address, tuple)
deadline = time() + timeout
while True:
timeout = deadline - time()
if timeout < 0:
raise RuntimeError("Failed to connect to %s" % (address,))
try:
sock = socket.create_connection(address, timeout=timeout)
except EnvironmentError:
pass
else:
sock.close()
break
def wait_for(predicate, timeout, fail_func=None, period=0.001):
deadline = time() + timeout
while not predicate():
sleep(period)
if time() > deadline:
if fail_func is not None:
fail_func()
pytest.fail("condition not reached until %s seconds" % (timeout,))
async def async_wait_for(predicate, timeout, fail_func=None, period=0.001):
deadline = time() + timeout
while not predicate():
await asyncio.sleep(period)
if time() > deadline:
if fail_func is not None:
fail_func()
pytest.fail("condition not reached until %s seconds" % (timeout,))
@memoize
def has_ipv6():
"""
Return whether IPv6 is locally functional. This doesn't guarantee IPv6
is properly configured outside of localhost.
"""
serv = cli = None
try:
serv = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
serv.bind(("::", 0))
serv.listen(5)
cli = socket.create_connection(serv.getsockname()[:2])
except EnvironmentError:
return False
else:
return True
finally:
if cli is not None:
cli.close()
if serv is not None:
serv.close()
if has_ipv6():
def requires_ipv6(test_func):
return test_func
else:
requires_ipv6 = pytest.mark.skip("ipv6 required")
async def assert_can_connect(addr, timeout=0.5, **kwargs):
"""
Check that it is possible to connect to the distributed *addr*
within the given *timeout*.
"""
comm = await connect(addr, timeout=timeout, **kwargs)
comm.abort()
async def assert_cannot_connect(
addr, timeout=0.5, exception_class=EnvironmentError, **kwargs
):
"""
Check that it is impossible to connect to the distributed *addr*
within the given *timeout*.
"""
with pytest.raises(exception_class):
comm = await connect(addr, timeout=timeout, **kwargs)
comm.abort()
async def assert_can_connect_from_everywhere_4_6(port, protocol="tcp", **kwargs):
"""
Check that the local *port* is reachable from all IPv4 and IPv6 addresses.
"""
futures = [
assert_can_connect("%s://127.0.0.1:%d" % (protocol, port), **kwargs),
assert_can_connect("%s://%s:%d" % (protocol, get_ip(), port), **kwargs),
]
if has_ipv6():
futures += [
assert_can_connect("%s://[::1]:%d" % (protocol, port), **kwargs),
assert_can_connect("%s://[%s]:%d" % (protocol, get_ipv6(), port), **kwargs),
]
await asyncio.gather(*futures)
async def assert_can_connect_from_everywhere_4(port, protocol="tcp", **kwargs):
"""
Check that the local *port* is reachable from all IPv4 addresses.
"""
futures = [
assert_can_connect("%s://127.0.0.1:%d" % (protocol, port), **kwargs),
assert_can_connect("%s://%s:%d" % (protocol, get_ip(), port), **kwargs),
]
if has_ipv6():
futures += [
assert_cannot_connect("%s://[::1]:%d" % (protocol, port), **kwargs),
assert_cannot_connect(
"%s://[%s]:%d" % (protocol, get_ipv6(), port), **kwargs
),
]
await asyncio.gather(*futures)
async def assert_can_connect_locally_4(port, **kwargs):
"""
Check that the local *port* is only reachable from local IPv4 addresses.
"""
futures = [assert_can_connect("tcp://127.0.0.1:%d" % port, **kwargs)]
if get_ip() != "127.0.0.1": # No outside IPv4 connectivity?
futures += [assert_cannot_connect("tcp://%s:%d" % (get_ip(), port), **kwargs)]
if has_ipv6():
futures += [
assert_cannot_connect("tcp://[::1]:%d" % port, **kwargs),
assert_cannot_connect("tcp://[%s]:%d" % (get_ipv6(), port), **kwargs),
]
await asyncio.gather(*futures)
async def assert_can_connect_from_everywhere_6(port, **kwargs):
"""
Check that the local *port* is reachable from all IPv6 addresses.
"""
assert has_ipv6()
futures = [
assert_cannot_connect("tcp://127.0.0.1:%d" % port, **kwargs),
assert_cannot_connect("tcp://%s:%d" % (get_ip(), port), **kwargs),
assert_can_connect("tcp://[::1]:%d" % port, **kwargs),
assert_can_connect("tcp://[%s]:%d" % (get_ipv6(), port), **kwargs),
]
await asyncio.gather(*futures)
async def assert_can_connect_locally_6(port, **kwargs):
"""
Check that the local *port* is only reachable from local IPv6 addresses.
"""
assert has_ipv6()
futures = [
assert_cannot_connect("tcp://127.0.0.1:%d" % port, **kwargs),
assert_cannot_connect("tcp://%s:%d" % (get_ip(), port), **kwargs),
assert_can_connect("tcp://[::1]:%d" % port, **kwargs),
]
if get_ipv6() != "::1": # No outside IPv6 connectivity?
futures += [
assert_cannot_connect("tcp://[%s]:%d" % (get_ipv6(), port), **kwargs)
]
await asyncio.gather(*futures)
@contextmanager
def captured_logger(logger, level=logging.INFO, propagate=None):
"""Capture output from the given Logger."""
if isinstance(logger, str):
logger = logging.getLogger(logger)
orig_level = logger.level
orig_handlers = logger.handlers[:]
if propagate is not None:
orig_propagate = logger.propagate
logger.propagate = propagate
sio = io.StringIO()
logger.handlers[:] = [logging.StreamHandler(sio)]
logger.setLevel(level)
try:
yield sio
finally:
logger.handlers[:] = orig_handlers
logger.setLevel(orig_level)
if propagate is not None:
logger.propagate = orig_propagate
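# Hypothetical usage sketch (assumes the code under test logs through the
# "distributed.scheduler" logger):
#
#     with captured_logger("distributed.scheduler") as sio:
#         ...  # exercise code expected to log at INFO or above
#     assert "expected message" in sio.getvalue()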
@contextmanager
def captured_handler(handler):
"""Capture output from the given logging.StreamHandler."""
assert isinstance(handler, logging.StreamHandler)
orig_stream = handler.stream
handler.stream = io.StringIO()
try:
yield handler.stream
finally:
handler.stream = orig_stream
@contextmanager
def new_config(new_config):
"""
Temporarily change configuration dictionary.
"""
from .config import defaults
config = dask.config.config
orig_config = copy.deepcopy(config)
try:
config.clear()
config.update(copy.deepcopy(defaults))
dask.config.update(config, new_config)
initialize_logging(config)
yield
finally:
config.clear()
config.update(orig_config)
initialize_logging(config)
@contextmanager
def new_environment(changes):
saved_environ = os.environ.copy()
os.environ.update(changes)
try:
yield
finally:
os.environ.clear()
os.environ.update(saved_environ)
@contextmanager
def new_config_file(c):
"""
Temporarily change configuration file to match dictionary *c*.
"""
import yaml
old_file = os.environ.get("DASK_CONFIG")
fd, path = tempfile.mkstemp(prefix="dask-config")
try:
with os.fdopen(fd, "w") as f:
f.write(yaml.dump(c))
os.environ["DASK_CONFIG"] = path
try:
yield
finally:
if old_file:
os.environ["DASK_CONFIG"] = old_file
else:
del os.environ["DASK_CONFIG"]
finally:
os.remove(path)
certs_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "tests"))
def get_cert(filename):
"""
Get the path to one of the test TLS certificates.
"""
path = os.path.join(certs_dir, filename)
assert os.path.exists(path), path
return path
def tls_config():
"""
A functional TLS configuration with our test certs.
"""
ca_file = get_cert("tls-ca-cert.pem")
keycert = get_cert("tls-key-cert.pem")
return {
"distributed": {
"comm": {
"tls": {
"ca-file": ca_file,
"client": {"cert": keycert},
"scheduler": {"cert": keycert},
"worker": {"cert": keycert},
}
}
}
}
def tls_only_config():
"""
A functional TLS configuration with our test certs, disallowing
plain TCP communications.
"""
c = tls_config()
c["distributed"]["comm"]["require-encryption"] = True
return c
def tls_security():
"""
A Security object with proper TLS configuration.
"""
with new_config(tls_config()):
sec = Security()
return sec
def tls_only_security():
"""
A Security object with proper TLS configuration and disallowing plain
TCP communications.
"""
with new_config(tls_only_config()):
sec = Security()
assert sec.require_encryption
return sec
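# Hypothetical usage sketch (an editorial illustration): gen_tls_cluster below
# wires this Security object into a test decorator, e.g.
#
#     @gen_tls_cluster(client=True)
#     async def test_tls(c, s, a, b):
#         assert s.address.startswith("tls://")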
def get_server_ssl_context(
certfile="tls-cert.pem", keyfile="tls-key.pem", ca_file="tls-ca-cert.pem"
):
ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH, cafile=get_cert(ca_file))
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_cert_chain(get_cert(certfile), get_cert(keyfile))
return ctx
def get_client_ssl_context(
certfile="tls-cert.pem", keyfile="tls-key.pem", ca_file="tls-ca-cert.pem"
):
ctx = ssl.create_default_context(ssl.Purpose.SERVER_AUTH, cafile=get_cert(ca_file))
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_cert_chain(get_cert(certfile), get_cert(keyfile))
return ctx
def bump_rlimit(limit, desired):
    resource = pytest.importorskip("resource")
    soft = None
    try:
        soft, hard = resource.getrlimit(limit)
        if soft < desired:
            resource.setrlimit(limit, (desired, max(hard, desired)))
    except Exception as e:
        # soft stays None if getrlimit itself failed
        pytest.skip("rlimit too low (%s) and can't be increased: %s" % (soft, e))
def gen_tls_cluster(**kwargs):
kwargs.setdefault("nthreads", [("tls://127.0.0.1", 1), ("tls://127.0.0.1", 2)])
return gen_cluster(
scheduler="tls://127.0.0.1", security=tls_only_security(), **kwargs
)
@contextmanager
def save_sys_modules():
    # Take copies: binding the live objects directly would make the
    # "not in" checks below always false and the cleanup a no-op.
    old_modules = sys.modules.copy()
    old_path = sys.path[:]
    try:
        yield
    finally:
        sys.path[:] = [elem for elem in sys.path if elem in old_path]
        for elem in list(sys.modules):
            if elem not in old_modules:
                del sys.modules[elem]
@contextmanager
def check_thread_leak():
active_threads_start = set(threading._active)
yield
start = time()
while True:
bad = [
t
for t, v in threading._active.items()
if t not in active_threads_start
and "Threaded" not in v.name
and "watch message" not in v.name
and "TCP-Executor" not in v.name
]
if not bad:
break
else:
sleep(0.01)
if time() > start + 5:
from distributed import profile
tid = bad[0]
thread = threading._active[tid]
call_stacks = profile.call_stack(sys._current_frames()[tid])
assert False, (thread, call_stacks)
@contextmanager
def check_process_leak(check=True):
for proc in mp_context.active_children():
proc.terminate()
yield
if check:
for i in range(200):
if not set(mp_context.active_children()):
break
else:
sleep(0.2)
else:
assert not mp_context.active_children()
_cleanup_dangling()
for proc in mp_context.active_children():
proc.terminate()
@contextmanager
def check_instances():
Client._instances.clear()
Worker._instances.clear()
Scheduler._instances.clear()
SpecCluster._instances.clear()
# assert all(n.status == "closed" for n in Nanny._instances), {
# n: n.status for n in Nanny._instances
# }
Nanny._instances.clear()
_global_clients.clear()
Comm._instances.clear()
yield
start = time()
while set(_global_clients):
sleep(0.1)
assert time() < start + 10
_global_clients.clear()
for w in Worker._instances:
with suppress(RuntimeError): # closed IOLoop
w.loop.add_callback(w.close, report=False, executor_wait=False)
if w.status == Status.running:
w.loop.add_callback(w.close)
Worker._instances.clear()
for i in range(5):
if all(c.closed() for c in Comm._instances):
break
else:
sleep(0.1)
else:
L = [c for c in Comm._instances if not c.closed()]
Comm._instances.clear()
print("Unclosed Comms", L)
# raise ValueError("Unclosed Comms", L)
assert all(
n.status == Status.closed or n.status == Status.init for n in Nanny._instances
), {n: n.status for n in Nanny._instances}
# assert not list(SpecCluster._instances) # TODO
assert all(c.status == Status.closed for c in SpecCluster._instances), list(
SpecCluster._instances
)
SpecCluster._instances.clear()
Nanny._instances.clear()
DequeHandler.clear_all_instances()
@contextmanager
def clean(threads=not WINDOWS, instances=True, timeout=1, processes=True):
@contextmanager
def null():
yield
with check_thread_leak() if threads else null():
with pristine_loop() as loop:
with check_process_leak(check=processes):
with check_instances() if instances else null():
with check_active_rpc(loop, timeout):
reset_config()
dask.config.set({"distributed.comm.timeouts.connect": "5s"})
# Restore default logging levels
# XXX use pytest hooks/fixtures instead?
for name, level in logging_levels.items():
logging.getLogger(name).setLevel(level)
yield loop
with suppress(AttributeError):
del thread_state.on_event_loop_thread
@pytest.fixture
def cleanup():
with clean():
yield
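# Hypothetical usage sketch (assumes a synchronous pytest test in another
# module; requesting the fixture wraps the test in the leak checks above):
#
#     def test_client_roundtrip(cleanup):
#         with Client(processes=False) as c:
#             assert c.submit(lambda: 1).result() == 1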
|
py | 1a373b06d6d691ce9eb8d8373cc4e07c9302d418 | # -*- coding: utf-8 -*-
#
# Database upgrade script
#
# RLPCM Template Version 1.1.5 => 1.1.6
#
# Execute in web2py folder after code upgrade like:
# python web2py.py -S eden -M -R applications/eden/modules/templates/BRCMS/RLP/upgrade/1.1.5-1.1.6.py
#
import sys
from uuid import uuid4
#from gluon.storage import Storage
#from gluon.tools import callback
from s3 import S3Duplicate
# Override auth (disables all permission checks)
auth.override = True
# Failed-flag
failed = False
# Info
def info(msg):
sys.stderr.write("%s" % msg)
def infoln(msg):
sys.stderr.write("%s\n" % msg)
# Load models for tables
#ftable = s3db.org_facility
IMPORT_XSLT_FOLDER = os.path.join(request.folder, "static", "formats", "s3csv")
TEMPLATE_FOLDER = os.path.join(request.folder, "modules", "templates", "BRCMS")
# -----------------------------------------------------------------------------
# Upgrade user roles
#
if not failed:
info("Upgrade user roles")
bi = s3base.S3BulkImporter()
filename = os.path.join(TEMPLATE_FOLDER, "RLP", "auth_roles.csv")
with open(filename, "r") as File:
try:
bi.import_role(filename)
except Exception as e:
infoln("...failed")
infoln(sys.exc_info()[1])
failed = True
else:
infoln("...done")
# -----------------------------------------------------------------------------
# Finishing up
#
if failed:
db.rollback()
infoln("UPGRADE FAILED - Action rolled back.")
else:
db.commit()
infoln("UPGRADE SUCCESSFUL.")
|
py | 1a373cc887d600632ff4c6b60c05538cbc538057 | import sys
import os
import nbformat as nbf
import re
class Neatbook:
def __init__(self, ):
PROJECT_FILE = os.path.realpath(os.path.basename(sys.argv[0]))
PROJECT_PATH = re.match("(.*[/\\\])", PROJECT_FILE).group(1)
PROJECT_NAME = re.match(".*[/\\\]+([^/\\\]+)[/\\\]+$", PROJECT_PATH).group(1)
nb = nbf.v4.new_notebook()
header1 = """\
# {} Neatbook
#### Get Data""".format(PROJECT_NAME.capitalize())
code1 = """\
import pandas as pd
import numpy as np
# Get data here
df = pd.read_csv("train.csv") # Edit: Your dataset
# classDF = pd.read_csv("train_labels.csv", header=None)
# df = pd.concat([df, classDF], axis=1)
print(df.shape)
print(df.describe(include = [np.number]))
print(df.dtypes)
print(df.describe(include = ['O']))
df.head()
"""
header2 = """\
#### Initialize variables"""
code2 = """\
from sklearn.model_selection import train_test_split
className = 'class' # Edit: Replace class with the Y column name
trainX, testX, trainY, testY = train_test_split(df.drop([className], axis=1),
df[className], train_size=0.75, test_size=0.25)
indexColumns = [] # Edit: Optionally add column names
iWillManuallyCleanColumns = [] # Edit: Optionally add column names
print("trainX.shape = ", trainX.shape)
print("testX.shape = ", testX.shape)
print("trainY.shape = ", trainY.shape)
print("testY.shape = ", testY.shape)
print("\ntrainY\n")
print(trainY.head())
print("trainX\n")
print(trainX.head())
"""
header3 = """\
#### Clean Data"""
code3 = """\
from neatdata.neatdata import *
neatdata = NeatData()
cleanTrainX, cleanTrainY = neatdata.cleanTrainingDataset(trainX, trainY, indexColumns, iWillManuallyCleanColumns)
cleanTestX = neatdata.cleanTestDataset(testX)
cleanTestY = neatdata.convertYToNumbersForModeling(testY)
print("Cleaning done")
"""
header4 = """\
#### Review Cleaned Data"""
code4 = """\
print(cleanTrainX.describe(include = [np.number]))
print(cleanTrainX.head())
print(cleanTrainY)
print(cleanTestX.describe(include = [np.number]))
print(cleanTestX.head())
print(cleanTestY)
"""
header5 = """\
#### Run TPOT"""
code5 = """\
from tpot import TPOTClassifier
tpot = TPOTClassifier(max_time_mins=5, # Edit: Set to 480 to train for 8 hours
population_size=100, max_eval_time_mins=5, verbosity=2)
tpot.fit(cleanTrainX, cleanTrainY)
print(tpot.score(cleanTestX, cleanTestY))
tpot.export('tpot_pipeline.py')
print("\\n\\nTPOT is done.")
"""
header6 = """\
## Run this after TPOT is done
Creates the modelpipeline.py file. That file also creates the trainedmodelpipeline.py.
"""
code6 = """\
with open('modelpipeline.py', 'w') as fileOut:
with open('tpot_pipeline.py', 'r') as fileIn:
for line in fileIn:
if line.startswith("import") or line.startswith("from "):
fileOut.write(line)
fileOut.write(\"\"\"from sklearn.metrics import accuracy_score
from neatdata.neatdata import *
from sklearn.metrics import confusion_matrix
import pickle
class ModelPipeline:
def __init__(self):
self.indexColumns, self.iWillManuallyCleanColumns = None, None
self.neatData = NeatData()
self.className = 'class' # Edit: Replace class with the Y column name
self.indexColumns = [] # Edit: Optionally add column names
self.iWillManuallyCleanColumns = [] # Edit: Optionally add column names
self.cleanTrainX, self.cleanTrainY, self.cleanTestX, self.cleanTestY = None, None, None, None
self.results = None
def execute(self):
trainX, testX, trainY, testY = self._getDatasetFrom________() # Edit: choose one of two functions
self._cleanDatasets(trainX, testX, trainY, testY)
self._modelFit()
self._printModelScores()
self._createTrainedModelPipelineFile()
self._saveObjectsToDisk()
self._createTrainedModelPipelineFile()
def _getDatasetFromOneFile(self):
df = pd.read_csv('train.csv') # Edit: Your dataset
# df = pd.read_csv('train.csv', header=None)
# classDF = pd.read_csv("train_labels.csv", header=None, names=["class"])
# df = pd.concat([df, classDF], axis=1)
trainX, testX, trainY, testY = train_test_split(df.drop([self.className], axis=1),
df[self.className], train_size=0.75, test_size=0.25)
return trainX, testX, trainY, testY
def _getDatasetFromTwoFiles(self):
trainingDf = pd.read_csv('train.csv') # Edit: Your training dataset
testDf = pd.read_csv('test.csv') # Edit: Your test dataset
trainX = trainingDf.drop([self.className], axis=1)
trainY = trainingDf[self.className]
testX = testDf.drop([self.className], axis=1)
testY = testDf[self.className]
return trainX, testX, trainY, testY
def _cleanDatasets(self, trainX, testX, trainY, testY):
self.cleanTrainX, self.cleanTrainY = self.neatData.cleanTrainingDataset(trainX, trainY, self.indexColumns, self.iWillManuallyCleanColumns)
self.cleanTestX = self.neatData.cleanTestDataset(testX)
self.cleanTestY = self.neatData.convertYToNumbersForModeling(testY)
def _modelFit(self):
\"\"\")
showNextLines = False
with open('modelpipeline.py', 'a') as fileOut:
with open('tpot_pipeline.py', 'r') as fileIn:
for line in fileIn:
if line.startswith("# Score"):
showNextLines = True
elif showNextLines and not line.startswith("exported_pipeline.fit") and not line.startswith("results"):
fileOut.write(" " + line)
with open('modelpipeline.py', 'a') as fileOut:
fileOut.write(\"\"\" self.exported_pipeline = exported_pipeline
self.exported_pipeline.fit(self.cleanTrainX, self.cleanTrainY)
self.results = self.exported_pipeline.predict(self.cleanTestX)
def _printModelScores(self):
print("Confusion Matrix:")
print(confusion_matrix(self.cleanTestY, self.results))
print(accuracy_score(self.cleanTestY, self.results))
def _saveObjectsToDisk(self):
def save_object(obj, filename):
with open(filename, 'wb') as output:
pickle.dump(obj, output, pickle.HIGHEST_PROTOCOL)
save_object(self.exported_pipeline, 'exportedPipeline.pkl')
save_object(self.neatData, 'NeatData.pkl')
def _createTrainedModelPipelineFile(self):
with open('trainedmodelpipeline.py', 'w') as fileOut:
fileOut.write(\\\"\\\"\\\"
import pandas as pd
import pickle
class TrainedModelPipeline:
def __init__(self):
self.exportedPipeline = None
self.neatData = None
self.testX = None
self.cleanTestX = None
self.results = None
self.resultsDf = None
def execute(self):
self._loadObjects()
self._getDataset()
self._cleanDataset()
self._predict()
self._concatenatePredictionsToDataframe()
self._saveResultsAsCSV()
print("Done. Created results.csv")
def _loadObjects(self):
with open('exportedPipeline.pkl', 'rb') as input:
self.exportedPipeline = pickle.load(input)
with open('NeatData.pkl', 'rb') as input:
self.neatData = pickle.load(input)
def _getDataset(self):
self.testX = pd.read_csv('test.csv') # Edit: Your dataset
# self.testX = pd.read_csv('test.csv', header=None)
def _cleanDataset(self):
self.cleanTestX = self.neatData.cleanTestDataset(self.testX)
def _predict(self):
self.results = self.exportedPipeline.predict(self.cleanTestX)
self.results = self.neatData.convertYToStringsOrNumbersForPresentation(self.results)
def _concatenatePredictionsToDataframe(self):
self.resultsDf = pd.DataFrame(self.results)
self.resultsDf = pd.concat([self.testX, self.resultsDf], axis=1)
def _saveResultsAsCSV(self):
self.resultsDf.to_csv('./results.csv')
trainedModelPipeline = TrainedModelPipeline()
trainedModelPipeline.execute()
\\\"\\\"\\\")
modelPipeline = ModelPipeline()
modelPipeline.execute()
\"\"\")
print("Done creating modelpipeline.py")
"""
nb['cells'] = [nbf.v4.new_markdown_cell(header1),
nbf.v4.new_code_cell(code1),
nbf.v4.new_markdown_cell(header2),
nbf.v4.new_code_cell(code2),
nbf.v4.new_markdown_cell(header3),
nbf.v4.new_code_cell(code3),
nbf.v4.new_markdown_cell(header4),
nbf.v4.new_code_cell(code4),
nbf.v4.new_markdown_cell(header5),
nbf.v4.new_code_cell(code5),
nbf.v4.new_markdown_cell(header6),
nbf.v4.new_code_cell(code6) ]
fname = '{}.ipynb'.format(PROJECT_PATH + PROJECT_NAME.capitalize() + "_Neatbook")
if not os.path.isfile(fname):
with open(fname, 'w') as f:
nbf.write(nb, f)
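# Hypothetical usage sketch (assumes this module is importable as `neatbook`
# and the calling script lives in the project folder whose name becomes the
# notebook title):
#
#     from neatbook import Neatbook
#     Neatbook()  # writes <Project>_Neatbook.ipynb next to the calling script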
|
py | 1a373d4210692e47e89b694aea9415beecd1d66f | # -*- coding: utf-8 -*-
import DIRAC
from DIRAC import gLogger
from DIRAC.Core.Base import Script
Script.setUsageMessage("""
Download LFNs in a dataset
Usage:
%s <dataset name>
""" % Script.scriptName)
Script.registerSwitch("", "save=", "The directory which save files.")
Script.parseCommandLine( ignoreErrors = True )
args = Script.getPositionalArgs()
if (len(args) != 1):
gLogger.error("Please support the dataset name")
DIRAC.exit(-1)
dataset = args[0]
dir_save = args[0]
for k,v in Script.getUnprocessedSwitches():
if k.lower() in ["save"]:
dir_save = v
gLogger.info("Dataset Name: ", dataset)
gLogger.info("Save in: ", dir_save)
# Get the list of LFNs in one dataset
from DIRAC.Core.DISET.RPCClient import RPCClient
transferRequest = RPCClient("Transfer/Dataset")
res = transferRequest.list(dataset)
if not res["OK"]:
gLogger.error(res)
DIRAC.exit(-1)
file_list = [v[1] for v in res["Value"]]
gLogger.debug("File List", file_list)
# Begin to save file
# Refer to dirac-dms-get-file.py in DIRAC/Interfaces/scripts
from DIRAC.Interfaces.API.Dirac import Dirac
dirac = Dirac()
res = dirac.getFile( file_list, destDir = dir_save, printOutput = True )
if not res["OK"]:
gLogger.error(res)
DIRAC.exit(-1)
DIRAC.exit(0)
|
py | 1a373e067d2cfe00189c8bb8665409f775831273 | #!/usr/bin/python
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import numpy as np
import serial
import sys
import time
from demo_plot_defs import *
from other_funcs import get_freq_response, get_phase_response
if (sys.argv[1] == "help"):
print ("usage: demo_plot.py MIN MAX options...")
print ("MIN MAX define the range of frequencies to test")
print ("Possible options: linear, phase, calibrate.")
print ("linear will produce a linear plot instead of a logarithmic one")
print ("phase will produce a phase response in addition to the frequency response plot")
print ("calibrate is recommended and allows you to run an extra test with just a copper wire to better calibrate the final output.")
print ("")
sys.exit (1)
elif len(sys.argv) < 3:
print ("usage: demo_plot.py MIN MAX options...")
sys.exit (1)
try:
float (sys.argv[1])
float (sys.argv[2])
except ValueError:
print ("usage: demo_plot.py MIN MAX options...")
print ("MIN and MAX must be floating point!")
sys.exit (1)
#Initialize
s = connect_gpa()
mc_init(s)
synth_init(s)
frontend_init(s)
lower_bound = float (sys.argv[1])
upper_bound = float (sys.argv[2])
freqs_f = np.logspace(np.log10(lower_bound), np.log10(upper_bound), 60) # 1 kHz to 150 MHz
freqs_p = np.logspace(np.log10(lower_bound), np.log10(upper_bound), 30) # 1 kHz to 150 MHz
data_f = []
data_p = []
data_calibrate_f = []
data_calibrate_p = []
if "calibrate" in sys.argv:
input ("Please double check that the wire is connected and press Enter...")
data_calibrate_f = get_freq_response(s, lower_bound, upper_bound, freqs_f)
if "phase" in sys.argv:
data_calibrate_p = get_phase_response(s, lower_bound, upper_bound, freqs_p)
input ("Now connect your filter for testing and press Enter ...")
data_f = get_freq_response(s, lower_bound, upper_bound, freqs_f)
if "calibrate" in sys.argv:
for i in range(len(data_f)):
data_f[i] = data_f[i] - data_calibrate_f[i]
plt.subplot(2, 1, 1)
#ax = plt.axes(xlim=(1e3, 1e9))
if 'linear' in sys.argv:
plot, = plt.plot (freqs_f, data_f)
else:
plot, = plt.semilogx (freqs_f, data_f)
if "phase" not in sys.argv:
plt.xlabel ("Frequency (Hz)")
plt.ylabel ("Amplitude (dB, calibrated)")
plt.title ("Voltage Insertion Gain, calibrated")
plt.grid (True)
if "phase" in sys.argv:
data_p = get_phase_response(s, lower_bound, upper_bound, freqs_p)
if "calibrate" in sys.argv:
for i in range(len(data_p)):
data_p[i] = data_p[i] - data_calibrate_p[i]
plt.subplot(2, 1, 2)
#ax = plt.axes(xlim=(1e3, 1e9))
if 'linear' in sys.argv:
plot, = plt.plot (freqs_p, data_p)
else:
plot, = plt.semilogx (freqs_p, data_p)
plt.xlabel ("Frequency (Hz)")
plt.ylabel ("Phase (deg, calibrated)")
plt.title ("Phase Shift, calibrated")
plt.grid (True)
plt.savefig('out.png')
plt.show ()
|
py | 1a373e326d5559a7f1c5192fdf626d1af4391837 | class HomieDevice:
def __init__( self, id, name ):
self.id = id
self.name = name
|
py | 1a373ed11524f98d44802a932f11157f7a16406b | import os
import discord
from discord import Embed
import settings
def prune_participant_name(name):
name = name.split(' ')[0]
name = name.split('/')[0]
name = name.split('\\')[0]
name = name.split('-')[0]
name = name.split('(')[0]
name = name.split(')')[0]
name = name.split('+')[0]
name = name.split('&')[0]
name = name.title()
return name
def fuzzy_string_match(first, second):
if len(first) > 3:
return second.lower() in first.lower()
else:
return first.lower() == second.lower()
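# Hypothetical usage sketch (plain-string examples, an editorial illustration):
#
#     prune_participant_name("alice / raid lead")  # -> "Alice"
#     fuzzy_string_match("Alexander", "alex")      # -> True (substring match)
#     fuzzy_string_match("Al", "al")               # -> True (exact match for short names)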
async def send_announcement(ctx, announcement):
for channel in ctx.guild.channels:
if channel.name.lower() == settings.ANNOUNCEMENT_CHANNEL_NAME.lower():
await channel.send(announcement)
break
def extract_identifier(member):
for role in member.roles:
if role.name.title() in settings.IDENTIFIERS:
return role.name.title()
return None
def extract_role(member, identifier):
for role in member.roles:
if role.name.title() in settings.ROLES.ALL:
return role.name.title()
return settings.ROLES.from_identifier_default(identifier.title())
async def get_event_message(channel, client):
def is_event_message(m):
# Look for a message that has an embed with a footer that contains the id of the bot
if len(m.embeds) > 0:
footer = m.embeds[0].footer
return False if footer is Embed.Empty else str(client.user.id) == m.embeds[0].footer.text
return False
# Check if the bot has an event message in this channel already
event_message = await channel.history().find(is_event_message)
return event_message
async def show_event(channel, client, embed, new_event=False):
def is_event_message(m):
# Look for a message that has an embed with a footer that contains the id of the bot
if len(m.embeds) > 0:
footer = m.embeds[0].footer
            return False if footer is Embed.Empty else str(client.user.id) == m.embeds[0].footer.text
        return False
await channel.purge(check=lambda m: not is_event_message(m))
event_message = await get_event_message(channel, client)
if event_message is None:
event_message = await channel.send(embed=embed)
new_event = True
else:
await event_message.edit(embed=embed)
if new_event:
await event_message.clear_reactions()
await event_message.add_reaction(emoji=settings.SIGNUP_REACTION)
await event_message.add_reaction(emoji=settings.DECLINE_REACTION)
def log(*args):
is_logging_active = os.getenv('LOGGING')
if is_logging_active:
print(*args)
|
py | 1a373fbffc4011b726cf21b3b2fc640ee3682a04 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 19 09:04:27 2018
@author: jeremiasknoblauch
Description: Well-log data processing
"""
"""System packages/modules"""
import numpy as np
import scipy
import matplotlib.pyplot as plt
import csv
import datetime
import matplotlib
"""Modules of the BOCPDMS algorithm"""
from cp_probability_model import CpModel
from BVAR_NIG import BVARNIG
from BVAR_NIG_DPD import BVARNIGDPD
from detector import Detector
from Evaluation_tool import EvaluationTool
baseline_working_directory = ("//Users//jeremiasknoblauch//Documents//OxWaSP"+
"//BOCPDMS/Code//SpatialBOCD//Data//well log")
well_file = baseline_working_directory + "//well.txt"
mode = "DPD" #Two modes available:
#"DPD" -> Density Power Divergence, which is
# the same as the beta-divergence.
#"KL" -> Kullback Leibler Divergence, i.e. standard bayes
"""STEP 1: Read in the nile data from well.txt"""
raw_data = []
count = 0
with open(well_file) as csvfile:
reader = csv.reader(csvfile)
for row in reader:
raw_data += row
raw_data_float = []
for entry in raw_data:
raw_data_float.append(float(entry))
raw_data = raw_data_float
"""STEP 2: Format the data so that it can be processed with a Detector
object and instantiations of ProbabilityModel subclasses"""
T = int(len(raw_data))
S1, S2 = 1,1 #S1, S2 give you spatial dimensions
data = np.array(raw_data).reshape(T,1,1)
"""STEP 3: Set up the optimization parameters. These only apply if mode = "DPD"
since the standard Bayesian inference is exact"""
VB_window_size = 360 #W in pseudo-code of paper
full_opt_thinning = 20 #i.e., every when the run-length is divisible by 20,
#we perform a full optimization step
SGD_approx_goodness = 10 #In the pure SGD steps, how big is your batch
anchor_approx_goodness_SVRG = 50 #in the SVRG steps, how big is your batch (NOT USED)
anchor_approx_goodness_SCSG = 25 #in the SCSG steps, how big is your batch
first_opt = 10
alpha_param_opt_t = 0 #Indicates how many time period you wait before learning
#about beta (alpha in DPD notation of Basu et al. ('98))
"""STEP 4: Set up the priors for the model universe's elements"""
a, b = 1, pow(10,7)
alpha_param = 0.05 #Initialization for the parameter-beta (alpha)
alpha_rld = 0.0001 #Initialization for the run-length-beta (alpha)
rld_DPD = "power_divergence" #The run-length robustification can be set inde-
#pendently from the parameter robustness.
a_KL, b_KL = 1, pow(10,4)
rld_KL = "kullback_leibler" #The run-length robustification can be set inde-
#pendently from the parameter robustness.
rld_learning = True #Whether or not we learn about the beta (alpha)
#robustifying the run-length (if rld = "power_divergence")
param_learning = "individual" #Irrelevant for well-log because we only run
#a single model.
#Set Prior mean and variance scales
prior_mean_scale, prior_var_scale = np.mean(data), 0.25
cp_intensity = 100 # cp_intensity = k => prior prob P(CP at time t) = 1/k
np.random.seed(999) #To exactly reproduce paper results/pictures
"""STEP 5: Create models"""
model_universe_DPD = []
model_universe_KL = []
model_universe_DPD = model_universe_DPD + [BVARNIGDPD(
prior_a=a,
prior_b=b, #b,
S1=S1,
S2=S2,
alpha_param = alpha_param,
prior_mean_beta=None,
prior_var_beta=None,
prior_mean_scale=prior_mean_scale,
prior_var_scale=prior_var_scale,
nbh_sequence=None,
restriction_sequence = None,
hyperparameter_optimization = "online",
VB_window_size = VB_window_size,
full_opt_thinning = full_opt_thinning,
SGD_batch_size = SGD_approx_goodness,
anchor_batch_size_SCSG = anchor_approx_goodness_SCSG,
anchor_batch_size_SVRG = None,
first_full_opt = first_opt
)]
model_universe_KL = model_universe_KL + [BVARNIG(
prior_a = a_KL,
prior_b = b_KL,
S1 = S1,
S2 = S2,
prior_mean_scale = prior_mean_scale,
prior_var_scale = prior_var_scale,
nbh_sequence = None,
restriction_sequence = None,
hyperparameter_optimization = "online"
)]
"""STEP 6: Set up the detectors from this"""
model_universe_DPD = np.array(model_universe_DPD)
model_universe_KL = np.array(model_universe_KL)
model_prior = np.array([1.0/len(model_universe_DPD)]*len(model_universe_DPD))
cp_model = CpModel(cp_intensity)
detector_DPD = Detector(
data=data,
model_universe=model_universe_DPD,
model_prior = model_prior,
cp_model = cp_model,
S1 = S1,
S2 = S2,
T = T,
store_rl=True,
store_mrl=True,
trim_type="keep_K",
threshold = 50,
notifications = 100,
save_performance_indicators = True,
generalized_bayes_rld = rld_DPD,
alpha_param_learning = param_learning,
alpha_param = alpha_param,
alpha_param_opt_t = 100,
alpha_rld = alpha_rld,
alpha_rld_learning = rld_learning,
loss_der_rld_learning="absolute_loss"
)
detector_DPD.run()
detector_KL = Detector(
data=data,
model_universe=model_universe_KL,
model_prior = model_prior,
cp_model = cp_model,
S1 = S1,
S2 = S2,
T = T,
store_rl=True,
store_mrl=True,
trim_type="keep_K",
threshold = 50,
notifications = 100,
save_performance_indicators = True,
generalized_bayes_rld = rld_KL,
alpha_param_learning = param_learning,
alpha_param = alpha_param,
alpha_param_opt_t = 100,
alpha_rld = alpha_rld,
alpha_rld_learning = rld_learning,
loss_der_rld_learning="absolute_loss"
)
detector_KL.run()
"""STEP 7: Make graphing tool"""
EvTDPD = EvaluationTool()
EvTDPD.build_EvaluationTool_via_run_detector(detector_DPD)
EvTKL = EvaluationTool()
EvTKL.build_EvaluationTool_via_run_detector(detector_KL)
"""STEP 8: Plotting Pictures in paper"""
matplotlib.rcParams.update({'figure.autolayout': False})
"""Get the different CPs"""
CPsDPD = np.array([e[0] for e in EvTDPD.results[EvTDPD.names.index("MAP CPs")][-2]])
CPsKL = np.array([e[0] for e in EvTKL.results[EvTKL.names.index("MAP CPs")][-2]])
k = 25
additional_CPs = []
for cp_kl in CPsKL:
lower = CPsDPD - k < cp_kl
upper = CPsDPD + k > cp_kl
if (not np.any(lower == upper)):
additional_CPs.append([cp_kl,0])
height_ratio =[10,4,8]
KL_CP_color = "crimson"
DPD_CP_color = "darkblue"
max_color_KL = "red"
max_color_DPD = "blue"
max_width = 1
CP_linewidth_DPD = 2
CP_linewidth_KL = 1
CP_style_KL = (0,(1,2.25))
CP_style_DPD = "solid"
CP_transparence_KL = 0.75
CP_transparence_DPD = 0.5
show_CPs_in_rld = False
xlabsize, ylabsize, ticksize = 15,15,12
fig, ax_array = plt.subplots(3,
figsize=(18,10),
sharex = True,
gridspec_kw = {'height_ratios':height_ratio})
fig.subplots_adjust(hspace = .05,
left = None, bottom = None,
right = None, top = None)
ylabel_coords = [0.0, 0.25]
EvTDPD.plot_raw_TS(data.reshape(T,S1*S2), indices = [0], xlab = None,
show_MAP_CPs = True,
time_range = np.linspace(1,T, T, dtype=int),
print_plt = False,
ylab = "Response",
ax = ax_array[0],
custom_colors_series = ["black"]*5,
custom_colors_CPs = [DPD_CP_color]* 100,
custom_linestyles = [CP_style_DPD]*100,
custom_linewidth = CP_linewidth_DPD,
custom_transparency = CP_transparence_DPD,
ylab_fontsize = ylabsize,
yticks_fontsize = ticksize,
ylabel_coords = [-0.06,0.5],
additional_CPs = additional_CPs,
custom_colors_additional_CPs = [KL_CP_color] * 100,
custom_linewidth_additional_CPs = CP_linewidth_KL,
custom_linestyles_additional_CPs = [CP_style_KL] * 10,
custom_transparency_additional_CPs = CP_transparence_KL)
EvTDPD.plot_run_length_distr(buffer=0, show_MAP_CPs = show_CPs_in_rld,
mark_median = False,
mark_max = True,
upper_limit = 1300,
print_colorbar = False,
colorbar_location= None,
xlab = "",
ylab = "",
log_format = False, aspect_ratio = 'auto',
time_range = np.linspace(1,
T-2,
T-2, dtype=int),
start = 1, stop = T,
all_dates = None,
custom_colors = [DPD_CP_color] * 30,
custom_linestyles = [CP_style_DPD]*30,
custom_linewidth = CP_linewidth_DPD,
xlab_fontsize = xlabsize,
ylab_fontsize = ylabsize,
xticks_fontsize = ticksize,
yticks_fontsize = ticksize,
ax = ax_array[1], figure = fig,
no_transform = True,
date_instructions_formatter = None,
date_instructions_locator = None,
arrow_distance = 25,
mark_max_linewidth = max_width,
mark_max_color = max_color_DPD)
EvTKL.plot_run_length_distr(buffer=0, show_MAP_CPs = show_CPs_in_rld,
mark_median = False,
mark_max = True, upper_limit = 1200,
print_colorbar = True,
colorbar_location= 'bottom',
space_to_colorbar = 0.6,
log_format = False, aspect_ratio = 'auto',
C1=0,C2=700,
time_range = np.linspace(1,
T-2,
T-2, dtype=int),
start = 1, stop = T,
all_dates = None,
custom_colors = [KL_CP_color] * 30,
custom_linestyles = [CP_style_KL]*30,
custom_linewidth = CP_linewidth_KL,
xlab_fontsize =xlabsize,
ylab_fontsize = ylabsize,
xticks_fontsize = ticksize,
yticks_fontsize = ticksize,
ylabel_coords = [-0.06, 1.25],
ax = ax_array[2], figure = fig,
no_transform = True,
date_instructions_formatter = None,
date_instructions_locator = None,
xlab = "Time",
ylab = "run length",
arrow_distance = 25,
mark_max_linewidth = max_width,
mark_max_color = max_color_KL)
fig.savefig(baseline_working_directory + "//well.pdf",
format = "pdf", dpi = 800)
fig.savefig(baseline_working_directory + "//well.jpg",
format = "jpg", dpi = 800)
"""STEP 9: Plot some performance metrics"""
def abs_loss_lim(x, lim):
x[np.where(x >= lim)] = lim
return np.abs(x)
sd = np.sqrt(np.var(data))
print("CPs are ", detector_DPD.CPs[-2])
train = 0
until = -2
resids = (data[1:] - EvTKL.results[10].reshape(T,1)[:-1])[train:until]
print("summary MSE KL:",
np.mean(np.power((data[1:] -
EvTKL.results[10].reshape(T,1)[:-1])[train:until],2)))
print("summary MAE KL:",
np.mean(np.abs((data[1:] -
EvTKL.results[10].reshape(T,1)[:-1])[train:until])))
resids = (data - EvTDPD.results[10].reshape(T,1)[:-1])[train:until]
print("summary MSE DPD:",
np.mean(np.power(((data -
EvTDPD.results[10].reshape(T,1)[:-1]))[train:until],2)))
print("summary MAE DPD:",
np.mean(np.abs(((data -
EvTDPD.results[10].reshape(T,1)[:-1]))[train:until])))
|
py | 1a373fdbfbdad892a1a0a2f6de27240f5da35654 | from django.http import HttpResponseForbidden
from django.shortcuts import get_object_or_404, render_to_response
from django.template import RequestContext
from boxes.authorization import load_can_edit
from boxes.forms import BoxForm
from boxes.models import Box
# @@@ problem with this is that the box_edit.html and box_create.html won't have domain objects in context
def get_auth_vars(request):
auth_vars = {}
if request.method == "POST":
keys = [k for k in request.POST.keys() if k.startswith("boxes_auth_")]
for key in keys:
auth_vars[key.replace("boxes_auth_", "")] = request.POST.get(key)
auth_vars["user"] = request.user
return auth_vars
def box_edit(request, pk):
box = get_object_or_404(Box, pk=pk)
if request.method == "POST":
#if not load_can_edit()(request, **get_auth_vars(request)):
# return HttpResponseForbidden()
form = BoxForm(request.POST, instance=box)
if form.is_valid():
form.save()
return render_to_response("boxes/refresh.html", {})
else:
form = BoxForm(instance=box)
ctx = {
"form": form,
"box": box,
}
ctx = RequestContext(request, ctx)
return render_to_response("boxes/box_edit.html", ctx)
def box_create(request, label):
if request.method == "POST":
#if not load_can_edit()(request, **get_auth_vars(request)):
# return HttpResponseForbidden()
form = BoxForm(request.POST)
if form.is_valid():
box = form.save(commit=False)
box.label = label
box.created_by = request.user
box.last_updated_by = request.user
box.save()
return render_to_response("boxes/refresh.html", {})
else:
form = BoxForm()
ctx = {
"form": form,
"label": label
}
ctx = RequestContext(request, ctx)
return render_to_response("boxes/box_create.html", ctx)
|
py | 1a37402e20b292b2474b0f610555cc77934dd5b7 | import sst
import os
sst.setProgramOption("timebase", "1ps")
sst_root = os.getenv( "SST_ROOT" )
app = sst_root + "/sst-elements/src/sst/elements/ariel/frontend/simple/examples/stream/stream"
if not os.path.exists(app):
app = os.getenv( "OMP_EXE" )
l2PrefetchParams = {
"prefetcher": "cassini.StridePrefetcher",
"reach": 8
}
ariel = sst.Component("a0", "ariel.ariel")
ariel.addParams({
"verbose" : "0",
"maxcorequeue" : "256",
"maxissuepercycle" : "2",
"pipetimeout" : "0",
"executable" : app,
"arielmode" : "1",
"launchparamcount" : 1,
"launchparam0" : "-ifeellucky",
})
memmgr = ariel.setSubComponent("memmgr", "ariel.MemoryManagerSimple")
corecount = 1;
l1cache = sst.Component("l1cache", "memHierarchy.Cache")
l1cache.addParams({
"cache_frequency" : "2 Ghz",
"cache_size" : "64 KB",
"coherence_protocol" : "MSI",
"replacement_policy" : "lru",
"associativity" : "8",
"access_latency_cycles" : "1",
"cache_line_size" : "64",
"L1" : "1",
"debug" : "0",
})
memctrl = sst.Component("memory", "memHierarchy.MemController")
memctrl.addParams({
"clock" : "1GHz",
})
memory = memctrl.setSubComponent("backend", "memHierarchy.simpleMem")
memory.addParams({
"access_time" : "10ns",
"mem_size" : "2048MiB",
})
cpu_cache_link = sst.Link("cpu_cache_link")
cpu_cache_link.connect( (ariel, "cache_link_0", "50ps"), (l1cache, "high_network_0", "50ps") )
memory_link = sst.Link("mem_bus_link")
memory_link.connect( (l1cache, "low_network_0", "50ps"), (memctrl, "direct_link", "50ps") )
# Set the Statistic Load Level; Statistics with Enable Levels (set in
# elementInfoStatistic) lower or equal to the load can be enabled (default = 0)
sst.setStatisticLoadLevel(5)
# Set the desired Statistic Output (sst.statOutputConsole is default)
sst.setStatisticOutput("sst.statOutputConsole")
#sst.setStatisticOutput("sst.statOutputTXT", {"filepath" : "./TestOutput.txt"
# })
#sst.setStatisticOutput("sst.statOutputCSV", {"filepath" : "./TestOutput.csv",
# "separator" : ", "
# })
# Enable Individual Statistics for the Component with output at end of sim
# Statistic defaults to Accumulator
ariel.enableStatistics([
"cycles",
"instruction_count",
"read_requests",
"write_requests"
])
l1cache.enableStatistics([
"CacheHits",
"CacheMisses"
])
|
py | 1a3740d0e16f42890ca3cf05543e155443b3f9c5 | import taso as ts
import onnx
import os
import argparse
import re
def squeeze(graph, out_channels, input):
weight = graph.new_weight(dims=(out_channels, input.dim(1), 1, 1))
return graph.conv2d(input=input, weight=weight,
strides=(1, 1), padding="SAME",
activation="RELU")
def fit(graph, current, input):
if input.dim(2) == current.dim(2):
return squeeze(graph, current.dim(1), input)
else:
weight = graph.new_weight(dims=(current.dim(1), input.dim(1), 3, 3))
return graph.conv2d(input=input, weight=weight, strides=(2, 2), padding="SAME", activation="RELU")
def seperable_conv(graph, input, out_channels, kernels, strides, padding, activation = "NONE"):
assert input.dim(1) % out_channels == 0, "input.dim(1)={}, out_channels={}".format(input.dim(1), out_channels)
weight1 = graph.new_weight(dims=(out_channels, input.dim(1) // out_channels, kernels[0], kernels[1]))
t = graph.conv2d(input=input, weight=weight1, strides=strides, padding=padding)
weight2 = graph.new_weight(dims=(out_channels, t.dim(1), 1, 1))
return graph.conv2d(input=t, weight=weight2, strides=(1, 1), padding="SAME", activation=activation)
def normal_cell(graph, prev, cur, out_channels):
cur = squeeze(graph, out_channels, cur)
prev = fit(graph, cur, prev)
ts = list()
ts.append(seperable_conv(graph, input=cur, out_channels=out_channels,
kernels=(3,3), strides=(1,1), padding="SAME"))
ts.append(cur)
ts.append(seperable_conv(graph, input=prev, out_channels=out_channels,
kernels=(3,3), strides=(1,1), padding="SAME"))
ts.append(seperable_conv(graph, input=cur, out_channels=out_channels,
kernels=(3,3), strides=(1,1), padding="SAME"))
ts.append(graph.avgpool2d(input=cur, kernels=(3,3), strides=(1,1), padding="SAME"))
ts.append(prev)
ts.append(graph.avgpool2d(input=prev, kernels=(3,3), strides=(1,1), padding="SAME"))
ts.append(graph.avgpool2d(input=prev, kernels=(3,3), strides=(1,1), padding="SAME"))
ts.append(seperable_conv(graph, input=prev, out_channels=out_channels,
kernels=(3,3), strides=(1,1), padding="SAME"))
ts.append(seperable_conv(graph, input=prev, out_channels=out_channels,
kernels=(3,3), strides=(1,1), padding="SAME"))
assert len(ts) == 10, "Expected 10 tensors, got {}".format(len(ts))
outputs = list()
for i in range(5):
outputs.append(graph.add(ts[2*i], ts[2*i+1]))
return graph.concat(1, outputs)
def reduction_cell(graph, prev, cur, out_channels):
cur = squeeze(graph, out_channels, cur)
prev = fit(graph, cur, prev)
ts = list()
outputs = list()
ts.append(seperable_conv(graph, input=prev, out_channels=out_channels,
kernels=(7,7), strides=(2,2), padding="SAME"))
ts.append(seperable_conv(graph, input=cur, out_channels=out_channels,
kernels=(5,5), strides=(2,2), padding="SAME"))
outputs.append(graph.add(ts[0], ts[1]))
ts.append(graph.maxpool2d(input=cur, kernels=(3,3), strides=(2,2), padding="SAME"))
ts.append(seperable_conv(graph, input=prev, out_channels=out_channels,
kernels=(7,7), strides=(2,2), padding="SAME"))
outputs.append(graph.add(ts[2], ts[3]))
ts.append(graph.avgpool2d(input=cur, kernels=(3,3), strides=(2,2), padding="SAME"))
ts.append(seperable_conv(graph, input=prev, out_channels=out_channels,
kernels=(5,5), strides=(2,2), padding="SAME"))
outputs.append(graph.add(ts[4], ts[5]))
ts.append(graph.maxpool2d(input=cur, kernels=(3,3), strides=(2,2), padding="SAME"))
ts.append(seperable_conv(graph, input=outputs[0], out_channels=out_channels,
kernels=(3,3), strides=(1,1), padding="SAME"))
outputs.append(graph.add(ts[6], ts[7]))
ts.append(graph.avgpool2d(input=outputs[0], kernels=(3,3), strides=(1,1), padding="SAME"))
ts.append(outputs[1])
outputs.append(graph.add(ts[8], ts[9]))
return graph.concat(1, outputs)
#here we need to parse arguments
parser = argparse.ArgumentParser()
# parser.add_argument("-a", "--alpha", help="alpha", default = 1.05)
parser.add_argument("-b", "--budget", help="budget", required=True)
# parser.add_argument("-s", "--sample_size", help="sample_size")
# parser.add_argument("-n", "--block_num", help="block_num", required = True)
parser.add_argument("-c", "--cuda", help="cuda device", default = 0)
parser.add_argument("-r", "--runtimes", help="the number of runs required", required = True)
parser.add_argument("-m", "--method", help="the method to use", required = True)
args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = str(int(args.cuda))
budget=int(args.budget)
# block_num=int(args.block_num)
runtimes=int(args.runtimes)
methods=int(args.method)
# BUILD THE ORIGINAL GRAPH
graph = ts.new_graph()
input = graph.new_input(dims=(1,3,224,224))
weight = graph.new_weight(dims=(64,3,7,7))
input = graph.conv2d(input=input, weight=weight, strides=(2,2),
padding="SAME", activation="RELU")
input = graph.maxpool2d(input=input, kernels=(3,3), strides=(2,2), padding="SAME")
out_channels = 128
for i in range(3):
prev = input
cur = input
for j in range(5):
t = normal_cell(graph, prev, cur, out_channels)
prev = cur
cur = t
out_channels *= 2
input = reduction_cell(graph, prev, cur, out_channels)
# new_graph = ts.optimize(graph, alpha=1.0, budget=-1)
import timeit
# this helper function write "to_write" to the file
def write_result(to_write):
f = open('results.py','a')
f.write(to_write)
f.close()
def get_memory():
    my_pid = os.getpid()
    print(os.system("grep VmHWM /proc/" + str(my_pid) + "/status > memory.txt"))
    print(os.system("grep VmHWM /proc/" + str(my_pid) + "/status"))
    print(str(my_pid))
    memory = -1
    with open("memory.txt", "r") as f2:
        for line3 in f2.readlines():
            matchObj = re.match(r'VmHWM:\s*([0-9]+) kB', line3)
            if matchObj:
                memory = int(matchObj.group(1))
                break
    return memory
# repeat_time = 1
# DO OPTIMIZATION AND RECORD RESULTS
# write_result('all_results = dict()\n')
# write_result('\nall_results["sysmlpartition"] = dict()\n')
# write_result('\nall_results["sysmltrick"] = dict()\n')
# write_result('\nall_results["sampletrick"] = dict()\n')
# write_result('\nall_results["sampletrick_truenewreuse"] = dict()\n')
# write_result('\nall_results["reuse"] = dict()\n')
# write_result('\nall_results["prune"] = dict()\n')
write_result('\nall_results = model_results["nasneta' + 'b' + str(budget) + '"]\n')
for repeat_time in range(runtimes, runtimes+1):
write_result('\nrepeat_time = ' + str(repeat_time) + '\n')
# # for sampletrick with true new reuse
# # RUN THIS ALGORITHM TO PREPARE THE OP_DICT
# if ((methods == -1) or (methods == 1)):
# # write_result('result = dict()\n')
# write_result('result = all_results["sampletrick_truenewreuse"][repeat_time]\n')
# new_graph = ts.optimize_sampletrick_truenewreuse(graph, 3, alpha=1.05, budget=budget, print_subst = False, sample_size = 20)
# #record the peak memory
# write_result('result["memory"] = ' + str(get_memory()) + '\n')
# # write_result('all_results["sampletrick_truenewreuse"][repeat_time] = result\n')
# write_result('all_results["sysmlpartition"][repeat_time] = dict()\n')
if ((methods == -1) or (methods == 2)):
# write_result('result = dict()\n')
write_result('result = all_results["sysmlpartition"][repeat_time]\n')
threshold = 30
partitions = list()
#
start_time = timeit.default_timer()
ts.graph_partition(graph, threshold, partitions = partitions)
end_time = timeit.default_timer()
write_result('result["partition_time"] = ' + str(end_time-start_time) + '\n')
#
new_graph = ts.optimize_partition(graph, alpha = 1.05, budget = budget, print_subst = True, eraly_stop_num = -1, partitions = partitions)
#record the peak memory
write_result('result["memory"] = ' + str(get_memory()) + '\n')
# write_result('all_results["sysmlpartition"][repeat_time] = result\n')
# for sysmltrick without partition
if ((methods == -1) or (methods == 3)):
# write_result('result = dict()\n')
write_result('result = all_results["sysmltrick"][repeat_time]\n')
new_graph = ts.optimize_sysmltrick(graph, alpha = 1.05, budget = budget, print_subst = False, eraly_stop_num = -1)
#record the peak memory
write_result('result["memory"] = ' + str(get_memory()) + '\n')
# write_result('all_results["sysmltrick"][repeat_time] = result\n')
# for sampletrick
if ((methods == -1) or (methods == 4)):
# write_result('result = dict()\n')
write_result('result = all_results["sampletrick_optimized"][repeat_time]\n')
# new_graph = ts.optimize_sampletrick(graph, alpha=1.05, budget=budget, print_subst = False, sample_size = 20)
new_graph = ts.optimize_sampletrick_newreuse_2samplestep(graph, alpha=1.05, budget=budget, print_subst = False, sample_size = 20)
#record the peak memory
write_result('result["memory"] = ' + str(get_memory()) + '\n')
# write_result('all_results["sampletrick"][repeat_time] = result\n')
# # for reuse
# write_result('result = dict()\n')
# new_graph = ts.optimize_reuse(graph, alpha=1.05, budget=budget, print_subst = True)
# write_result('all_results["reuse"][repeat_time] = result\n')
# # for prune
# write_result('result = dict()\n')
# new_graph = ts.optimize_prune(graph, alpha=1.05, budget=budget, print_subst = True)
# write_result('all_results["prune"][repeat_time] = result\n')
# STORE THE RESULTS IN THE MODEL_RESULTS VAR
# write_result('\nmodel_results["nasneta' + 'b' + str(budget) + '"] = all_results\n')
|
py | 1a3741ae2f6b5685eab1a1f73bba8287e77fa831 | from Task import Task
from Settings import EvolvedConfig
from Interfaces import Evolved5gJenkinsApi
from Helper import Level
class JenkinsBase(Task):
def __init__(self, name, parent, params, logMethod):
super().__init__(name, parent, params, logMethod, None)
self.config = EvolvedConfig().JenkinsApi
self.client = None
def Run(self):
try:
self.client = self.getApiClient()
except Exception as e:
            self.Log(Level.ERROR, f"Unable to create Jenkins API client: {e}")
self.client = None
def getApiClient(self) -> Evolved5gJenkinsApi:
if not self.config.Enabled:
raise RuntimeError(f"Trying to run {self.name} Task while Jenkins API is not enabled")
return Evolved5gJenkinsApi(self.config.Host, self.config.Port,
self.config.User, self.config.Password)
class JenkinsJob(JenkinsBase):
def __init__(self, logMethod, parent, params):
super().__init__("Jenkins Job", parent, params, logMethod)
self.paramRules = {
'Instance': (None, True),
'Job': (None, True),
'GitUrl': (None, True),
'GitBranch': (None, True),
'Version': ('1.0', False),
'PublishKey': ('JenkinsJobId', False),
}
def Run(self):
super().Run()
if self.client is None: return
instance = self.params["Instance"]
job = self.params["Job"]
url = self.params["GitUrl"]
branch = self.params["GitBranch"]
version = self.params["Version"]
self.Log(Level.DEBUG,
f"Trying to trigger job '{job}' on instance '{instance}' ({url}|{branch}|{version})")
try:
jobId = self.client.TriggerJob(instance, job, url, branch, version)
self.Log(Level.INFO, f"Triggered '{job}'. Received Job Id: {jobId}")
self.Publish(self.params["PublishKey"], jobId)
except Exception as e:
self.Log(Level.ERROR, f"Unable to trigger job: {e}")
self.SetVerdictOnError()
class JenkinsStatus(JenkinsBase):
def __init__(self, logMethod, parent, params):
super().__init__("Jenkins Status", parent, params, logMethod)
self.paramRules = {
'JobId': (None, True),
'PublishKey': ('JenkinsJobStatus', False),
}
def Run(self):
super().Run()
if self.client is None: return
jobId = self.params['JobId']
try:
status, message = self.client.CheckJob(jobId)
message = message if message is not None else "<No details>"
self.Log(Level.INFO, f"Status of job '{jobId}': {status} ('{message}')")
self.Publish(self.params["PublishKey"], status)
except Exception as e:
self.Log(Level.ERROR, f"Unable to check job '{jobId}' status: {e}")
self.SetVerdictOnError()
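# --- Illustrative only (not part of the original module) ---------------------
# A rough sketch of how JenkinsJob and JenkinsStatus might be chained by a
# caller. It assumes the Task base class applies the paramRules defaults and
# that values stored via self.Publish() can later be read back from the parent;
# the instance/job/repository names below are made up for the example.
def _example_jenkins_flow(logMethod, parent):
    job = JenkinsJob(logMethod, parent, {
        'Instance': 'ci-instance',                # hypothetical Jenkins instance
        'Job': 'build-network-app',               # hypothetical job name
        'GitUrl': 'https://example.org/repo.git',
        'GitBranch': 'main',
    })
    job.Run()  # publishes the build id under 'JenkinsJobId' by default
    # How the published value is retrieved depends on the executor; shown here
    # as a plain placeholder string.
    jobId = '<value published under JenkinsJobId>'
    JenkinsStatus(logMethod, parent, {'JobId': jobId}).Run()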
|
py | 1a37421ca0b3826357eede8e8fceb1465be12d07 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ==============================================================================
#
# author: Gabriele Girelli
# email: [email protected]
# version: 1.1.1dev
# date: 180308
# project: pre-processing sequencing data
#
# credits:
# Dr. F. Agostini for the nice chats and for providing an initial prototype.
#
# aim:
# Deduplicate FASTQ file: remove duplicate sequence by keeping the higher
# quality one. Moreover, remove reads with "N" in the initial portion of a
# read, if requested by the user.
#
# description:
# Initially, the records in the FASTQ are quickly counted with bash "wc -l".
# Then, the full FASTQ file is read and parsed with Bio.SeqIO. Each record is
# stored in plain text format alongside its quality in a dictionary, with its
# sequence as key. If "N" (i.e., any nucleotide) is found in the initial
# portion (user-defined) of a sequence, the sequence is discarded. Each
# sequence is compared to the encountered ones and replaces it only and only
# if its quality is higher (either sum or mean). It is also possible to
# manually set an upper limit of resident memory using the --max-mem option.
#
# notes:
# The current implementation requires less RAM than previous ones, and shorter
# times to compute. Instead of storing each FASTQ record as parsed, it stores
# them as plain text alongside sequence and its quality (minor redundancy).
# For a 20 GB plain FASTQ, approx. 15 GB of resident memory are required.
#
# ==============================================================================
# DEPENDENCIES =================================================================
import argparse
import binascii
from Bio import SeqIO # type: ignore
import gzip
import numpy as np
import os
import resource
from subprocess import check_output
import sys
from tqdm import tqdm # type: ignore
# PARAMETERS ===================================================================
# Add script description
parser = argparse.ArgumentParser(
description="""
author: Gabriele Girelli
email: [email protected]
version: 1.1.1dev
date: 180308
project: pre-processing sequencing data
credits:
Dr. F. Agostini for the nice chats and for providing an initial prototype.
aim:
Deduplicate FASTQ file: remove duplicate sequence by keeping the higher
quality one. Moreover, remove reads with "N" in the initial portion of a
read, if requested by the user.
description:
Initially, the records in the FASTQ are quickly counted with bash "wc -l".
Then, the full FASTQ file is read and parsed with Bio.SeqIO. Each record is
stored in plain text format alongside its quality in a dictionary, with its
sequence as key. If "N" (i.e., any nucleotide) is found in the initial
portion (user-defined) of a sequence, the sequence is discarded. Each
sequence is compared to the encountered ones and replaces it if and only
if its quality is higher (either sum or mean). It is also possible to
manually set an upper limit of resident memory using the --max-mem option.
notes:
The current implementation requires less RAM than previous ones, and shorter
times to compute. Instead of storing each FASTQ record as parsed, it stores
them as plain text alongside sequence and its quality (minor redundancy).
For a 20 GB plain FASTQ, approx. 15 GB of resident memory are required.
""",
formatter_class=argparse.RawDescriptionHelpFormatter,
)
# Add mandatory arguments
parser.add_argument(
"fastq",
type=str,
nargs=1,
help="""Path to input FASTQ file.
Both gzipped and plain FASTQ formats are supported""",
)
# Add arguments with default value
parser.add_argument(
"-n",
type=int,
nargs=1,
metavar="nt",
default=[0],
help="""Length [nt] of sequence initial portion to search for N.
Default: 0.""",
)
parser.add_argument(
"--max-mem",
type=int,
nargs=1,
metavar="MB",
help="""Upper limit (in MB) of resident memory for the deduplication
process. Use -1 for unlimited. Not compatible with MacOS. Default: -1.""",
default=[-1],
)
# Add flags
parser.add_argument(
"--use-mean-qual",
action="store_const",
dest="doMean",
const=True,
default=False,
help="Select sequences based on mean quality instead of quality sum.",
)
# Version flag
version = "1.1.1dev"
parser.add_argument(
"--version",
action="version",
version="%s v%s"
% (
sys.argv[0],
version,
),
)
# Parse arguments
args = parser.parse_args()
# Assign to in-script variables
ipath = args.fastq[0]
basename = os.path.splitext(os.path.basename(ipath))[0]
linker_length = args.n[0]
max_mem = args.max_mem[0]
if max_mem < 0:
max_mem = np.inf
def floatMean(x):
return float(np.mean(x))
def intMean(x):
return int(np.mean(x))
doMean = args.doMean
if doMean:
qCalc = floatMean
else:
qCalc = intMean
# FUNCTIONS ====================================================================
def get_mem():
# Memory profiling
# From https://goo.gl/HkfNpu
if sys.platform == "darwin":
return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / (1024.0 ** 2)
return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024.0
def check_mem():
# Print memory profiling
print("%f MB" % (get_mem(),))
def set_default(v, default):
# Set default value for function argument
if v is None:
return default
return v
def is_gz_file(filepath):
# https://stackoverflow.com/a/47080739
with open(filepath, "rb") as test_f:
return binascii.hexlify(test_f.read(2)) == b"1f8b"
def write_output(oh, records):
"""
Write output after filtering.
Args:
oh (file/gzip): output file handle.
records (dict): records dictionary after filtering.
"""
for k in tqdm(list(records)):
# Pop to empty mem sooner
oh.write(records.pop(k)[0])
def cmp_record(rec, records, ncounter, linker_length):
"""
Compares a record to stored one, replace previous ones based on quality and
discard if "N" is present in the initial portion of the sequence.
Args:
rec (SeqRecord): single FASTQ record.
records (dict): records dictionary during filtering.
ncounter (int): number of records discarded due to "N".
linker_length (int): Length [nt] of sequence portion to search for N.
Returns:
dict: records dictionary after comparison.
"""
# Extract record's sequence, let's make it comfy
seq = str(rec.seq)
# Skip if N in linker sequence
if "N" not in seq[:linker_length]:
# Prepare record for storage
q = qCalc(rec.letter_annotations["phred_quality"])
if seq not in records.keys():
# Store record
records[seq] = (rec.format("fastq"), q)
elif q > records[seq][1]:
# Replace stored record
records[seq] = (rec.format("fastq"), q)
else:
ncounter += 1
return (records, ncounter)
def log_result(ncounter, nrecs):
print("%d records removed due to presence of 'N'." % ncounter)
print("%d records after deduplication." % nrecs)
print("Peaked at %.1f MB of resident memory." % get_mem())
def run(ih, oh, linker_length, nrecs):
"""
Run the script on the input file: remove records with Ns in the initial
portion of the sequence, if linker_length is larger than 0.
Args:
ih (file/gzip): input file handle.
oh (file/gzip): output file handle.
linker_length (int): Length [nt] of sequence portion to search for N.
nrecs (int): expected number of records.
"""
# Read all records
print("Reading and filtering...")
records = {}
ncounter = 0
# Parse FASTQ records
gen = SeqIO.parse(ih, "fastq")
with tqdm(total=nrecs) as pbar:
for i in range(nrecs):
# Compare current record with stored ones
records, ncounter = cmp_record(next(gen), records, ncounter, linker_length)
# Update progress bar
pbar.update(1)
log_result(ncounter, len(records))
# Remove duplicates and write output
print("Writing...")
write_output(oh, records)
def run_mm(ih, oh, linker_length, nrecs, max_mem=None):
"""
Run the script on the input file: remove records with Ns in the initial
portion of the sequence, if linker_length is larger than 0.
Performs resident memory profiling with upper limit set by the user.
Args:
ih (file/gzip): input file handle.
oh (file/gzip): output file handle.
linker_length (int): Length [nt] of sequence portion to search for N.
nrecs (int): expected number of records.
max_mem (int): upper resident memory limit in MB.
"""
# Default memory limit to infinity
    if max_mem is not None and max_mem < 0:
        max_mem = None
max_mem = set_default(max_mem, np.inf)
# Read all records
print("Reading and filtering...")
records = {}
ncounter = 0
# Parse FASTQ records
gen = SeqIO.parse(ih, "fastq")
with tqdm(total=nrecs) as pbar:
for i in range(nrecs):
# Stop when the mem limit is hit
if get_mem() >= max_mem:
sys.exit("!ABORTED! Hit resident memory limit of %d MB." % (max_mem,))
# Compare current record with stored ones
records, ncounter = cmp_record(next(gen), records, ncounter, linker_length)
# Update progress bar
pbar.update(1)
log_result(ncounter, len(records))
# Remove duplicates and write output
print("Writing...")
write_output(oh, records)
# RUN ==========================================================================
# Log input --------------------------------------------------------------------
print("\n# fqdedup.py v%s - Single-end FASTQ deduplication" % version)
print("Input: %s" % (ipath,))
if is_gz_file(ipath):
print("! Gzipped FASTQ deduplication style.")
# Prepare to parse a gzipped FASTQ input
catter = "zcat"
opath = "%s/%s.dedup.gz" % (os.path.dirname(ipath), basename)
oh = gzip.open(opath, "wt")
ih = gzip.open(ipath, "rt")
else:
print("! Plain FASTQ deduplication style.")
# Prepare to parse a plain FASTQ input
catter = "cat"
opath = "%s/%s.dedup.fastq" % (os.path.dirname(ipath), basename)
oh = open(opath, "wt")
ih = open(ipath, "rt")
if doMean:
print("!Using average quality for sequence selection.")
else:
print("! Using quality sum for sequence selection.")
if 0 != linker_length:
print("! Discarding sequences with N in the first %d bases." % (linker_length,))
if np.inf != max_mem:
print("! Upper resident memory limit set to %d MB." % (max_mem,))
print()
# Count records in input -------------------------------------------------------
print("Counting records...")
nrecs = int(
int(check_output(["bash", "-c", "%s '%s' | wc -l" % (catter, ipath)])) / 4.0
)
print("> Found %d records." % (nrecs,))
# Run --------------------------------------------------------------------------
if np.inf == max_mem:
# No memory management
run(ih, oh, linker_length, nrecs)
else:
# With memory management
run_mm(ih, oh, linker_length, nrecs, max_mem)
# END ==========================================================================
################################################################################
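# --- Example invocation (illustrative, not part of the original script) ------
# The flags are the ones defined by the argparse section above; the file name
# and the concrete values are made up for demonstration.
#
#   python fqdedup.py -n 20 --max-mem 4096 sample_R1.fastq.gz
#
# This drops reads with an 'N' in their first 20 nt, keeps the highest-quality
# copy of each remaining sequence, aborts if resident memory exceeds 4096 MB,
# and writes sample_R1.fastq.dedup.gz next to the input file.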
|
py | 1a374234806df11439302d07c6208802aa238e59 | """Tests for certbot.cli."""
import argparse
import unittest
import os
import tempfile
import mock
import six
from six.moves import reload_module # pylint: disable=import-error
from acme import challenges
from certbot import cli
from certbot import constants
from certbot import errors
from certbot.plugins import disco
import certbot.tests.util as test_util
from certbot.tests.util import TempDirTestCase
PLUGINS = disco.PluginsRegistry.find_all()
class TestReadFile(TempDirTestCase):
'''Test cli.read_file'''
_multiprocess_can_split_ = True
def test_read_file(self):
rel_test_path = os.path.relpath(os.path.join(self.tempdir, 'foo'))
self.assertRaises(
argparse.ArgumentTypeError, cli.read_file, rel_test_path)
test_contents = b'bar\n'
with open(rel_test_path, 'wb') as f:
f.write(test_contents)
path, contents = cli.read_file(rel_test_path)
self.assertEqual(path, os.path.abspath(path))
self.assertEqual(contents, test_contents)
class ParseTest(unittest.TestCase): # pylint: disable=too-many-public-methods
'''Test the cli args entrypoint'''
_multiprocess_can_split_ = True
def setUp(self):
reload_module(cli)
@staticmethod
def _unmocked_parse(*args, **kwargs):
"""Get result of cli.prepare_and_parse_args."""
return cli.prepare_and_parse_args(PLUGINS, *args, **kwargs)
@staticmethod
def parse(*args, **kwargs):
"""Mocks zope.component.getUtility and calls _unmocked_parse."""
with test_util.patch_get_utility():
return ParseTest._unmocked_parse(*args, **kwargs)
def _help_output(self, args):
"Run a command, and return the output string for scrutiny"
output = six.StringIO()
def write_msg(message, *args, **kwargs): # pylint: disable=missing-docstring,unused-argument
output.write(message)
with mock.patch('certbot.main.sys.stdout', new=output):
with test_util.patch_get_utility() as mock_get_utility:
mock_get_utility().notification.side_effect = write_msg
with mock.patch('certbot.main.sys.stderr'):
self.assertRaises(SystemExit, self._unmocked_parse, args, output)
return output.getvalue()
@mock.patch("certbot.cli.flag_default")
def test_cli_ini_domains(self, mock_flag_default):
tmp_config = tempfile.NamedTemporaryFile()
# use a shim to get ConfigArgParse to pick up tmp_config
shim = lambda v: constants.CLI_DEFAULTS[v] if v != "config_files" else [tmp_config.name]
mock_flag_default.side_effect = shim
namespace = self.parse(["certonly"])
self.assertEqual(namespace.domains, [])
tmp_config.write(b"domains = example.com")
tmp_config.flush()
namespace = self.parse(["certonly"])
self.assertEqual(namespace.domains, ["example.com"])
namespace = self.parse(["renew"])
self.assertEqual(namespace.domains, [])
def test_no_args(self):
namespace = self.parse([])
for d in ('config_dir', 'logs_dir', 'work_dir'):
self.assertEqual(getattr(namespace, d), cli.flag_default(d))
def test_install_abspath(self):
cert = 'cert'
key = 'key'
chain = 'chain'
fullchain = 'fullchain'
with mock.patch('certbot.main.install'):
namespace = self.parse(['install', '--cert-path', cert,
'--key-path', 'key', '--chain-path',
'chain', '--fullchain-path', 'fullchain'])
self.assertEqual(namespace.cert_path, os.path.abspath(cert))
self.assertEqual(namespace.key_path, os.path.abspath(key))
self.assertEqual(namespace.chain_path, os.path.abspath(chain))
self.assertEqual(namespace.fullchain_path, os.path.abspath(fullchain))
def test_help(self):
self._help_output(['--help']) # assert SystemExit is raised here
out = self._help_output(['--help', 'all'])
self.assertTrue("--configurator" in out)
self.assertTrue("how a certificate is deployed" in out)
self.assertTrue("--webroot-path" in out)
self.assertTrue("--text" not in out)
self.assertTrue("--dialog" not in out)
self.assertTrue("%s" not in out)
self.assertTrue("{0}" not in out)
self.assertTrue("--renew-hook" not in out)
out = self._help_output(['-h', 'nginx'])
if "nginx" in PLUGINS:
# may be false while building distributions without plugins
self.assertTrue("--nginx-ctl" in out)
self.assertTrue("--webroot-path" not in out)
self.assertTrue("--checkpoints" not in out)
out = self._help_output(['-h'])
self.assertTrue("letsencrypt-auto" not in out) # test cli.cli_command
if "nginx" in PLUGINS:
self.assertTrue("Use the Nginx plugin" in out)
else:
self.assertTrue("(the certbot nginx plugin is not" in out)
out = self._help_output(['--help', 'plugins'])
self.assertTrue("--webroot-path" not in out)
self.assertTrue("--prepare" in out)
self.assertTrue('"plugins" subcommand' in out)
# test multiple topics
out = self._help_output(['-h', 'renew'])
self.assertTrue("--keep" in out)
out = self._help_output(['-h', 'automation'])
self.assertTrue("--keep" in out)
out = self._help_output(['-h', 'revoke'])
self.assertTrue("--keep" not in out)
out = self._help_output(['--help', 'install'])
self.assertTrue("--cert-path" in out)
self.assertTrue("--key-path" in out)
out = self._help_output(['--help', 'revoke'])
self.assertTrue("--cert-path" in out)
self.assertTrue("--key-path" in out)
self.assertTrue("--reason" in out)
out = self._help_output(['-h', 'config_changes'])
self.assertTrue("--cert-path" not in out)
self.assertTrue("--key-path" not in out)
out = self._help_output(['-h'])
self.assertTrue(cli.SHORT_USAGE in out)
self.assertTrue(cli.COMMAND_OVERVIEW[:100] in out)
self.assertTrue("%s" not in out)
self.assertTrue("{0}" not in out)
def test_help_no_dashes(self):
self._help_output(['help']) # assert SystemExit is raised here
out = self._help_output(['help', 'all'])
self.assertTrue("--configurator" in out)
self.assertTrue("how a certificate is deployed" in out)
self.assertTrue("--webroot-path" in out)
self.assertTrue("--text" not in out)
self.assertTrue("--dialog" not in out)
self.assertTrue("%s" not in out)
self.assertTrue("{0}" not in out)
out = self._help_output(['help', 'install'])
self.assertTrue("--cert-path" in out)
self.assertTrue("--key-path" in out)
out = self._help_output(['help', 'revoke'])
self.assertTrue("--cert-path" in out)
self.assertTrue("--key-path" in out)
def test_parse_domains(self):
short_args = ['-d', 'example.com']
namespace = self.parse(short_args)
self.assertEqual(namespace.domains, ['example.com'])
short_args = ['-d', 'trailing.period.com.']
namespace = self.parse(short_args)
self.assertEqual(namespace.domains, ['trailing.period.com'])
short_args = ['-d', 'example.com,another.net,third.org,example.com']
namespace = self.parse(short_args)
self.assertEqual(namespace.domains, ['example.com', 'another.net',
'third.org'])
long_args = ['--domains', 'example.com']
namespace = self.parse(long_args)
self.assertEqual(namespace.domains, ['example.com'])
long_args = ['--domains', 'trailing.period.com.']
namespace = self.parse(long_args)
self.assertEqual(namespace.domains, ['trailing.period.com'])
long_args = ['--domains', 'example.com,another.net,example.com']
namespace = self.parse(long_args)
self.assertEqual(namespace.domains, ['example.com', 'another.net'])
def test_preferred_challenges(self):
short_args = ['--preferred-challenges', 'http, tls-sni-01, dns']
namespace = self.parse(short_args)
expected = [challenges.HTTP01.typ,
challenges.TLSSNI01.typ, challenges.DNS01.typ]
self.assertEqual(namespace.pref_challs, expected)
short_args = ['--preferred-challenges', 'jumping-over-the-moon']
# argparse.ArgumentError makes argparse print more information
# to stderr and call sys.exit()
with mock.patch('sys.stderr'):
self.assertRaises(SystemExit, self.parse, short_args)
def test_server_flag(self):
namespace = self.parse('--server example.com'.split())
self.assertEqual(namespace.server, 'example.com')
def test_must_staple_flag(self):
short_args = ['--must-staple']
namespace = self.parse(short_args)
self.assertTrue(namespace.must_staple)
self.assertTrue(namespace.staple)
def test_no_gui(self):
args = ['renew', '--dialog']
stderr = six.StringIO()
with mock.patch('certbot.main.sys.stderr', new=stderr):
namespace = self.parse(args)
self.assertTrue(namespace.noninteractive_mode)
self.assertTrue("--dialog is deprecated" in stderr.getvalue())
def _check_server_conflict_message(self, parser_args, conflicting_args):
try:
self.parse(parser_args)
self.fail( # pragma: no cover
"The following flags didn't conflict with "
'--server: {0}'.format(', '.join(conflicting_args)))
except errors.Error as error:
self.assertTrue('--server' in str(error))
for arg in conflicting_args:
self.assertTrue(arg in str(error))
def test_staging_flag(self):
short_args = ['--staging']
namespace = self.parse(short_args)
self.assertTrue(namespace.staging)
self.assertEqual(namespace.server, constants.STAGING_URI)
short_args += '--server example.com'.split()
self._check_server_conflict_message(short_args, '--staging')
def _assert_dry_run_flag_worked(self, namespace, existing_account):
self.assertTrue(namespace.dry_run)
self.assertTrue(namespace.break_my_certs)
self.assertTrue(namespace.staging)
self.assertEqual(namespace.server, constants.STAGING_URI)
if existing_account:
self.assertTrue(namespace.tos)
self.assertTrue(namespace.register_unsafely_without_email)
else:
self.assertFalse(namespace.tos)
self.assertFalse(namespace.register_unsafely_without_email)
def test_dry_run_flag(self):
config_dir = tempfile.mkdtemp()
short_args = '--dry-run --config-dir {0}'.format(config_dir).split()
self.assertRaises(errors.Error, self.parse, short_args)
self._assert_dry_run_flag_worked(
self.parse(short_args + ['auth']), False)
self._assert_dry_run_flag_worked(
self.parse(short_args + ['certonly']), False)
self._assert_dry_run_flag_worked(
self.parse(short_args + ['renew']), False)
account_dir = os.path.join(config_dir, constants.ACCOUNTS_DIR)
os.mkdir(account_dir)
os.mkdir(os.path.join(account_dir, 'fake_account_dir'))
self._assert_dry_run_flag_worked(self.parse(short_args + ['auth']), True)
self._assert_dry_run_flag_worked(self.parse(short_args + ['renew']), True)
short_args += ['certonly']
self._assert_dry_run_flag_worked(self.parse(short_args), True)
short_args += '--server example.com'.split()
conflicts = ['--dry-run']
self._check_server_conflict_message(short_args, '--dry-run')
short_args += ['--staging']
conflicts += ['--staging']
self._check_server_conflict_message(short_args, conflicts)
def test_option_was_set(self):
key_size_option = 'rsa_key_size'
key_size_value = cli.flag_default(key_size_option)
self.parse('--rsa-key-size {0}'.format(key_size_value).split())
self.assertTrue(cli.option_was_set(key_size_option, key_size_value))
self.assertTrue(cli.option_was_set('no_verify_ssl', True))
config_dir_option = 'config_dir'
self.assertFalse(cli.option_was_set(
config_dir_option, cli.flag_default(config_dir_option)))
def test_encode_revocation_reason(self):
for reason, code in constants.REVOCATION_REASONS.items():
namespace = self.parse(['--reason', reason])
self.assertEqual(namespace.reason, code)
for reason, code in constants.REVOCATION_REASONS.items():
namespace = self.parse(['--reason', reason.upper()])
self.assertEqual(namespace.reason, code)
def test_force_interactive(self):
self.assertRaises(
errors.Error, self.parse, "renew --force-interactive".split())
self.assertRaises(
errors.Error, self.parse, "-n --force-interactive".split())
def test_deploy_hook_conflict(self):
with mock.patch("certbot.cli.sys.stderr"):
self.assertRaises(SystemExit, self.parse,
"--renew-hook foo --deploy-hook bar".split())
def test_deploy_hook_matches_renew_hook(self):
value = "foo"
namespace = self.parse(["--renew-hook", value,
"--deploy-hook", value,
"--disable-hook-validation"])
self.assertEqual(namespace.deploy_hook, value)
self.assertEqual(namespace.renew_hook, value)
def test_deploy_hook_sets_renew_hook(self):
value = "foo"
namespace = self.parse(
["--deploy-hook", value, "--disable-hook-validation"])
self.assertEqual(namespace.deploy_hook, value)
self.assertEqual(namespace.renew_hook, value)
def test_renew_hook_conflict(self):
with mock.patch("certbot.cli.sys.stderr"):
self.assertRaises(SystemExit, self.parse,
"--deploy-hook foo --renew-hook bar".split())
def test_renew_hook_matches_deploy_hook(self):
value = "foo"
namespace = self.parse(["--deploy-hook", value,
"--renew-hook", value,
"--disable-hook-validation"])
self.assertEqual(namespace.deploy_hook, value)
self.assertEqual(namespace.renew_hook, value)
def test_renew_hook_does_not_set_renew_hook(self):
value = "foo"
namespace = self.parse(
["--renew-hook", value, "--disable-hook-validation"])
self.assertEqual(namespace.deploy_hook, None)
self.assertEqual(namespace.renew_hook, value)
def test_max_log_backups_error(self):
with mock.patch('certbot.cli.sys.stderr'):
self.assertRaises(
SystemExit, self.parse, "--max-log-backups foo".split())
self.assertRaises(
SystemExit, self.parse, "--max-log-backups -42".split())
def test_max_log_backups_success(self):
value = "42"
namespace = self.parse(["--max-log-backups", value])
self.assertEqual(namespace.max_log_backups, int(value))
class DefaultTest(unittest.TestCase):
"""Tests for certbot.cli._Default."""
_multiprocess_can_split_ = True
def setUp(self):
# pylint: disable=protected-access
self.default1 = cli._Default()
self.default2 = cli._Default()
def test_boolean(self):
self.assertFalse(self.default1)
self.assertFalse(self.default2)
def test_equality(self):
self.assertEqual(self.default1, self.default2)
def test_hash(self):
self.assertEqual(hash(self.default1), hash(self.default2))
class SetByCliTest(unittest.TestCase):
"""Tests for certbot.set_by_cli and related functions."""
_multiprocess_can_split_ = True
def setUp(self):
reload_module(cli)
def test_webroot_map(self):
args = '-w /var/www/html -d example.com'.split()
verb = 'renew'
self.assertTrue(_call_set_by_cli('webroot_map', args, verb))
def test_report_config_interaction_str(self):
cli.report_config_interaction('manual_public_ip_logging_ok',
'manual_auth_hook')
cli.report_config_interaction('manual_auth_hook', 'manual')
self._test_report_config_interaction_common()
def test_report_config_interaction_iterable(self):
cli.report_config_interaction(('manual_public_ip_logging_ok',),
('manual_auth_hook',))
cli.report_config_interaction(('manual_auth_hook',), ('manual',))
self._test_report_config_interaction_common()
def _test_report_config_interaction_common(self):
"""Tests implied interaction between manual flags.
--manual implies --manual-auth-hook which implies
--manual-public-ip-logging-ok. These interactions don't actually
exist in the client, but are used here for testing purposes.
"""
args = ['--manual']
verb = 'renew'
for v in ('manual', 'manual_auth_hook', 'manual_public_ip_logging_ok'):
self.assertTrue(_call_set_by_cli(v, args, verb))
cli.set_by_cli.detector = None
args = ['--manual-auth-hook', 'command']
for v in ('manual_auth_hook', 'manual_public_ip_logging_ok'):
self.assertTrue(_call_set_by_cli(v, args, verb))
self.assertFalse(_call_set_by_cli('manual', args, verb))
def _call_set_by_cli(var, args, verb):
with mock.patch('certbot.cli.helpful_parser') as mock_parser:
with test_util.patch_get_utility():
mock_parser.args = args
mock_parser.verb = verb
return cli.set_by_cli(var)
if __name__ == '__main__':
unittest.main() # pragma: no cover
|
py | 1a3742c5b04273040947ceb0c71d9a2609f62b23 | import torch
from torch.nn.functional import cross_entropy
def check_accuracy(loader, model, device):
num_correct = 0
num_samples = 0
model.eval() # set model to evaluation mode
with torch.no_grad():
for x, y in loader:
x = x.to(device=device)
y = y.to(device=device, dtype=torch.long)
scores = model(x)
_, preds = scores.max(1)
num_correct += (preds == y).sum()
num_samples += preds.size(0)
acc = float(num_correct) / num_samples
print('Got %d / %d correct (%.2f)' % (num_correct, num_samples, 100 * acc))
def train(model, loader_train, loader_val, optimizer, device, epochs=1, log=False, print_every=100):
"""
Train a model on CIFAR-10 using the PyTorch Module API.
    Inputs:
    - model: A PyTorch Module giving the model to train.
    - loader_train / loader_val: DataLoaders for the training and validation sets.
    - optimizer: An Optimizer object we will use to train the model
    - device: torch.device on which the model and data are placed
    - epochs: (Optional) A Python integer giving the number of epochs to train for
Returns: Nothing, but prints model accuracies during training.
"""
model = model.to(device=device)
for _ in range(epochs):
for t, (x, y) in enumerate(loader_train):
model.train()
x = x.to(device=device)
y = y.to(device=device, dtype=torch.long)
scores = model(x)
loss = cross_entropy(scores, y)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if t % print_every == 0:
print('Iteration %d, loss = %.4f' % (t, loss.item()))
#check_accuracy(loader_val, model, device)
#print()
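# --- Illustrative usage sketch (not part of the original helpers) ------------
# Shows how train() and check_accuracy() above might be wired together. The
# CIFAR-10 dataset, transform, and the toy linear model are assumptions made up
# for the example; only the helper signatures come from this file.
def _example_usage():
    import torch.nn as nn
    import torchvision
    import torchvision.transforms as T
    from torch.utils.data import DataLoader

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    transform = T.ToTensor()
    train_set = torchvision.datasets.CIFAR10('./data', train=True, download=True, transform=transform)
    val_set = torchvision.datasets.CIFAR10('./data', train=False, download=True, transform=transform)
    loader_train = DataLoader(train_set, batch_size=64, shuffle=True)
    loader_val = DataLoader(val_set, batch_size=64)

    model = nn.Sequential(nn.Flatten(), nn.Linear(3 * 32 * 32, 10))  # toy classifier
    optimizer = torch.optim.SGD(model.parameters(), lr=1e-2, momentum=0.9)

    train(model, loader_train, loader_val, optimizer, device, epochs=1)
    check_accuracy(loader_val, model, device)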
def eval_model(loader, model, device):
model = model.to(device=device)
pred = []
groundtruth = []
model.eval() # set model to evaluation mode
with torch.no_grad():
for x, y in loader:
x = x.to(device=device)
y = y.to(device=device, dtype=torch.long)
scores = model(x)
_, preds = scores.max(1)
pred += preds.tolist()
groundtruth += y.tolist()
return pred, groundtruth |
py | 1a374383bd16ef1df0ac34efd4e310b2f925f8d1 |
# import the necessary packages
import win32gui
#import keyboard as keyboard
#import pygame as pygame
#import pythoncom
#import win32con
from PIL import ImageGrab, Image
from imutils.video import VideoStream, FPS
import numpy as np
import argparse
import imutils
import time
import cv2
import pyautogui
import logging
import keyboard
import destiny2_bot_ui_state
from destiny2_bot_osd import destiny2_bot_osd
# construct the argument parse and parse the arguments
# replace this with the url generated by the Wyze app
rtsp_url = "rtsp://wyzecampan:[email protected]/live"
ap = argparse.ArgumentParser()
# ap.add_argument("-i", "--image",
# default="test2.jpg", help="path to the input image")
# ap.add_argument("--cascade",
# default="opencv\data\haarcascades\haarcascade_frontalcatface_extended.xml",
# help="path to cat detector haar cascade")
ap.add_argument("-d", "--debug", action="store_true", default=False,
help="debugging output")
args = ap.parse_args()
logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO)
def main():
# initialize the video stream
# and initialize the FPS counter
#logging.info("starting video stream...")
frame = None
# src=0 is default web cam
#vs = VideoStream(src=0).start()
screenWidth, screenHeight = pyautogui.size()
screenRatio = screenWidth/screenHeight
logging.info("screenWith: {}x{}, format: {:.2f}:1".format(screenWidth,
screenHeight, screenRatio))
logging.info("Creating output window")
cv2.namedWindow("Output", cv2.WINDOW_NORMAL)
# scaling the screen to 70% for second monitor...
cv2.resizeWindow('Output', (int(screenWidth*.70), int(screenHeight*.70)))
cv2.moveWindow('Output', -1440, 200)
try:
destiny_window = win32gui.FindWindow(None, "Destiny 2")
win32gui.SetForegroundWindow(destiny_window)
except:
logging.debug("Couldn't find Destiny 2 window, is it running?")
cv2.destroyAllWindows()
#exit(1)
osd = destiny2_bot_osd(screenWidth, screenHeight)
# Add keyboard hotkeys
keyboard.add_hotkey('ctrl+shift+a', osd.add_console, args=['ctrl+shift+a pressed'])
# START EVENT LOOP
while True:
# grab a screenshot of the desktop
frame = np.array(ImageGrab.grab(bbox=(0, 40,
screenWidth, screenHeight)))
osd.fps_update(frame)
osd.write_console(frame)
# show the output frame
# scale frame to window
rect = cv2.getWindowImageRect('Output')
im_scaled = cv2.resize(frame, (rect[2], rect[3]))
        # ImageGrab returns RGB; swap channels so cv2.imshow (which expects BGR)
        # displays the colors correctly
        display_img = cv2.cvtColor(im_scaled, cv2.COLOR_RGB2BGR)
        cv2.imshow("Output", display_img)
key = cv2.waitKey(1) & 0xFF
# if the `q` key was pressed, break from the loop
if key == ord("q"):
break
# do a bit of cleanup
logging.info("exiting")
cv2.destroyAllWindows()
if __name__=="__main__":
main()
|
py | 1a3743bab4b96979cdb5cde393b2ad5bfeb351c1 | import gym
import numpy as np
from gym.spaces.box import Box
import cv2
import pdb
# Taken from https://github.com/openai/universe-starter-agent
def create_atari_env(env_id):
env = gym.make(env_id)
return env
# process each frame
def _process_frame42(frame):
frame = frame[34:34 + 160, :160]
# Resize by half, then down to 42x42 (essentially mipmapping). If
# we resize directly we lose pixels that, when mapped to 42x42,
# aren't close enough to the pixel boundary.
frame = cv2.resize(frame, (80, 80))
frame = cv2.resize(frame, (42, 42))
frame = frame.mean(2)
frame = frame.astype(np.float32)
frame *= (1.0 / 255.0)
frame = np.reshape(frame, [1, 42, 42])
return frame
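# --- Illustrative usage (not part of the original file) ----------------------
# Minimal sketch of feeding one raw Atari frame through _process_frame42. The
# environment id and the reset() return convention (older gym versions return
# the observation directly) are assumptions for the example.
def _example_process_one_frame():
    env = create_atari_env("PongDeterministic-v4")
    obs = env.reset()
    small = _process_frame42(obs)  # shape (1, 42, 42), float32 values in [0, 1]
    return small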
|
py | 1a3743dd745324f918ec4d41db3b0fa21730b2ae | /home/runner/.cache/pip/pool/7e/43/81/2f29b50bd8a4d5e7f769a0a16a56c800d8a38bbdf53177ff8fa4ed59e6 |
py | 1a37444b214af5701b1d84351bd19a6d9f37cb34 | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
class Solution:
    # Bottom-up DP for regex matching with '.' and '*'.
    # matrix[i][j] == 1 iff the first i characters of s match the first j
    # characters of p.
    def isMatch(self, s: str, p: str) -> bool:
        # Pad both strings so index k maps to the k-th character and row/column
        # 0 stands for the empty prefix.
        self.s = " " + s
        self.p = " " + p
        self.matrix = [[0] * len(self.p) for _ in self.s]
        self.matrix[0][0] = 1  # empty pattern matches empty string
        for i in range(len(s) + 1):
            for j in range(1, len(p) + 1):
                if self.matched(i, j):
                    self.matrix[i][j] = 1
        return bool(self.matrix[len(s)][len(p)])
    def matched(self, i: int, j: int) -> bool:
        # Characters match directly: the result follows from the shorter prefixes.
        if self.equal(i, j):
            return bool(self.matrix[i - 1][j - 1])
        # 'x*' either matches zero occurrences (drop the "x*" pair) or, if the
        # preceding pattern character matches s[i], one more occurrence of it.
        if self.p[j] == '*':
            if self.equal(i, j - 1):
                return bool(self.matrix[i][j - 2] or self.matrix[i - 1][j])
            return bool(self.matrix[i][j - 2])
        return False
    def equal(self, i: int, j: int) -> bool:
        # '.' matches any character of s (but never the empty prefix, i.e. i == 0).
        return (i != 0 and self.p[j] == '.') or self.s[i] == self.p[j]
a=Solution()
print(a.isMatch("aa","a*"))
print(a.isMatch("ba","a*"))
print(a.isMatch("","*"))
print(a.isMatch("","."))
|
py | 1a37447add543c2d51275accd2297b6f97615871 | """Implementations of assessment abstract base class queries."""
# pylint: disable=invalid-name
# Method names comply with OSID specification.
# pylint: disable=no-init
# Abstract classes do not define __init__.
# pylint: disable=too-few-public-methods
# Some interfaces are specified as 'markers' and include no methods.
# pylint: disable=too-many-public-methods
# Number of methods are defined in specification
# pylint: disable=too-many-ancestors
# Inheritance defined in specification
# pylint: disable=too-many-arguments
# Argument signature defined in specification.
# pylint: disable=duplicate-code
# All apparent duplicates have been inspected. They aren't.
import abc
class QuestionQuery:
"""This is the query for searching questions.
Each method match request produces an ``AND`` term while multiple
invocations of a method produces a nested ``OR``.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def get_question_query_record(self, question_record_type):
"""Gets the question record query corresponding to the given ``Item`` record ``Type``.
Multiple retrievals produce a nested ``OR`` term.
:param question_record_type: a question record type
:type question_record_type: ``osid.type.Type``
:return: the question query record
:rtype: ``osid.assessment.records.QuestionQueryRecord``
:raise: ``NullArgument`` -- ``question_record_type`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unsupported`` -- ``has_record_type(question_record_type)`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.records.QuestionQueryRecord
class AnswerQuery:
"""This is the query for searching answers.
Each method match request produces an ``AND`` term while multiple
invocations of a method produces a nested ``OR``.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def get_answer_query_record(self, answer_record_type):
"""Gets the answer record query corresponding to the given ``Answer`` record ``Type``.
Multiple retrievals produce a nested ``OR`` term.
:param answer_record_type: an answer record type
:type answer_record_type: ``osid.type.Type``
:return: the answer query record
:rtype: ``osid.assessment.records.AnswerQueryRecord``
:raise: ``NullArgument`` -- ``answer_record_type`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unsupported`` -- ``has_record_type(answer_record_type)`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.records.AnswerQueryRecord
class ItemQuery:
"""This is the query for searching items.
Each method match request produces an ``AND`` term while multiple
invocations of a method produces a nested ``OR``.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def match_learning_objective_id(self, objective_id, match):
"""Sets the learning objective ``Id`` for this query.
:param objective_id: a learning objective ``Id``
:type objective_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``objective_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_learning_objective_id_terms(self):
"""Clears all learning objective ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
learning_objective_id_terms = property(fdel=clear_learning_objective_id_terms)
@abc.abstractmethod
def supports_learning_objective_query(self):
"""Tests if an ``ObjectiveQuery`` is available.
:return: ``true`` if a learning objective query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_learning_objective_query(self):
"""Gets the query for a learning objective.
Multiple retrievals produce a nested ``OR`` term.
:return: the learning objective query
:rtype: ``osid.learning.ObjectiveQuery``
:raise: ``Unimplemented`` -- ``supports_learning_objective_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_learning_objective_query()`` is ``true``.*
"""
return # osid.learning.ObjectiveQuery
learning_objective_query = property(fget=get_learning_objective_query)
@abc.abstractmethod
def match_any_learning_objective(self, match):
"""Matches an item with any objective.
:param match: ``true`` to match items with any learning objective, ``false`` to match items with no learning objectives
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_learning_objective_terms(self):
"""Clears all learning objective terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
learning_objective_terms = property(fdel=clear_learning_objective_terms)
@abc.abstractmethod
def match_question_id(self, question_id, match):
"""Sets the question ``Id`` for this query.
:param question_id: a question ``Id``
:type question_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``question_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_question_id_terms(self):
"""Clears all question ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
question_id_terms = property(fdel=clear_question_id_terms)
@abc.abstractmethod
def supports_question_query(self):
"""Tests if a ``QuestionQuery`` is available.
:return: ``true`` if a question query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_question_query(self):
"""Gets the query for a question.
Multiple retrievals produce a nested ``OR`` term.
:return: the question query
:rtype: ``osid.assessment.QuestionQuery``
:raise: ``Unimplemented`` -- ``supports_question_query()`` is ``false``
*compliance: optional -- This method must be implemented if
        ``supports_question_query()`` is ``true``.*
"""
return # osid.assessment.QuestionQuery
question_query = property(fget=get_question_query)
@abc.abstractmethod
def match_any_question(self, match):
"""Matches an item with any question.
:param match: ``true`` to match items with any question, ``false`` to match items with no questions
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_question_terms(self):
"""Clears all question terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
question_terms = property(fdel=clear_question_terms)
@abc.abstractmethod
def match_answer_id(self, answer_id, match):
"""Sets the answer ``Id`` for this query.
:param answer_id: an answer ``Id``
:type answer_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``answer_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_answer_id_terms(self):
"""Clears all answer ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
answer_id_terms = property(fdel=clear_answer_id_terms)
@abc.abstractmethod
def supports_answer_query(self):
"""Tests if an ``AnswerQuery`` is available.
:return: ``true`` if an answer query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_answer_query(self):
"""Gets the query for an answer.
Multiple retrievals produce a nested ``OR`` term.
:return: the answer query
:rtype: ``osid.assessment.AnswerQuery``
:raise: ``Unimplemented`` -- ``supports_answer_query()`` is ``false``
*compliance: optional -- This method must be implemented if
        ``supports_answer_query()`` is ``true``.*
"""
return # osid.assessment.AnswerQuery
answer_query = property(fget=get_answer_query)
@abc.abstractmethod
def match_any_answer(self, match):
"""Matches an item with any answer.
:param match: ``true`` to match items with any answer, ``false`` to match items with no answers
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_answer_terms(self):
"""Clears all answer terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
answer_terms = property(fdel=clear_answer_terms)
@abc.abstractmethod
def match_assessment_id(self, assessment_id, match):
"""Sets the assessment ``Id`` for this query.
:param assessment_id: an assessment ``Id``
:type assessment_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``assessment_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_assessment_id_terms(self):
"""Clears all assessment ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
assessment_id_terms = property(fdel=clear_assessment_id_terms)
@abc.abstractmethod
def supports_assessment_query(self):
"""Tests if an ``AssessmentQuery`` is available.
:return: ``true`` if an assessment query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_assessment_query(self):
"""Gets the query for an assessment.
Multiple retrievals produce a nested ``OR`` term.
:return: the assessment query
:rtype: ``osid.assessment.AssessmentQuery``
:raise: ``Unimplemented`` -- ``supports_assessment_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_assessment_query()`` is ``true``.*
"""
return # osid.assessment.AssessmentQuery
assessment_query = property(fget=get_assessment_query)
@abc.abstractmethod
def match_any_assessment(self, match):
"""Matches an item with any assessment.
:param match: ``true`` to match items with any assessment, ``false`` to match items with no assessments
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_assessment_terms(self):
"""Clears all assessment terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
assessment_terms = property(fdel=clear_assessment_terms)
@abc.abstractmethod
def match_bank_id(self, bank_id, match):
"""Sets the bank ``Id`` for this query.
:param bank_id: a bank ``Id``
:type bank_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``bank_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_bank_id_terms(self):
"""Clears all bank ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
bank_id_terms = property(fdel=clear_bank_id_terms)
@abc.abstractmethod
def supports_bank_query(self):
"""Tests if a ``BankQuery`` is available.
:return: ``true`` if a bank query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_bank_query(self):
"""Gets the query for a bank.
Multiple retrievals produce a nested ``OR`` term.
:return: the bank query
:rtype: ``osid.assessment.BankQuery``
:raise: ``Unimplemented`` -- ``supports_bank_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_bank_query()`` is ``true``.*
"""
return # osid.assessment.BankQuery
bank_query = property(fget=get_bank_query)
@abc.abstractmethod
def clear_bank_terms(self):
"""Clears all bank terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
bank_terms = property(fdel=clear_bank_terms)
@abc.abstractmethod
def get_item_query_record(self, item_record_type):
"""Gets the item record query corresponding to the given ``Item`` record ``Type``.
Multiple retrievals produce a nested ``OR`` term.
:param item_record_type: an item record type
:type item_record_type: ``osid.type.Type``
:return: the item query record
:rtype: ``osid.assessment.records.ItemQueryRecord``
:raise: ``NullArgument`` -- ``item_record_type`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unsupported`` -- ``has_record_type(item_record_type)`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.records.ItemQueryRecord
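# --- Illustrative only (not part of the abstract specification above) --------
# Sketch of how a consumer might compose an ItemQuery. It assumes a concrete
# provider exposes an item query session with get_item_query() and
# get_items_by_query() (names taken from the wider OSID pattern, not from this
# file), and that objective_id is an osid.id.Id obtained elsewhere.
def _example_item_search(item_query_session, objective_id):
    query = item_query_session.get_item_query()
    # Each distinct match_* call ANDs a new term; repeated calls to the same
    # method are ORed together, as described in the class docstrings above.
    query.match_learning_objective_id(objective_id, True)
    query.match_any_question(True)  # only items that actually have a question
    return item_query_session.get_items_by_query(query)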
class AssessmentQuery:
"""This is the query for searching assessments.
Each method match request produces an ``AND`` term while multiple
invocations of a method produces a nested ``OR``.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def match_level_id(self, grade_id, match):
"""Sets the level grade ``Id`` for this query.
:param grade_id: a grade ``Id``
:type grade_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``grade_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_level_id_terms(self):
"""Clears all level ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
level_id_terms = property(fdel=clear_level_id_terms)
@abc.abstractmethod
def supports_level_query(self):
"""Tests if a ``GradeQuery`` is available.
:return: ``true`` if a grade query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_level_query(self):
"""Gets the query for a grade.
Multiple retrievals produce a nested ``OR`` term.
:return: the grade query
:rtype: ``osid.grading.GradeQuery``
:raise: ``Unimplemented`` -- ``supports_level_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_level_query()`` is ``true``.*
"""
return # osid.grading.GradeQuery
level_query = property(fget=get_level_query)
@abc.abstractmethod
def match_any_level(self, match):
"""Matches an assessment that has any level assigned.
:param match: ``true`` to match assessments with any level, ``false`` to match assessments with no level
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_level_terms(self):
"""Clears all level terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
level_terms = property(fdel=clear_level_terms)
@abc.abstractmethod
def match_rubric_id(self, assessment_id, match):
"""Sets the rubric assessment ``Id`` for this query.
:param assessment_id: an assessment ``Id``
:type assessment_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``assessment_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_rubric_id_terms(self):
"""Clears all rubric assessment ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
rubric_id_terms = property(fdel=clear_rubric_id_terms)
@abc.abstractmethod
def supports_rubric_query(self):
"""Tests if an ``AssessmentQuery`` is available.
:return: ``true`` if a rubric assessment query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_rubric_query(self):
"""Gets the query for a rubric assessment.
Multiple retrievals produce a nested ``OR`` term.
:return: the assessment query
:rtype: ``osid.assessment.AssessmentQuery``
:raise: ``Unimplemented`` -- ``supports_rubric_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_rubric_query()`` is ``true``.*
"""
return # osid.assessment.AssessmentQuery
rubric_query = property(fget=get_rubric_query)
@abc.abstractmethod
def match_any_rubric(self, match):
"""Matches an assessment that has any rubric assessment assigned.
:param match: ``true`` to match assessments with any rubric, ``false`` to match assessments with no rubric
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_rubric_terms(self):
"""Clears all rubric assessment terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
rubric_terms = property(fdel=clear_rubric_terms)
@abc.abstractmethod
def match_item_id(self, item_id, match):
"""Sets the item ``Id`` for this query.
:param item_id: an item ``Id``
:type item_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``item_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_item_id_terms(self):
"""Clears all item ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
item_id_terms = property(fdel=clear_item_id_terms)
@abc.abstractmethod
def supports_item_query(self):
"""Tests if an ``ItemQuery`` is available.
:return: ``true`` if an item query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_item_query(self):
"""Gets the query for an item.
Multiple retrievals produce a nested ``OR`` term.
:return: the item query
:rtype: ``osid.assessment.ItemQuery``
:raise: ``Unimplemented`` -- ``supports_item_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_item_query()`` is ``true``.*
"""
return # osid.assessment.ItemQuery
item_query = property(fget=get_item_query)
@abc.abstractmethod
def match_any_item(self, match):
"""Matches an assessment that has any item.
:param match: ``true`` to match assessments with any item, ``false`` to match assessments with no items
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_item_terms(self):
"""Clears all item terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
item_terms = property(fdel=clear_item_terms)
@abc.abstractmethod
def match_assessment_offered_id(self, assessment_offered_id, match):
"""Sets the assessment offered ``Id`` for this query.
:param assessment_offered_id: an assessment offered ``Id``
:type assessment_offered_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``assessment_offered_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_assessment_offered_id_terms(self):
"""Clears all assessment offered ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
assessment_offered_id_terms = property(fdel=clear_assessment_offered_id_terms)
@abc.abstractmethod
def supports_assessment_offered_query(self):
"""Tests if an ``AssessmentOfferedQuery`` is available.
:return: ``true`` if an assessment offered query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_assessment_offered_query(self):
"""Gets the query for an assessment offered.
Multiple retrievals produce a nested ``OR`` term.
:return: the assessment offered query
:rtype: ``osid.assessment.AssessmentOfferedQuery``
:raise: ``Unimplemented`` -- ``supports_assessment_offered_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_assessment_offered_query()`` is ``true``.*
"""
return # osid.assessment.AssessmentOfferedQuery
assessment_offered_query = property(fget=get_assessment_offered_query)
@abc.abstractmethod
def match_any_assessment_offered(self, match):
"""Matches an assessment that has any offering.
:param match: ``true`` to match assessments with any offering, ``false`` to match assessments with no offerings
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_assessment_offered_terms(self):
"""Clears all assessment offered terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
assessment_offered_terms = property(fdel=clear_assessment_offered_terms)
@abc.abstractmethod
def match_assessment_taken_id(self, assessment_taken_id, match):
"""Sets the assessment taken ``Id`` for this query.
:param assessment_taken_id: an assessment taken ``Id``
:type assessment_taken_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``assessment_taken_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_assessment_taken_id_terms(self):
"""Clears all assessment taken ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
assessment_taken_id_terms = property(fdel=clear_assessment_taken_id_terms)
@abc.abstractmethod
def supports_assessment_taken_query(self):
"""Tests if an ``AssessmentTakenQuery`` is available.
:return: ``true`` if an assessment taken query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_assessment_taken_query(self):
"""Gets the query for an assessment taken.
Multiple retrievals produce a nested ``OR`` term.
:return: the assessment taken query
:rtype: ``osid.assessment.AssessmentTakenQuery``
:raise: ``Unimplemented`` -- ``supports_assessment_taken_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_assessment_taken_query()`` is ``true``.*
"""
return # osid.assessment.AssessmentTakenQuery
assessment_taken_query = property(fget=get_assessment_taken_query)
@abc.abstractmethod
def match_any_assessment_taken(self, match):
"""Matches an assessment that has any taken version.
:param match: ``true`` to match assessments with any taken assessments, ``false`` to match assessments with no taken assessments
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_assessment_taken_terms(self):
"""Clears all assessment taken terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
assessment_taken_terms = property(fdel=clear_assessment_taken_terms)
@abc.abstractmethod
def match_bank_id(self, bank_id, match):
"""Sets the bank ``Id`` for this query.
:param bank_id: a bank ``Id``
:type bank_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``bank_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_bank_id_terms(self):
"""Clears all bank ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
bank_id_terms = property(fdel=clear_bank_id_terms)
@abc.abstractmethod
def supports_bank_query(self):
"""Tests if a ``BankQuery`` is available.
:return: ``true`` if a bank query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_bank_query(self):
"""Gets the query for a bank.
Multiple retrievals produce a nested ``OR`` term.
:return: the bank query
:rtype: ``osid.assessment.BankQuery``
:raise: ``Unimplemented`` -- ``supports_bank_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_bank_query()`` is ``true``.*
"""
return # osid.assessment.BankQuery
bank_query = property(fget=get_bank_query)
@abc.abstractmethod
def clear_bank_terms(self):
"""Clears all bank terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
bank_terms = property(fdel=clear_bank_terms)
@abc.abstractmethod
def get_assessment_query_record(self, assessment_record_type):
"""Gets the assessment query record corresponding to the given ``Assessment`` record ``Type``.
Multiple retrievals produce a nested ``OR`` term.
:param assessment_record_type: an assessment record type
:type assessment_record_type: ``osid.type.Type``
:return: the assessment query record
:rtype: ``osid.assessment.records.AssessmentQueryRecord``
:raise: ``NullArgument`` -- ``assessment_record_type`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unsupported`` -- ``has_record_type(assessment_record_type)`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.records.AssessmentQueryRecord
class AssessmentOfferedQuery:
"""This is the query for searching assessments.
Each method match request produces an ``AND`` term while multiple
invocations of a method produces a nested ``OR``.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def match_assessment_id(self, assessment_id, match):
"""Sets the assessment ``Id`` for this query.
:param assessment_id: an assessment ``Id``
:type assessment_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``assessment_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_assessment_id_terms(self):
"""Clears all assessment ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
assessment_id_terms = property(fdel=clear_assessment_id_terms)
@abc.abstractmethod
def supports_assessment_query(self):
"""Tests if an ``AssessmentQuery`` is available.
:return: ``true`` if an assessment query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_assessment_query(self):
"""Gets the query for an assessment.
Multiple retrievals produce a nested ``OR`` term.
:return: the assessment query
:rtype: ``osid.assessment.AssessmentQuery``
:raise: ``Unimplemented`` -- ``supports_assessment_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_assessment_query()`` is ``true``.*
"""
return # osid.assessment.AssessmentQuery
assessment_query = property(fget=get_assessment_query)
@abc.abstractmethod
def clear_assessment_terms(self):
"""Clears all assessment terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
assessment_terms = property(fdel=clear_assessment_terms)
@abc.abstractmethod
def match_level_id(self, grade_id, match):
"""Sets the level grade ``Id`` for this query.
:param grade_id: a grade ``Id``
:type grade_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``grade_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_level_id_terms(self):
"""Clears all level ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
level_id_terms = property(fdel=clear_level_id_terms)
@abc.abstractmethod
def supports_level_query(self):
"""Tests if a ``GradeQuery`` is available.
:return: ``true`` if a grade query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_level_query(self):
"""Gets the query for a grade.
Multiple retrievals produce a nested ``OR`` term.
:return: the grade query
:rtype: ``osid.grading.GradeQuery``
:raise: ``Unimplemented`` -- ``supports_level_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_level_query()`` is ``true``.*
"""
return # osid.grading.GradeQuery
level_query = property(fget=get_level_query)
@abc.abstractmethod
def match_any_level(self, match):
"""Matches an assessment offered that has any level assigned.
:param match: ``true`` to match offerings with any level, ``false`` to match offerings with no levels
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_level_terms(self):
"""Clears all level terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
level_terms = property(fdel=clear_level_terms)
@abc.abstractmethod
def match_items_sequential(self, match):
"""Match sequential assessments.
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_items_sequential_terms(self):
"""Clears all sequential terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
items_sequential_terms = property(fdel=clear_items_sequential_terms)
@abc.abstractmethod
def match_items_shuffled(self, match):
"""Match shuffled item assessments.
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_items_shuffled_terms(self):
"""Clears all shuffled terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
items_shuffled_terms = property(fdel=clear_items_shuffled_terms)
@abc.abstractmethod
def match_start_time(self, start, end, match):
"""Matches assessments whose start time falls between the specified range inclusive.
:param start: start of range
:type start: ``osid.calendaring.DateTime``
:param end: end of range
:type end: ``osid.calendaring.DateTime``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``InvalidArgument`` -- ``end`` is less than ``start``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def match_any_start_time(self, match):
"""Matches offerings that has any start time assigned.
:param match: ``true`` to match offerings with any start time, ``false`` to match offerings with no start time
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_start_time_terms(self):
"""Clears all scheduled terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
start_time_terms = property(fdel=clear_start_time_terms)
@abc.abstractmethod
def match_deadline(self, start, end, match):
"""Matches assessments whose end time falls between the specified range inclusive.
:param start: start of range
:type start: ``osid.calendaring.DateTime``
:param end: end of range
:type end: ``osid.calendaring.DateTime``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``InvalidArgument`` -- ``end`` is less than ``start``
:raise: ``NullArgument`` -- ``start`` or ``end`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def match_any_deadline(self, match):
"""Matches offerings that have any deadline assigned.
:param match: ``true`` to match offerings with any deadline, ``false`` to match offerings with no deadline
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_deadline_terms(self):
"""Clears all deadline terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
deadline_terms = property(fdel=clear_deadline_terms)
@abc.abstractmethod
def match_duration(self, low, high, match):
"""Matches assessments whose duration falls between the specified range inclusive.
:param low: start range of duration
:type low: ``osid.calendaring.Duration``
:param high: end range of duration
:type high: ``osid.calendaring.Duration``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``InvalidArgument`` -- ``high`` is less than ``low``
:raise: ``NullArgument`` -- ``low`` or ``high`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def match_any_duration(self, match):
"""Matches offerings that have any duration assigned.
:param match: ``true`` to match offerings with any duration, ``false`` to match offerings with no duration
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_duration_terms(self):
"""Clears all duration terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
duration_terms = property(fdel=clear_duration_terms)
@abc.abstractmethod
def match_score_system_id(self, grade_system_id, match):
"""Sets the grade system ``Id`` for this query.
:param grade_system_id: a grade system ``Id``
:type grade_system_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``grade_system_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_score_system_id_terms(self):
"""Clears all grade system ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
score_system_id_terms = property(fdel=clear_score_system_id_terms)
@abc.abstractmethod
def supports_score_system_query(self):
"""Tests if a ``GradeSystemQuery`` is available.
:return: ``true`` if a grade system query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_score_system_query(self):
"""Gets the query for a grade system.
Multiple retrievals produce a nested ``OR`` term.
:return: the grade system query
:rtype: ``osid.grading.GradeSystemQuery``
:raise: ``Unimplemented`` -- ``supports_score_system_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_score_system_query()`` is ``true``.*
"""
return # osid.grading.GradeSystemQuery
score_system_query = property(fget=get_score_system_query)
@abc.abstractmethod
def match_any_score_system(self, match):
"""Matches taken assessments that have any grade system assigned.
:param match: ``true`` to match assessments with any grade system, ``false`` to match assessments with no grade system
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_score_system_terms(self):
"""Clears all grade system terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
score_system_terms = property(fdel=clear_score_system_terms)
@abc.abstractmethod
def match_grade_system_id(self, grade_system_id, match):
"""Sets the grade system ``Id`` for this query.
:param grade_system_id: a grade system ``Id``
:type grade_system_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``grade_system_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_grade_system_id_terms(self):
"""Clears all grade system ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
grade_system_id_terms = property(fdel=clear_grade_system_id_terms)
@abc.abstractmethod
def supports_grade_system_query(self):
"""Tests if a ``GradeSystemQuery`` is available.
:return: ``true`` if a grade system query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_grade_system_query(self):
"""Gets the query for a grade system.
Multiple retrievals produce a nested ``OR`` term.
:return: the grade system query
:rtype: ``osid.grading.GradeSystemQuery``
:raise: ``Unimplemented`` -- ``supports_score_system_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_score_system_query()`` is ``true``.*
"""
return # osid.grading.GradeSystemQuery
grade_system_query = property(fget=get_grade_system_query)
@abc.abstractmethod
def match_any_grade_system(self, match):
"""Matches taken assessments that have any grade system assigned.
:param match: ``true`` to match assessments with any grade system, ``false`` to match assessments with no grade system
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_grade_system_terms(self):
"""Clears all grade system terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
grade_system_terms = property(fdel=clear_grade_system_terms)
@abc.abstractmethod
def match_rubric_id(self, assessment_offered_id, match):
"""Sets the rubric assessment offered ``Id`` for this query.
:param assessment_offered_id: an assessment offered ``Id``
:type assessment_offered_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``assessment_offered_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_rubric_id_terms(self):
"""Clears all rubric assessment offered ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
rubric_id_terms = property(fdel=clear_rubric_id_terms)
@abc.abstractmethod
def supports_rubric_query(self):
"""Tests if an ``AssessmentOfferedQuery`` is available.
:return: ``true`` if a rubric assessment offered query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_rubric_query(self):
"""Gets the query for a rubric assessment.
Multiple retrievals produce a nested ``OR`` term.
:return: the assessment offered query
:rtype: ``osid.assessment.AssessmentOfferedQuery``
:raise: ``Unimplemented`` -- ``supports_rubric_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_rubric_query()`` is ``true``.*
"""
return # osid.assessment.AssessmentOfferedQuery
rubric_query = property(fget=get_rubric_query)
@abc.abstractmethod
def match_any_rubric(self, match):
"""Matches an assessment offered that has any rubric assessment assigned.
:param match: ``true`` to match assessments offered with any rubric, ``false`` to match assessments offered with no rubric
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_rubric_terms(self):
"""Clears all rubric assessment terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
rubric_terms = property(fdel=clear_rubric_terms)
@abc.abstractmethod
def match_assessment_taken_id(self, assessment_taken_id, match):
"""Sets the assessment taken ``Id`` for this query.
:param assessment_taken_id: an assessment taken ``Id``
:type assessment_taken_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``assessment_taken_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_assessment_taken_id_terms(self):
"""Clears all assessment taken ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
assessment_taken_id_terms = property(fdel=clear_assessment_taken_id_terms)
@abc.abstractmethod
def supports_assessment_taken_query(self):
"""Tests if an ``AssessmentTakenQuery`` is available.
:return: ``true`` if an assessment taken query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_assessment_taken_query(self):
"""Gets the query for an assessment taken.
Multiple retrievals produce a nested ``OR`` term.
:return: the assessment taken query
:rtype: ``osid.assessment.AssessmentTakenQuery``
:raise: ``Unimplemented`` -- ``supports_assessment_taken_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_assessment_taken_query()`` is ``true``.*
"""
return # osid.assessment.AssessmentTakenQuery
assessment_taken_query = property(fget=get_assessment_taken_query)
@abc.abstractmethod
def match_any_assessment_taken(self, match):
"""Matches offerings that have any taken assessment version.
:param match: ``true`` to match offerings with any taken assessment, ``false`` to match offerings with no assessment taken
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_assessment_taken_terms(self):
"""Clears all assessment taken terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
assessment_taken_terms = property(fdel=clear_assessment_taken_terms)
@abc.abstractmethod
def match_bank_id(self, bank_id, match):
"""Sets the bank ``Id`` for this query.
:param bank_id: a bank ``Id``
:type bank_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``bank_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_bank_id_terms(self):
"""Clears all bank ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
bank_id_terms = property(fdel=clear_bank_id_terms)
@abc.abstractmethod
def supports_bank_query(self):
"""Tests if a ``BankQuery`` is available.
:return: ``true`` if a bank query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_bank_query(self):
"""Gets the query for a bank.
Multiple retrievals produce a nested ``OR`` term.
:return: the bank query
:rtype: ``osid.assessment.BankQuery``
:raise: ``Unimplemented`` -- ``supports_bank_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_bank_query()`` is ``true``.*
"""
return # osid.assessment.BankQuery
bank_query = property(fget=get_bank_query)
@abc.abstractmethod
def clear_bank_terms(self):
"""Clears all bank terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
bank_terms = property(fdel=clear_bank_terms)
@abc.abstractmethod
def get_assessment_offered_query_record(self, assessment_offered_record_type):
"""Gets the assessment offered query record corresponding to the given ``AssessmentOffered`` record ``Type``.
Multiple retrievals produce a nested ``OR`` term.
:param assessment_offered_record_type: an assessment offered record type
:type assessment_offered_record_type: ``osid.type.Type``
:return: the assessment offered query record
:rtype: ``osid.assessment.records.AssessmentOfferedQueryRecord``
:raise: ``NullArgument`` -- ``assessment_offered_record_type`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unsupported`` -- ``has_record_type(assessment_offered_record_type)`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.records.AssessmentOfferedQueryRecord
class AssessmentTakenQuery:
"""This is the query for searching assessments.
Each method match request produces an ``AND`` term while multiple
invocations of a method produces a nested ``OR``.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def match_assessment_offered_id(self, assessment_offered_id, match):
"""Sets the assessment offered ``Id`` for this query.
:param assessment_offered_id: an assessment ``Id``
:type assessment_offered_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``assessment_offered_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_assessment_offered_id_terms(self):
"""Clears all assessment offered ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
assessment_offered_id_terms = property(fdel=clear_assessment_offered_id_terms)
@abc.abstractmethod
def supports_assessment_offered_query(self):
"""Tests if an ``AssessmentOfferedQuery`` is available.
:return: ``true`` if an assessment offered query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_assessment_offered_query(self):
"""Gets the query for an assessment.
Multiple retrievals produce a nested ``OR`` term.
:return: the assessment offered query
:rtype: ``osid.assessment.AssessmentOfferedQuery``
:raise: ``Unimplemented`` -- ``supports_assessment_offered_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_assessment_offered_query()`` is ``true``.*
"""
return # osid.assessment.AssessmentOfferedQuery
assessment_offered_query = property(fget=get_assessment_offered_query)
@abc.abstractmethod
def clear_assessment_offered_terms(self):
"""Clears all assessment offered terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
assessment_offered_terms = property(fdel=clear_assessment_offered_terms)
@abc.abstractmethod
def match_taker_id(self, resource_id, match):
"""Sets the resource ``Id`` for this query.
:param resource_id: a resource ``Id``
:type resource_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``resource_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_taker_id_terms(self):
"""Clears all resource ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
taker_id_terms = property(fdel=clear_taker_id_terms)
@abc.abstractmethod
def supports_taker_query(self):
"""Tests if a ``ResourceQuery`` is available.
:return: ``true`` if a resource query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_taker_query(self):
"""Gets the query for a resource.
Multiple retrievals produce a nested ``OR`` term.
:return: the resource query
:rtype: ``osid.resource.ResourceQuery``
:raise: ``Unimplemented`` -- ``supports_taker_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_taker_query()`` is ``true``.*
"""
return # osid.resource.ResourceQuery
taker_query = property(fget=get_taker_query)
@abc.abstractmethod
def clear_taker_terms(self):
"""Clears all resource terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
taker_terms = property(fdel=clear_taker_terms)
@abc.abstractmethod
def match_taking_agent_id(self, agent_id, match):
"""Sets the agent ``Id`` for this query.
:param agent_id: an agent ``Id``
:type agent_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``agent_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_taking_agent_id_terms(self):
"""Clears all agent ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
taking_agent_id_terms = property(fdel=clear_taking_agent_id_terms)
@abc.abstractmethod
def supports_taking_agent_query(self):
"""Tests if an ``AgentQuery`` is available.
:return: ``true`` if an agent query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_taking_agent_query(self):
"""Gets the query for an agent.
Multiple retrievals produce a nested ``OR`` term.
:return: the agent query
:rtype: ``osid.authentication.AgentQuery``
:raise: ``Unimplemented`` -- ``supports_taking_agent_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_taking_agent_query()`` is ``true``.*
"""
return # osid.authentication.AgentQuery
taking_agent_query = property(fget=get_taking_agent_query)
@abc.abstractmethod
def clear_taking_agent_terms(self):
"""Clears all taking agent terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
taking_agent_terms = property(fdel=clear_taking_agent_terms)
@abc.abstractmethod
def match_actual_start_time(self, start, end, match):
"""Matches assessments whose start time falls between the specified range inclusive.
:param start: start of range
:type start: ``osid.calendaring.DateTime``
:param end: end of range
:type end: ``osid.calendaring.DateTime``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``InvalidArgument`` -- ``end`` is less than ``start``
:raise: ``NullArgument`` -- ``start`` or ``end`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def match_any_actual_start_time(self, match):
"""Matches taken assessments taken that have begun.
:param match: ``true`` to match assessments taken started, ``false`` to match assessments taken that have not begun
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_actual_start_time_terms(self):
"""Clears all start time terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
actual_start_time_terms = property(fdel=clear_actual_start_time_terms)
@abc.abstractmethod
def match_completion_time(self, start, end, match):
"""Matches assessments whose completion time falls between the specified range inclusive.
:param start: start of range
:type start: ``osid.calendaring.DateTime``
:param end: end of range
:type end: ``osid.calendaring.DateTime``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``InvalidArgument`` -- ``end`` is less than ``start``
:raise: ``NullArgument`` -- ``start`` or ``end`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def match_any_completion_time(self, match):
"""Matches taken assessments taken that have completed.
:param match: ``true`` to match assessments taken completed, ``false`` to match assessments taken that are incomplete
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_completion_time_terms(self):
"""Clears all in completion time terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
completion_time_terms = property(fdel=clear_completion_time_terms)
@abc.abstractmethod
def match_time_spent(self, low, high, match):
"""Matches assessments where the time spent falls between the specified range inclusive.
:param low: start of duration range
:type low: ``osid.calendaring.Duration``
:param high: end of duration range
:type high: ``osid.calendaring.Duration``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``InvalidArgument`` -- ``high`` is less than ``low``
:raise: ``NullArgument`` -- ``low`` or ``high`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_time_spent_terms(self):
"""Clears all in time spent terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
time_spent_terms = property(fdel=clear_time_spent_terms)
@abc.abstractmethod
def match_score_system_id(self, grade_system_id, match):
"""Sets the grade system ``Id`` for this query.
:param grade_system_id: a grade system ``Id``
:type grade_system_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``grade_system_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_score_system_id_terms(self):
"""Clears all grade system ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
score_system_id_terms = property(fdel=clear_score_system_id_terms)
@abc.abstractmethod
def supports_score_system_query(self):
"""Tests if a ``GradeSystemQuery`` is available.
:return: ``true`` if a grade system query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_score_system_query(self):
"""Gets the query for a grade system.
Multiple retrievals produce a nested ``OR`` term.
:return: the grade system query
:rtype: ``osid.grading.GradeSystemQuery``
:raise: ``Unimplemented`` -- ``supports_score_system_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_score_system_query()`` is ``true``.*
"""
return # osid.grading.GradeSystemQuery
score_system_query = property(fget=get_score_system_query)
@abc.abstractmethod
def match_any_score_system(self, match):
"""Matches taken assessments that have any grade system assigned.
:param match: ``true`` to match assessments with any grade system, ``false`` to match assessments with no grade system
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_score_system_terms(self):
"""Clears all grade system terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
score_system_terms = property(fdel=clear_score_system_terms)
@abc.abstractmethod
def match_score(self, low, high, match):
"""Matches assessments whose score falls between the specified range inclusive.
:param low: start of range
:type low: ``decimal``
:param high: end of range
:type high: ``decimal``
:param match: ``true`` for a positive match, ``false`` for negative match
:type match: ``boolean``
:raise: ``InvalidArgument`` -- ``high`` is less than ``low``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def match_any_score(self, match):
"""Matches taken assessments that have any score assigned.
:param match: ``true`` to match assessments with any score, ``false`` to match assessments with no score
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_score_terms(self):
"""Clears all score terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
score_terms = property(fdel=clear_score_terms)
@abc.abstractmethod
def match_grade_id(self, grade_id, match):
"""Sets the grade ``Id`` for this query.
:param grade_id: a grade ``Id``
:type grade_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``grade_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_grade_id_terms(self):
"""Clears all grade ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
grade_id_terms = property(fdel=clear_grade_id_terms)
@abc.abstractmethod
def supports_grade_query(self):
"""Tests if a ``GradeQuery`` is available.
:return: ``true`` if a grade query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_grade_query(self):
"""Gets the query for a grade.
Multiple retrievals produce a nested ``OR`` term.
:return: the grade query
:rtype: ``osid.grading.GradeQuery``
:raise: ``Unimplemented`` -- ``supports_grade_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_grade_query()`` is ``true``.*
"""
return # osid.grading.GradeQuery
grade_query = property(fget=get_grade_query)
@abc.abstractmethod
def match_any_grade(self, match):
"""Matches taken assessments that have any grade assigned.
:param match: ``true`` to match assessments with any grade, ``false`` to match assessments with no grade
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_grade_terms(self):
"""Clears all grade terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
grade_terms = property(fdel=clear_grade_terms)
@abc.abstractmethod
def match_feedback(self, comments, string_match_type, match):
"""Sets the comment string for this query.
:param comments: comment string
:type comments: ``string``
:param string_match_type: the string match type
:type string_match_type: ``osid.type.Type``
:param match: ``true`` for a positive match, ``false`` for negative match
:type match: ``boolean``
:raise: ``InvalidArgument`` -- ``comments is`` not of ``string_match_type``
:raise: ``NullArgument`` -- ``comments`` or ``string_match_type`` is ``null``
:raise: ``Unsupported`` -- ``supports_string_match_type(string_match_type)`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def match_any_feedback(self, match):
"""Matches taken assessments that have any comments.
:param match: ``true`` to match assessments with any comments, ``false`` to match assessments with no comments
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_feedback_terms(self):
"""Clears all comment terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
feedback_terms = property(fdel=clear_feedback_terms)
@abc.abstractmethod
def match_rubric_id(self, assessment_taken_id, match):
"""Sets the rubric assessment taken ``Id`` for this query.
:param assessment_taken_id: an assessment taken ``Id``
:type assessment_taken_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``assessment_taken_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_rubric_id_terms(self):
"""Clears all rubric assessment taken ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
rubric_id_terms = property(fdel=clear_rubric_id_terms)
@abc.abstractmethod
def supports_rubric_query(self):
"""Tests if an ``AssessmentTakenQuery`` is available.
:return: ``true`` if a rubric assessment taken query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_rubric_query(self):
"""Gets the query for a rubric assessment.
Multiple retrievals produce a nested ``OR`` term.
:return: the assessment taken query
:rtype: ``osid.assessment.AssessmentTakenQuery``
:raise: ``Unimplemented`` -- ``supports_rubric_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_rubric_query()`` is ``true``.*
"""
return # osid.assessment.AssessmentTakenQuery
rubric_query = property(fget=get_rubric_query)
@abc.abstractmethod
def match_any_rubric(self, match):
"""Matches an assessment taken that has any rubric assessment assigned.
:param match: ``true`` to match assessments taken with any rubric, ``false`` to match assessments taken with no rubric
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_rubric_terms(self):
"""Clears all rubric assessment taken terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
rubric_terms = property(fdel=clear_rubric_terms)
@abc.abstractmethod
def match_bank_id(self, bank_id, match):
"""Sets the bank ``Id`` for this query.
:param bank_id: a bank ``Id``
:type bank_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``bank_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_bank_id_terms(self):
"""Clears all bank ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
bank_id_terms = property(fdel=clear_bank_id_terms)
@abc.abstractmethod
def supports_bank_query(self):
"""Tests if a ``BankQuery`` is available.
:return: ``true`` if a bank query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_bank_query(self):
"""Gets the query for a bank.
Multiple retrievals produce a nested ``OR`` term.
:return: the bank query
:rtype: ``osid.assessment.BankQuery``
:raise: ``Unimplemented`` -- ``supports_bank_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_bank_query()`` is ``true``.*
"""
return # osid.assessment.BankQuery
bank_query = property(fget=get_bank_query)
@abc.abstractmethod
def clear_bank_terms(self):
"""Clears all bank terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
bank_terms = property(fdel=clear_bank_terms)
@abc.abstractmethod
def get_assessment_taken_query_record(self, assessment_taken_record_type):
"""Gets the assessment taken query record corresponding to the given ``AssessmentTaken`` record ``Type``.
Multiple retrievals produce a nested ``OR`` term.
:param assessment_taken_record_type: an assessment taken record type
:type assessment_taken_record_type: ``osid.type.Type``
:return: the assessment taken query record
:rtype: ``osid.assessment.records.AssessmentTakenQueryRecord``
:raise: ``NullArgument`` -- ``assessment_taken_record_type`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unsupported`` -- ``has_record_type(assessment_taken_record_type)`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.records.AssessmentTakenQueryRecord
class BankQuery:
"""This is the query for searching banks Each method specifies an ``AND`` term while multiple invocations of the same method produce a nested ``OR``."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def match_item_id(self, item_id, match):
"""Sets the item ``Id`` for this query.
:param item_id: an item ``Id``
:type item_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``item_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_item_id_terms(self):
"""Clears all item ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
item_id_terms = property(fdel=clear_item_id_terms)
@abc.abstractmethod
def supports_item_query(self):
"""Tests if a ``ItemQuery`` is available.
:return: ``true`` if an item query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_item_query(self):
"""Gets the query for an item.
Multiple retrievals produce a nested ``OR`` term.
:return: the item query
:rtype: ``osid.assessment.ItemQuery``
:raise: ``Unimplemented`` -- ``supports_item_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_item_query()`` is ``true``.*
"""
return # osid.assessment.ItemQuery
item_query = property(fget=get_item_query)
@abc.abstractmethod
def match_any_item(self, match):
"""Matches assessment banks that have any item assigned.
:param match: ``true`` to match banks with any item, ``false`` to match banks with no item
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_item_terms(self):
"""Clears all item terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
item_terms = property(fdel=clear_item_terms)
@abc.abstractmethod
def match_assessment_id(self, assessment_id, match):
"""Sets the assessment ``Id`` for this query.
:param assessment_id: an assessment ``Id``
:type assessment_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``assessment_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_assessment_id_terms(self):
"""Clears all assessment ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
assessment_id_terms = property(fdel=clear_assessment_id_terms)
@abc.abstractmethod
def supports_assessment_query(self):
"""Tests if an ``AssessmentQuery`` is available.
:return: ``true`` if an assessment query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_assessment_query(self):
"""Gets the query for an assessment.
Multiple retrievals produce a nested ``OR`` term.
:return: the assessment query
:rtype: ``osid.assessment.AssessmentQuery``
:raise: ``Unimplemented`` -- ``supports_assessment_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_assessment_query()`` is ``true``.*
"""
return # osid.assessment.AssessmentQuery
assessment_query = property(fget=get_assessment_query)
@abc.abstractmethod
def match_any_assessment(self, match):
"""Matches assessment banks that have any assessment assigned.
:param match: ``true`` to match banks with any assessment, ``false`` to match banks with no assessment
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_assessment_terms(self):
"""Clears all assessment terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
assessment_terms = property(fdel=clear_assessment_terms)
@abc.abstractmethod
def match_assessment_offered_id(self, assessment_offered_id, match):
"""Sets the assessment offered ``Id`` for this query.
:param assessment_offered_id: an assessment ``Id``
:type assessment_offered_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``assessment_offered_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_assessment_offered_id_terms(self):
"""Clears all assessment offered ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
assessment_offered_id_terms = property(fdel=clear_assessment_offered_id_terms)
@abc.abstractmethod
def supports_assessment_offered_query(self):
"""Tests if an ``AssessmentOfferedQuery`` is available.
:return: ``true`` if an assessment offered query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_assessment_offered_query(self):
"""Gets the query for an assessment offered.
Multiple retrievals produce a nested ``OR`` term.
:return: the assessment offered query
:rtype: ``osid.assessment.AssessmentOfferedQuery``
:raise: ``Unimplemented`` -- ``supports_assessment_offered_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_assessment_offered_query()`` is ``true``.*
"""
return # osid.assessment.AssessmentOfferedQuery
assessment_offered_query = property(fget=get_assessment_offered_query)
@abc.abstractmethod
def match_any_assessment_offered(self, match):
"""Matches assessment banks that have any assessment offering assigned.
:param match: ``true`` to match banks with any assessment offering, ``false`` to match banks with no offering
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_assessment_offered_terms(self):
"""Clears all assessment offered terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
assessment_offered_terms = property(fdel=clear_assessment_offered_terms)
@abc.abstractmethod
def match_ancestor_bank_id(self, bank_id, match):
"""Sets the bank ``Id`` for to match banks in which the specified bank is an acestor.
:param bank_id: a bank ``Id``
:type bank_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``bank_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_ancestor_bank_id_terms(self):
"""Clears all ancestor bank ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
ancestor_bank_id_terms = property(fdel=clear_ancestor_bank_id_terms)
@abc.abstractmethod
def supports_ancestor_bank_query(self):
"""Tests if a ``BankQuery`` is available.
:return: ``true`` if a bank query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_ancestor_bank_query(self):
"""Gets the query for an ancestor bank.
Multiple retrievals produce a nested ``OR`` term.
:return: the bank query
:rtype: ``osid.assessment.BankQuery``
:raise: ``Unimplemented`` -- ``supports_ancestor_bank_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_ancestor_bank_query()`` is ``true``.*
"""
return # osid.assessment.BankQuery
ancestor_bank_query = property(fget=get_ancestor_bank_query)
@abc.abstractmethod
def match_any_ancestor_bank(self, match):
"""Matches a bank that has any ancestor.
:param match: ``true`` to match banks with any ancestor banks, ``false`` to match root banks
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_ancestor_bank_terms(self):
"""Clears all ancestor bank terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
ancestor_bank_terms = property(fdel=clear_ancestor_bank_terms)
@abc.abstractmethod
def match_descendant_bank_id(self, bank_id, match):
"""Sets the bank ``Id`` for to match banks in which the specified bank is a descendant.
:param bank_id: a bank ``Id``
:type bank_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``bank_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_descendant_bank_id_terms(self):
"""Clears all descendant bank ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
descendant_bank_id_terms = property(fdel=clear_descendant_bank_id_terms)
@abc.abstractmethod
def supports_descendant_bank_query(self):
"""Tests if a ``BankQuery`` is available.
:return: ``true`` if a bank query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_descendant_bank_query(self):
"""Gets the query for a descendant bank.
Multiple retrievals produce a nested ``OR`` term.
:return: the bank query
:rtype: ``osid.assessment.BankQuery``
:raise: ``Unimplemented`` -- ``supports_descendant_bank_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_descendant_bank_query()`` is ``true``.*
"""
return # osid.assessment.BankQuery
descendant_bank_query = property(fget=get_descendant_bank_query)
@abc.abstractmethod
def match_any_descendant_bank(self, match):
"""Matches a bank that has any descendant.
:param match: ``true`` to match banks with any descendant banks, ``false`` to match leaf banks
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_descendant_bank_terms(self):
"""Clears all descendant bank terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
descendant_bank_terms = property(fdel=clear_descendant_bank_terms)
@abc.abstractmethod
def get_bank_query_record(self, bank_record_type):
"""Gets the bank query record corresponding to the given ``Bank`` record ``Type``.
Multiple record retrievals produce a nested ``OR`` term.
:param bank_record_type: a bank record type
:type bank_record_type: ``osid.type.Type``
:return: the bank query record
:rtype: ``osid.assessment.records.BankQueryRecord``
:raise: ``NullArgument`` -- ``bank_record_type`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unsupported`` -- ``has_record_type(bank_record_type)`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.records.BankQueryRecord
|
py | 1a3744ed53b7379aae6ce92cca82a773b4258b02 | # Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: [email protected]
# Maintained By: [email protected]
from ggrc import db
from .mixins import deferred, Base
class PopulationSample(Base, db.Model):
__tablename__ = 'population_samples'
response_id = deferred(
db.Column(db.Integer, db.ForeignKey('responses.id'), nullable=False),
'PopulationSample')
population_document_id = deferred(
db.Column(db.Integer, db.ForeignKey('documents.id')), 'PopulationSample')
population = deferred(db.Column(db.Integer), 'PopulationSample')
sample_worksheet_document_id = deferred(
db.Column(db.Integer, db.ForeignKey('documents.id')), 'PopulationSample')
samples = deferred(db.Column(db.Integer), 'PopulationSample')
sample_evidence_document_id = deferred(
db.Column(db.Integer, db.ForeignKey('documents.id')), 'PopulationSample')
_publish_attrs = [
'response',
'population_document',
'population',
'sample_worksheet_document',
'sample_evidence_document',
]
@classmethod
def eager_query(cls):
from sqlalchemy import orm
query = super(PopulationSample, cls).eager_query()
return query.options(
orm.subqueryload('response'),
orm.subqueryload('population_document'),
orm.subqueryload('sample_worksheet_document'),
orm.subqueryload('sample_evidence_document'))
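# Illustrative sketch only (not part of this model): how the eager_query() defined
# above might be used to fetch samples with their related documents preloaded. The
# filter below and the surrounding session handling are assumptions for illustration.
def _example_samples_for_response(response_id):
    """Hypothetical helper: eagerly load all population samples of one response."""
    return PopulationSample.eager_query().filter(
        PopulationSample.response_id == response_id).all()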
|
py | 1a374529f73e987752e0fdeafaf4d020b6959e05 | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import math
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from scipy.linalg import expm, sqrtm
from scipy.sparse.linalg import spsolve
plt.style.use('ggplot')
import matplotlib.pylab as pylab
data_lookup = pd.read_csv('Vol&Area_vs_depth_CSV.csv', header=0, sep=',')
def lookup_V(V):
V = V * 1000000
if V <= data_lookup.loc[0, 'Volume (Mm3)']:
h = data_lookup.loc[0, 'Water level (m +NAP)']
A_wet = data_lookup.loc[0, 'Area (m2)']
elif V >= data_lookup.loc[22, 'Volume (Mm3)']:
h = data_lookup.loc[22, 'Water level (m +NAP)']
A_wet = data_lookup.loc[22, 'Area (m2)']
else:
for i in np.arange(22, 0, -1):
if V <= data_lookup.loc[i, 'Volume (Mm3)']:
h = (data_lookup.loc[i, 'Water level (m +NAP)'] + data_lookup.loc[i - 1, 'Water level (m +NAP)']) / 2
A_wet = (data_lookup.loc[i, 'Area (m2)'] + data_lookup.loc[i - 1, 'Area (m2)']) / 2
else:
break
return h, A_wet
def lookup_h(h):
if h <= data_lookup.loc[0, 'Water level (m +NAP)']:
V = data_lookup.loc[0, 'Volume (Mm3)']
A_wet = data_lookup.loc[0, 'Area (m2)']
elif h >= data_lookup.loc[22, 'Water level (m +NAP)']:
V = data_lookup.loc[22, 'Volume (Mm3)']
A_wet = data_lookup.loc[22, 'Area (m2)']
else:
for i in np.arange(22, 0, -1):
if h <= data_lookup.loc[i, 'Water level (m +NAP)']:
V = (data_lookup.loc[i, 'Volume (Mm3)'] + data_lookup.loc[i-1, 'Volume (Mm3)'])/2
A_wet = (data_lookup.loc[i, 'Area (m2)'] + data_lookup.loc[i-1, 'Area (m2)'])/2
else:
break
V = V / 1000000
return V, A_wet
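## Brief usage sketch for the two lookup helpers above (illustrative numbers only):
## lookup_V maps a stored volume in Mm3 to (water level in m +NAP, wet area in m2),
## and lookup_h maps a water level back to (volume in Mm3, wet area in m2).
def example_lookup_roundtrip(volume_mm3=25.0):
    """Hypothetical check: convert a volume to a water level and back again."""
    level, area_from_volume = lookup_V(volume_mm3)
    volume_back, area_from_level = lookup_h(level)
    return level, area_from_volume, volume_back, area_from_level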
def read_Rhine(scenario, date_start, date_end):
## options for scenario: "Historical", "Reference", "2050GL" and "2050WH"
## date_start is starting year from which Rhine data is read
## date_end is last year included in the data (up to and including)
if scenario == 'Historical':
## Historical time series: 1824-nov-12 until 2021-mar-14
data0 = pd.read_csv('Waterstand_Historical_Lobith.DagGem.csv', sep=';', names=['Time', 'WL']) ## Read dataset
data0['Date'] = pd.DatetimeIndex(data0.Time).normalize() ## Time-field to date format (year-month-day)
data0.index = data0.loc[:, 'Date'] ## Set index column
data0 = data0.drop(columns=['Date', 'Time']) ## Drop columns that are (now) irrelevant
data0['WL'] = data0['WL'].replace(-99, np.nan) ## Replace the -99 missing-value marker in the water levels with np.nan
if scenario == 'Reference':
## Reference (2017) time series: 1911-oct-02 until 2011-oct-31
data0 = pd.read_csv('Waterstand_Ref2017.csv', sep=';', decimal=',', header=0, names=['Time', 'loc', 'parm', 'scen', 'unit', 'WL']) ## Read dataset
data0['Date'] = pd.to_datetime(data0['Time'], format='%d-%m-%Y')
data0.index = data0.loc[:, 'Date']
data0 = data0.drop(columns=['Date', 'Time', 'loc', 'parm', 'scen', 'unit'])
## data0['WL'] = data0['WL'].replace(',', '.')
data0['WL'] = data0['WL'].fillna(-99)
data0['WL'] = data0['WL'].astype(float)
data0['WL'] = data0['WL'].replace(-99, np.nan) ## restore the -99 placeholders to np.nan after the float conversion
if scenario == '2050GL':
## 2050 Rust GL time series: 1911-oct-02 until 2011-oct-31
data0 = pd.read_csv('Waterstand_Rust2050(GL).csv', sep=';', decimal=',', header=0,
names=['Time', 'loc', 'parm', 'scen', 'unit', 'WL']) ## Read dataset
data0['Date'] = pd.to_datetime(data0['Time'], format='%d-%m-%Y')
data0.index = data0.loc[:, 'Date']
data0 = data0.drop(columns=['Date', 'Time', 'loc', 'parm', 'scen', 'unit'])
data0['WL'] = data0['WL'].fillna(-99)
data0['WL'] = data0['WL'].astype(float)
data0['WL'] = data0['WL'].replace(-99, np.nan) ## restore the -99 placeholders to np.nan after the float conversion
if scenario == '2050WH':
## 2050 Stoom WH time series: 1911-oct-02 until 2011-oct-31
data0 = pd.read_csv('Waterstand_Stoom2050(WH).csv', sep=';', decimal=',', header=0, names=['Time', 'loc', 'parm', 'scen', 'unit', 'WL']) ## Read dataset
data0['Date'] = pd.to_datetime(data0['Time'], format='%d-%m-%Y')
data0.index = data0.loc[:, 'Date']
data0 = data0.drop(columns=['Date', 'Time', 'loc', 'parm', 'scen', 'unit'])
## data0['WL'] = data0['WL'].replace(',', '.')
data0['WL'] = data0['WL'].fillna(-99)
data0['WL'] = data0['WL'].astype(float)
data0['WL'] = data0['WL'].replace(-99, np.nan) ## restore the -99 placeholders to np.nan after the float conversion
## From the original data (data0) select the part to work with:
data = pd.DataFrame(data=data0.loc[str(date_start):str(date_end)])
return data
def read_climate(scenario, date_start, date_end):
## options for scenario: "Historical", "Reference", "2050GL" and "2050WH"
## date_start is starting year from which Rhine data is read
## date_end is last year included in the data (up to and including)
if scenario == 'Historical':
## Historical: 1957-jul-01 until 2021-jun-21 (present)
df = pd.read_csv('Climate_Historical_DeBilt.txt', sep=',', skipinitialspace=True)
df['Date'] = pd.to_datetime(df.YYYYMMDD, format='%Y%m%d')
df.index = df.loc[:, 'Date']
df = df.drop(columns=['STN', 'YYYYMMDD', 'DDVEC', 'FHVEC', 'FG', 'FHX', 'FHXH', 'FHN', 'FHNH', 'FXX', 'FXXH', 'TG', 'TN',
'TNH', 'TX', 'TXH', 'T10N', 'T10NH', 'SQ', 'SP', 'Q', 'DR', 'RHX', 'RHXH', 'PG', 'PX', 'PXH', 'PN',
'PNH', 'VVN', 'VVNH', 'VVX', 'VVXH', 'NG', 'UG', 'UX', 'UN', 'UXH', 'UNH', 'Date'])
df.columns = ['P', 'EV']
df = df.loc[str(date_start):str(date_end)]
df.P = df.P.replace(-1, 0) ## -1 was the value for P < 0.05mm, now this is set to 0mm
df.P = df.P*0.1 ## daily precipitation total (from units of 0.1 mm to mm)
df.EV = df.EV*0.1 ## Makkink reference crop evapotranspiration (from units of 0.1 mm to mm)
if scenario == 'Reference':
## Reference (2014): 1906-01-01 until 2014-12-31
df = pd.read_csv('Climate_Ref2014.csv', sep=',', header=0)
df['Datum'] = pd.to_datetime(df.Datum, format='%m/%d/%Y')
df.index = df.loc[:, 'Datum']
df = df.loc[str(date_start):str(date_end)]
df['P'] = df['G']
df['EV'] = df['Makkink (mm)']
df = df.drop(columns=['Datum', 'Makkink (mm)', 'G', 'H', 'H+', 'L'])
if scenario == '2050GL':
## Climate 2050 GL: 1906-01-01 until 2014-12-31
df = pd.read_csv('Climate_2050GL.csv', sep=',', header=0)
df['Datum'] = pd.to_datetime(df.Datum)
df.index = df.loc[:, 'Datum']
df = df.loc[str(date_start):str(date_end)]
df['P'] = df['G_center']
df['EV'] = df['Makkink (mm)']
df = df.drop(columns=['Datum', 'Makkink (mm)', 'G_lower', 'G_center', 'G_upper', 'H_lower', 'H_center', 'H_upper',
'Hplus_lower', 'Hplus_center', 'Hplus_upper', 'L_lower', 'L_center', 'L_upper'])
if scenario == '2050WH':
## Climate 2050 WH: 1906-01-01 until 2014-12-31
df = pd.read_csv('Climate_2050WH.csv', sep=',', header=0)
df['Datum'] = pd.to_datetime(df.Datum)
df.index = df.loc[:, 'Datum']
df = df.loc[str(date_start):str(date_end)]
df['P'] = df['G_center']
df['EV'] = df['Makkink (mm)']
df = df.drop(columns=['Datum', 'Makkink (mm)', 'G_lower', 'G_center', 'G_upper', 'H_lower', 'H_center', 'H_upper',
'Hplus_lower', 'Hplus_center', 'Hplus_upper', 'L_lower', 'L_center', 'L_upper'])
return df
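## Illustrative sketch of how the two readers above are typically combined; the
## scenario name and year range are example values only and assume the matching
## CSV files listed in the readers are present in the working directory.
def example_load_forcing(scenario='Reference', year_start=2000, year_end=2010):
    """Hypothetical helper: load Rhine water levels and climate forcing together."""
    rhine = read_Rhine(scenario, year_start, year_end)       # daily water level at Lobith
    climate = read_climate(scenario, year_start, year_end)   # daily P and Makkink EV (mm)
    return rhine, climate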
def groundwater(h_Rijnstrangen, Q_GWleft, Q_GWright, plot_phi=False, plot_q=False, plot_s=False):
## options for h_Rijnstrangen: value, most likely between 10.0 and 15.5 m +NAP
## Q_GWleft and Q_GWright respectively are the groundwater extraction on the left (southwest) and right (northeast) of the Rijnstrangen
## plot_phi, plot_q and plot_s can be True or False, dependent on if the heads (phi), groundwater flow (q) or seepage (s) have to be plotted
A_wet = lookup_h(h_Rijnstrangen)[1]
fraction = A_wet/A_tot
x_left = (2000 - (2000 * fraction)) / 2
x_right = 2000 - x_left
c_var = 10000
## c, (nLay by nSec) matrix of vertical resistance values of top layer and aquitards
## MULTI LAYER SYSTEM
# c = np.array([[1, 650, c_var, 80, c_var, 250, 225],
# [1.0e4, 3.2e4, 3.2e4, 3.2e4, 3.2e4, 1.0e4, 5.0e3],
# [1.0e2, 5.0e2, 1.0e3, 5.0e2, 5.0e2, 5.0e2, 5.0e2]])
## ONE LAYER SYSTEM
c = np.array([[1, 650, c_var, 80, c_var, 250, 225]])
## T, (nLay by nSec) matrix of transmissivity values
## MULTI LAYER SYSTEM
# kD = np.array([[250, 750, 750, 750, 750, 750, 750],
# [500, 100, 50, 50, 50, 50, 250],
# [400, 400, 500, 500, 500, 500, 500]])
## ONE LAYER SYSTEM
kD = np.array([[250, 750, 750, 750, 750, 750, 750]])
## h, (1 by nSec) vector of heads on top of each section (surface water level in m+NAP)
heads = np.array([9.2, 9.8, 9.8, h_Rijnstrangen, 9.7, 9.7, 9.1])
## Q, (nNod by nSec) matrix of nodal injections [L2/T]
## MULTI LAYER SYSTEM
# Q = np.array([[0, Q_GWleft, 0, 0, Q_GWright, 0],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0]])
## ONE LAYER SYSTEM
Q = np.array([[0, Q_GWleft, 0, 0, Q_GWright, 0]])
## x, (nNod by 1) vector of coordinates of intersection points except +/-inf
x = np.array([-3100, 0, x_left, x_right, 2000, 3000])
## X, vector of points where values will be computed
X = np.arange(-3700, 5510, 10)
nLay = len(kD[:, 0])
nSec = len(kD[0, :])
## include the outer sections to infinity
a = np.zeros((nLay, 1))
Q = np.concatenate((a, Q, a), axis=1)
x = np.append(x, math.inf)
x = np.append(-math.inf, x)
Nx = len(x)
H = np.ones((nLay, 1)) * heads
## Mid-section points are used to compute relative coordinates within sections
xMidSec = 0.5 * (x[:-1] + x[1:])
xMidSec[0] = x[1]
xMidSec[-1] = x[-2]
## System matrices for all sections
A = np.zeros((nLay, nLay, nSec))
for iSec in range(nSec):
a = 1 / (kD[:, iSec] * c[:, iSec])
p = np.append(c[1:nLay, iSec], math.inf)
b = 1 / (kD[:, iSec] * p)
A[:, :, iSec] = np.diag(a + b) - np.diag(a[1:nLay], -1) - np.diag(b[0:nLay - 1], 1)
## Generating and filling the coefficient matrix C
C = np.zeros((nLay * (2 * (Nx - 2)), nLay * (2 * (Nx - 2) + 2))) ## coefficient matrix
R = np.zeros((nLay * (2 * (Nx - 2)), 1)) ## right hand side vector
nNod = nSec - 1
for i in range(nNod):
## i is section N, also the left node number of the section
## j is right the node number of the section
## ii and jj point to the position within the total coefficient matrix
j = i + 1
ii = 2 * nLay * (i)
jj = ii + nLay
C[ii:jj, ii:jj] = +expm(-(x[j] - xMidSec[i]) * sqrtm(A[:, :, i]))
C[ii:jj, ii + nLay:jj + nLay] = +expm(+(x[j] - xMidSec[i]) * sqrtm(A[:, :, i]))
C[ii:jj, ii + 2 * nLay: jj + 2 * nLay] = -expm(-(x[j] - xMidSec[j]) * sqrtm(A[:, :, j]))
C[ii:jj, ii + 3 * nLay: jj + 3 * nLay] = -expm(+(x[j] - xMidSec[j]) * sqrtm(A[:, :, j]))
R[ii:jj] = np.vstack(-H[:, i] + H[:, j])
C[ii + nLay:jj + nLay, ii:jj] = np.matmul(np.matmul(-np.diag(kD[:, i]), sqrtm(A[:, :, i])),
expm(-(x[j] - xMidSec[i]) * sqrtm(A[:, :, i])))
C[ii + nLay:jj + nLay, ii + nLay:jj + nLay] = np.matmul(np.matmul(+np.diag(kD[:, i]), sqrtm(A[:, :, i])),
expm(+(x[j] - xMidSec[i]) * sqrtm(A[:, :, i])))
C[ii + nLay:jj + nLay, ii + 2 * nLay:jj + 2 * nLay] = np.matmul(
np.matmul(+np.diag(kD[:, j]), sqrtm(A[:, :, j])), expm(-(x[j] - xMidSec[j]) * sqrtm(A[:, :, j])))
C[ii + nLay:jj + nLay, ii + 3 * nLay:jj + 3 * nLay] = np.matmul(
np.matmul(-np.diag(kD[:, j]), sqrtm(A[:, :, j])), expm(+(x[j] - xMidSec[j]) * sqrtm(A[:, :, j])))
R[ii + nLay:jj + nLay] = np.vstack(Q[:, j])
## Solve the system, using all layers and leaving out the outer column as they have no freedom, because the sections extend to infinity
COEF = np.vstack(spsolve((C[:, nLay:-nLay]), R))
COEF = np.concatenate((np.zeros((nLay, 1)), COEF, np.zeros((nLay, 1))))
## output:
## phi [H] = computed heads, a (nLay by length(X)) matrix
## q [L2/T] = computed flows, a (nLay by length(X)) matrix
## s [L/T] = downward positive seepage rate through top of each layer, a nLay by length(X) matrix
phi = np.zeros((nLay, len(X)))
q = np.zeros((nLay, len(X)))
s = np.zeros((nLay, len(X)))
for i in range(len(X)):
iSec = np.nonzero(np.logical_and(X[i] > x[:-1], X[i] <= x[1:]))
iSec = iSec[0]
iSec = iSec[0]
k = 2 * nLay * (iSec)
l = k + nLay
C1 = np.matmul(expm(-(X[i] - xMidSec[iSec]) * sqrtm(A[:, :, iSec])), COEF[k:l])
C2 = np.matmul(expm(+(X[i] - xMidSec[iSec]) * sqrtm(A[:, :, iSec])), COEF[k + nLay:l + nLay])
C3 = np.matmul(sqrtm(A[:, :, iSec]), C1)
C4 = np.matmul(sqrtm(A[:, :, iSec]), C2)
phi[:, i] = np.hstack(C1) + np.hstack(C2) + (H[:, iSec])
q[:, i] = np.hstack(np.matmul(np.diag(kD[:, iSec]), (C3 - C4)))
sNet = np.matmul(np.matmul(np.diag(kD[:, iSec]), sqrtm(A[:, :, iSec])), (C3 + C4))
s[nLay - 1, i] = sNet[nLay - 1]
for iLay in np.arange(nLay - 2, -1, -1):
s[iLay, i] = sNet[iLay] + s[iLay + 1, i]
## MULTI LAYER SYSTEM
# qsum = q[0, :] + q[1, :] + q[2, :]
## ONE LAYER SYSTEM
qsum = q[0, :]
qleft_Rijnstr = qsum[371] ## Q left of Rijnstrangen
qright_Rijnstr = qsum[570] ## Q right of Rijnstrangen
qtot = (qright_Rijnstr - qleft_Rijnstr) * 12000 / 1000000 ## gives q_total out of Rijnstrangen in Mm3/d
Q_extracted = (-Q_GWleft - Q_GWright) * 12000 / 1000000
    qleft_region = qsum[370] ## Q just left (southwest) of the Rijnstrangen, coming from the surrounding region
    qright_region = qsum[571] ## Q just right (northeast) of the Rijnstrangen, coming from the surrounding region
# qtot_region = (qleft_region - qright_region) * 12000 / 1000000 ## gives q_total coming from surrounding region in Mm3/d
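    ## Split the extracted groundwater between water supplied by the Rijnstrangen itself and water
    ## drawn from the surrounding region, based on the flow directions on either side of the
    ## extraction nodes at x = 0 m and x = 2000 m (positive flux = northward).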
if qleft_region > 0 and qright_region < 0:
Q_perc_Rijnstr = qtot / Q_extracted
if qleft_region > 0 and qright_region > 0:
Q_perc_Rijnstr = (qtot - (qright_region*12000/1000000)) / Q_extracted
if qleft_region < 0 and qright_region < 0:
Q_perc_Rijnstr = (qtot - (-qleft_region * 12000 / 1000000)) / Q_extracted
if qleft_region < 0 and qright_region > 0:
Q_perc_Rijnstr = 1
Q_perc_region = 1 - Q_perc_Rijnstr
if plot_phi is True:
plt.figure(figsize=(15, 10))
plt.axvline(-3700, c='dimgrey', linestyle=':')
plt.axvline(-3100, c='dimgrey', linestyle=':')
plt.axvline(0, c='dimgrey', linestyle=':', label='Fixed section separation')
plt.axvline(x_left, c='darkgrey', linestyle=':', label='Flexible section separation')
plt.axvline(x_right, c='darkgrey', linestyle=':')
plt.axvline(2000, c='dimgrey', linestyle=':')
plt.axvline(3000, c='dimgrey', linestyle=':')
plt.axvline(5500, c='dimgrey', linestyle=':')
# v = phi[0,:].min()
v = 8.9
marge = v*0.005
plt.plot([-3700, -3100], [v - marge, v - marge], c='grey')
plt.plot([0, 2000], [v - marge, v - marge], c='grey')
plt.text(-3400, v, 'Rhine', color='grey', horizontalalignment='center', fontsize=14)
plt.text(1000, v, 'Rijnstrangen', color='grey', horizontalalignment='center', fontsize=14)
plt.plot(X, phi[0, :], label='Modelled head in first aquifer \n for average situation', c='darkred', linestyle='--', linewidth=2)
        ## ONLY FOR MULTI LAYER SYSTEM:
# plt.plot(X, phi[1, :], label='Average head in second aquifer (modelled)', c='royalblue', linestyle='-.', linewidth=2)
# plt.plot(X, phi[2, :], label='Average head in third aquifer (modelled)', c='darkgreen', linestyle=':', linewidth=2)
# plt.title('Heads in aquifers', fontsize=22)
plt.xlabel('Distance along cross-section [m]', size=14)
plt.ylabel('Head [m]', size=14)
leg = plt.legend(fontsize=14, loc='best')
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
leg.get_lines()[0].set_linewidth(2)
leg.get_lines()[1].set_linewidth(2)
leg.get_lines()[2].set_linewidth(4)
plt.show()
if plot_q is True:
## To go from m2/d to Mm3/y:
# qleft_region = qleft_region*12000*365/1000000
# qleft_Rijnstr = qleft_Rijnstr*12000*365/1000000
# qright_region = qright_region*12000*365/1000000
# qright_Rijnstr = qright_Rijnstr*12000*365/1000000
# q = q*12000*365/1000000
plt.figure(figsize=(15, 8))
plt.axhline(0, linewidth=2, c='white')
plt.plot([0, 0, 2000, 2000], [qleft_Rijnstr, qleft_region, qright_Rijnstr, qright_region], 'o', c='black')
plt.text(-400, qleft_Rijnstr, round(qleft_Rijnstr, 1), size=12)
plt.text(2100, qright_Rijnstr, round(qright_Rijnstr, 1), size=12)
plt.text(100, qleft_region, round(qleft_region, 1), size=12)
plt.text(1600, qright_region, round(qright_region, 1), size=12)
plt.axvline(0, c='dimgrey', linestyle=':', label='Fixed section separation')
plt.axvline(x_left, c='darkgrey', linestyle=':', label='Flexible section separation')
plt.axvline(x_right, c='darkgrey', linestyle=':')
plt.axvline(2000, c='dimgrey', linestyle=':')
plt.axvline(3000, c='dimgrey', linestyle=':')
plt.axvline(5500, c='dimgrey', linestyle=':')
plt.axvline(-3700, c='dimgrey', linestyle=':')
plt.axvline(-3100, c='dimgrey', linestyle=':')
v = qsum[:].min() + 0.08*qsum[:].min()
marge = v*0.02
plt.plot([0, 2000], [v + marge, v + marge], c='grey')
plt.plot([-3700, -3100], [v + marge, v + marge], c='grey')
plt.text(1000, v, 'Rijnstrangen', color='grey', horizontalalignment='center', fontsize=14)
plt.text(-3400, v, 'Rhine', color='grey', horizontalalignment='center', fontsize=14)
# perc_Rijnstr = str(round(Q_perc_Rijnstr, 2))
# perc_region = str(round(Q_perc_region, 2))
# plt.text(-3500, max(q[0, :]+2*marge), 'Fraction from Rijnstrangen: %s \n Fraction from region: %s' %(perc_Rijnstr, perc_region))
plt.plot(X, q[0, :], label='q in first aquifer', c='darkred', linestyle='--')
        ## ONLY FOR MULTI LAYER SYSTEM:
# plt.plot(X, q[1, :], label='q in second aquifer', c='royalblue', linestyle='-.')
# plt.plot(X, q[2, :], label='q in third aquifer', c='darkgreen', linestyle=':')
# plt.plot(X, qsum, label='Total q', c='black', linestyle='-', linewidth=2)
plt.suptitle('Groundwater flux', fontsize=22, y=0.95)
plt.title('positive = northward; negative = southward', fontsize=18)
plt.xlabel('Distance along cross-section [m]', size=14)
plt.ylabel('Flux [m$^2$/d]', size=14)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
leg = plt.legend(fontsize=14, loc='best')
leg.get_lines()[0].set_linewidth(2)
leg.get_lines()[1].set_linewidth(2)
leg.get_lines()[2].set_linewidth(4)
        ## ONLY FOR MULTI LAYER SYSTEM:
# leg.get_lines()[3].set_linewidth(3.5)
# leg.get_lines()[4].set_linewidth(3.5)
# leg.get_lines()[5].set_linewidth(3.5)
plt.show()
if plot_s is True:
plt.figure(figsize=(15, 10))
plt.axvline(0, c='dimgrey', linestyle=':', label='Fixed section separation')
plt.axvline(x_left, c='darkgrey', linestyle=':', label='Flexible section separation')
plt.axvline(x_right, c='darkgrey', linestyle=':')
plt.axvline(2000, c='dimgrey', linestyle=':')
plt.axvline(3000, c='dimgrey', linestyle=':')
        plt.axvline(5500, c='dimgrey', linestyle=':')
plt.axvline(-3700, c='dimgrey', linestyle=':')
plt.axvline(-3100, c='dimgrey', linestyle=':')
v = s[0,:].min() -0.005
marge = -0.001
plt.plot([0, 2000], [v + marge, v + marge], c='grey')
plt.plot([-3700, -3100], [v + marge, v + marge], c='grey')
plt.text(1000, v, 'Rijnstrangen', color='grey', horizontalalignment='center', fontsize=14)
plt.text(-3400, v, 'Rhine', color='grey', horizontalalignment='center', fontsize=14)
plt.plot(X, s[0, :], label='Seepage from first aquifer', c='darkred', linestyle='--', linewidth=2)
## ONLY FOR MULTI LAYER SYSTEM
# plt.plot(X, s[1, :], label='Seepage from second aquifer', c='royalblue', linestyle='-.', linewidth=2)
# plt.plot(X, s[2, :], label='Seepage from third aquifer', c='darkgreen', linestyle=':', linewidth=2)
plt.title('Seepage', fontsize=22)
plt.xlabel('Distance along cross-section [m]')
plt.ylabel('Seepage [m/d]')
leg = plt.legend(fontsize=14, loc='best')
leg.get_lines()[0].set_linewidth(2)
leg.get_lines()[1].set_linewidth(2)
leg.get_lines()[2].set_linewidth(3.5)
## ONLY FOR MULTI LAYER SYSTEM
# leg.get_lines()[3].set_linewidth(3.5)
# leg.get_lines()[4].set_linewidth(3.5)
plt.show()
return qtot, qsum, x_left, x_right, Q_perc_region
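## Example call (illustrative only; the extraction of 2 x 25 Mm3/year is an assumed value,
## converted to m2/d over the 12 km model strip as elsewhere in this script):
# q_example = -25000000 / 365 / 12000
# qtot, qsum, x_left, x_right, Q_perc_region = groundwater(h_Rijnstrangen=12.5, Q_GWleft=q_example, Q_GWright=q_example)
# print('Net groundwater flux out of the Rijnstrangen:', round(qtot, 4), 'Mm3/d')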
def volume_Rijnstrangen(scenario, V_max, weir_width, weir_height, date_from, date_to, Q_GWleft, Q_GWright):
## options for scenario: "Historical", "Reference", "2050GL" and "2050WH"
## all other input values are defined in the parameter section below
df_volume = read_climate(scenario=scenario, date_start=date_from, date_end=date_to)
df_volume['P'] = df_volume.P / 1000 * A_tot / 1000000 ## mm/day to m/day to m3/day to Mm3/day
df_volume['P_cumulative'] = df_volume.P * 0
df_volume['WL'] = read_Rhine(scenario=scenario, date_start=date_from, date_end=date_to).WL
df_volume['Q'] = df_volume.P * 0
df_volume['Q_cumulative'] = df_volume.Q * 0
df_volume['Outflow'] = df_volume.Q * 0
df_volume['Outflow_cumulative'] = df_volume.Q * 0
df_volume['E0'] = df_volume.Q * 0
df_volume['E0_cumulative'] = df_volume.Q * 0
df_volume['V'] = df_volume.Q * 0
df_volume['V_cumulative'] = df_volume.Q * 0
df_volume['h'] = df_volume.Q * 0
df_volume['GW'] = df_volume.Q * 0
df_volume['GW_cumulative'] = df_volume.Q * 0
df_volume['perc_reg'] = df_volume.Q * 0
gw_dict = {} ## this is a dictionary
perc_dict = {}
min_h_rijnstrangen = 10.125
max_h_rijnstrangen = 15.375
step_size_h_rijnstrangen = 0.25
gw_dict[10] = groundwater(10, Q_GWleft, Q_GWright, plot_phi=False, plot_q=False, plot_s=False)[0]
perc_dict[10] = groundwater(10, Q_GWleft, Q_GWright, plot_phi=False, plot_q=False, plot_s=False)[4]
    gw_dict[15] = groundwater(15, Q_GWleft, Q_GWright, plot_phi=False, plot_q=False, plot_s=False)[0]
    perc_dict[15] = groundwater(15, Q_GWleft, Q_GWright, plot_phi=False, plot_q=False, plot_s=False)[4]
for h_rijnstrangen in np.arange(min_h_rijnstrangen, max_h_rijnstrangen + step_size_h_rijnstrangen, step_size_h_rijnstrangen):
gw_dict[h_rijnstrangen] = groundwater(h_rijnstrangen, Q_GWleft, Q_GWright, plot_phi=False, plot_q=False, plot_s=False)[0]
perc_dict[h_rijnstrangen] = groundwater(h_rijnstrangen, Q_GWleft, Q_GWright, plot_phi=False, plot_q=False, plot_s=False)[4]
V = V_start
E0_cumulative = 0
GW_cumulative = 0
P_cumulative = 0
Outflow_cumulative = 0
Q_cumulative = 0
Q_daycount = 0
for index, row in df_volume.iterrows():
h = lookup_V(V)[0]
row['h'] = h
A_wet = lookup_V(V)[1]
## Makkink evaporation times correction factor gives evaporation in mm/day, to m/day, times m2 gives m3/day, to Mm3/day
E0 = ((row['EV'] * c_EV_land / 1000 * (A_tot - A_wet)) + (row['EV'] * c_EV_water / 1000 * A_wet)) / 1000000
row['E0'] = E0
E0_cumulative += E0
row['E0_cumulative'] = E0_cumulative
P_cumulative += row['P']
row['P_cumulative'] = P_cumulative
if row['WL'] > weir_height and row['WL'] > row['h']:
if V >= V_max:
Q = 0
else:
h1 = row['WL'] - weir_height ## h1 = upstream water level = water level Rhine
h3 = row['h'] - weir_height ## h3 = downstream water level = water level Rijnstrangen
## For free flowing weir (volkomen overlaat):
if h3 <= 2/3*h1:
Q_old = 1.7 * discharge_coeff_free * weir_width * h1**(3/2) ## m3/sec
Q = Q_old * 60 * 60 * 24 / 1000000 ## Mm3/day
## For submerged weir flow (onvolkomen overlaat):
if h3 > 2/3*h1:
Q_old = discharge_coeff_subm * weir_width * h3 * np.sqrt(2*9.81*(h1 - h3)) ## m3/sec
Q = Q_old * 60 * 60 * 24 / 1000000 ## Mm3/day
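                ## Sanity check of the free-flow formula (assumed numbers, not model output):
                ## with discharge_coeff_free = 1, weir_width = 500 m and h1 = 1.0 m,
                ## Q_old = 1.7 * 1 * 500 * 1.0**1.5 = 850 m3/s, i.e. roughly 73 Mm3/day.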
row['Q'] = Q ## store Q in dataframe
GW = gw_dict[h]
row['GW'] = GW
GW_cumulative += GW
row['GW_cumulative'] = GW_cumulative
perc_reg = perc_dict[h]
row['perc_reg'] = perc_reg
V += row['Q'] - E0 + row['P'] - GW
if row['Q'] > 0:
h_new = lookup_V(V)[0]
if h_new > row['WL']:
h_cor = row['WL']
V_cor = lookup_h(h_cor)[0]
Q_cor = row['Q'] - (V - V_cor)
if Q_cor > 0:
row['Q'] = row['Q'] - (V - V_cor)
V = V_cor
if V >= V_max:
row['Q'] = row['Q'] - (V - V_max)
V = V_max
if V <= 0:
V = 0
if row['Q'] < 0:
row['Outflow'] = -row['Q']
row['Q'] = 0
Outflow_cumulative += row['Outflow']
row['Outflow_cumulative'] = Outflow_cumulative
if row['Q'] > 0:
            Q_daycount += 1  ## count days with inflow over the weir
Q_cumulative += row['Q']
row['Q_cumulative'] = Q_cumulative
row['V'] = V
# print('Precipitation mean:', round(np.mean(df_volume.P*1000000/A_tot*1000),3), 'mm/day')
# print('Evaporation mean:', round(np.mean(df_volume.E0 * 1000000 / A_tot * 1000), 3), 'mm/day')
# print('Inflow mean:', round(np.mean(df_volume.Q * 1000000 / A_tot * 1000), 3), 'mm/day')
# print('Groundwater mean:', round(np.mean(df_volume.GW * 1000000 / A_tot * 1000), 3), 'mm/day')
# print('Of which', round((Q_GWright + Q_GWleft) * 12000 / A_tot * 1000, 3), 'mm/day is extracted for water use')
# print('Outflow mean:', round(np.mean(df_volume.Outflow * 1000000 / A_tot * 1000), 3), 'mm/day')
# print(Q_daycount)
# print('mean volume:', df_volume['V'].mean())
return df_volume
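## Example call (illustrative only; the neutral settings below mirror the parameter section further down):
# wb = volume_Rijnstrangen(scenario='Historical', V_max=58, weir_width=500, weir_height=12.5,
#                          date_from=1960, date_to=2009,
#                          Q_GWleft=-25000000/365/12000, Q_GWright=-25000000/365/12000)
# print(wb[['V', 'h']].describe())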
def plot_volume_Rijnstrangen(scenario, date_from, date_to, Q_GWleft, Q_GWright, plottype):
## options for scenario: "Historical", "Reference", "2050GL" and "2050WH"
## options for plottype: "normal", "cumulative" and "multiple"
## all other input values are defined in the parameter section below
if plottype == 'normal':
df_volume = volume_Rijnstrangen(scenario=scenario, V_max=V_max, weir_width=weir_width, weir_height=weir_height, date_from=date_from, date_to=date_to, Q_GWleft=Q_GWleft, Q_GWright=Q_GWright)
params = {'legend.fontsize': 14,
'axes.labelsize': 14,
'xtick.labelsize': 14,
'ytick.labelsize': 14}
pylab.rcParams.update(params)
## plot water levels:
plt.figure()
plt.plot(df_volume.h)
plt.title('Water level')
## plot percentage of extracted gw coming from surroundings
plt.figure()
plt.plot(df_volume.perc_reg)
plt.ylim(-0.05, 1.05)
plt.title('Fraction of extracted water \n coming from surrounding Region')
## plot whole water balance:
# fig, axs = plt.subplots(3, 1, gridspec_kw={'height_ratios': [1, 0.5, 2]})
fig, axs = plt.subplots(2, 1, gridspec_kw={'height_ratios': [1, 2]})
# axs[1].plot(df_volume.perc_reg, color='purple')
# axs[1].set_ylim(-0.05, 1.05)
# axs[1].set_ylabel('Fraction from \n surroundings')
axs[0].set_ylabel('Volume in \n Rijnstrangen [Mm$^3$]')
axs[0].plot(df_volume.V, color='red', label='Volume')
axs[0].axhline(y=V_max, xmin=0.05, xmax=0.95, color='darksalmon', linestyle='--', label='Max. volume')
leg = axs[0].legend(loc='lower right')
leg.get_lines()[0].set_linewidth(6)
leg.get_lines()[1].set_linewidth(4)
axs[1].set_ylabel('Fluxes [mm/day]')
axs[1].plot(df_volume.P * 1000000 / A_tot * 1000, color='lightblue', label='Q$_{Precipitation}$')
axs[1].plot(-df_volume.E0 * 1000000 / A_tot * 1000, color='darkgreen', label='Q$_{Evaporation}$')
axs[1].plot((df_volume.Q - df_volume.Outflow) * 1000000 / A_tot * 1000 / 10, color='blue',
label='Q$_{Rhine,net}$ / 10')
axs[1].plot(-df_volume.GW * 1000000 / A_tot * 1000, color='black', label='Q$_{Groundwater}$')
# axs[1].set_ylim(-12, 50)
leg = axs[1].legend(loc='upper right')
leg.get_lines()[0].set_linewidth(6)
leg.get_lines()[1].set_linewidth(6)
leg.get_lines()[2].set_linewidth(6)
leg.get_lines()[3].set_linewidth(6)
plt.show()
if plottype == 'cumulative':
df_volume = volume_Rijnstrangen(scenario=scenario, V_max=V_max, weir_width=weir_width, weir_height=weir_height, date_from=date_from, date_to=date_to, Q_GWleft=Q_GWleft, Q_GWright=Q_GWright)
plt.figure()
plt.plot(df_volume.P_cumulative, color='lightblue', label='Q$_{Precipitation}$')
plt.fill_between(df_volume.index, df_volume.P_cumulative, 0, color='lightblue', alpha=0.3)
plt.plot((df_volume.Q_cumulative - df_volume.Outflow_cumulative) + df_volume.P_cumulative, color='blue', label='Q$_{Rhine,net}$')
plt.fill_between(df_volume.index, (df_volume.Q_cumulative - df_volume.Outflow_cumulative) + df_volume.P_cumulative, df_volume.P_cumulative, color='blue', alpha=0.3)
plt.plot(-df_volume.E0_cumulative, color='darkgreen', label='Q$_{Evaporation}$')
plt.fill_between(df_volume.index, -df_volume.E0_cumulative, 0, color='darkgreen', alpha=0.3)
plt.plot(-df_volume.GW_cumulative - df_volume.E0_cumulative, color='black', label='Q$_{Groundwater}$')
plt.fill_between(df_volume.index, -df_volume.E0_cumulative, -df_volume.GW_cumulative - df_volume.E0_cumulative, color='black', alpha=0.3)
plt.plot((df_volume.V) * 10, ':', c='red', label='Resulting volume Rijnstrangen $\cdot$ 10')
plt.legend(loc='best')
plt.ylabel('Volume [Mm$^3$]')
leg = plt.legend()
leg.get_lines()[0].set_linewidth(5)
leg.get_lines()[1].set_linewidth(5)
leg.get_lines()[2].set_linewidth(5)
leg.get_lines()[3].set_linewidth(5)
leg.get_lines()[4].set_linewidth(4)
plt.show()
if plottype == 'multiple':
params = {'legend.fontsize': 14,
'axes.labelsize': 14,
'xtick.labelsize': 14,
'ytick.labelsize': 14}
pylab.rcParams.update(params)
df_volume_hist = volume_Rijnstrangen(scenario='Historical', V_max=V_max, weir_width=weir_width, weir_height=weir_height, date_from=date_from, date_to=date_to, Q_GWleft=Q_GWleft, Q_GWright=Q_GWright)
df_volume_ref = volume_Rijnstrangen(scenario='Reference', V_max=V_max, weir_width=weir_width, weir_height=weir_height, date_from=date_from, date_to=date_to, Q_GWleft=Q_GWleft, Q_GWright=Q_GWright)
df_volume_2050GL = volume_Rijnstrangen(scenario='2050GL', V_max=V_max, weir_width=weir_width, weir_height=weir_height, date_from=date_from, date_to=date_to, Q_GWleft=Q_GWleft, Q_GWright=Q_GWright)
df_volume_2050WH = volume_Rijnstrangen(scenario='2050WH', V_max=V_max, weir_width=weir_width, weir_height=weir_height, date_from=date_from, date_to=date_to, Q_GWleft=Q_GWleft, Q_GWright=Q_GWright)
## plot water levels:
plt.figure()
plt.plot(df_volume_hist.h, color='darkslategrey')
plt.plot(df_volume_ref.h, color='royalblue')
plt.plot(df_volume_2050GL.h, color='olivedrab')
plt.plot(df_volume_2050WH.h, color='firebrick')
plt.title('Water level')
## plot percentage of extracted gw coming from surroundings
plt.figure()
plt.plot(df_volume_hist.perc_reg, '.--', color='darkslategrey')
plt.plot(df_volume_ref.perc_reg, '.--', color='royalblue')
plt.plot(df_volume_2050GL.perc_reg, '.--', color='olivedrab')
plt.plot(df_volume_2050WH.perc_reg, '.--', color='firebrick')
plt.ylim(-0.05, 1.05)
plt.title('Fraction of extracted water \n coming from surrounding Region')
## plot WB only
plt.figure(figsize=(16,4))
plt.ylabel('Volume in \n Rijnstrangen [Mm$^3$]')
plt.plot(df_volume_hist.V, color='darkslategrey', label='Historical')
plt.plot(df_volume_ref.V, color='royalblue', label='Reference')
plt.plot(df_volume_2050GL.V, color='olivedrab', label='2050 G$_L$')
plt.plot(df_volume_2050WH.V, color='firebrick', label='2050 W$_H$')
plt.ylim(-3, 63)
plt.axhline(y=V_max, xmin=0.05, xmax=0.95, color='darksalmon', linestyle='--', label='Max. volume')
leg = plt.legend(loc='lower right')
leg.get_lines()[0].set_linewidth(6)
leg.get_lines()[1].set_linewidth(6)
leg.get_lines()[2].set_linewidth(6)
leg.get_lines()[3].set_linewidth(6)
leg.get_lines()[4].set_linewidth(3)
## plot WB-duration line only
df_descending_hist = df_volume_hist.sort_values('V', ascending=False)
df_descending_hist.index = np.linspace(0, len(df_descending_hist.V), len(df_descending_hist.V)) / len(df_descending_hist.V) * 100
df_descending_ref = df_volume_ref.sort_values('V', ascending=False)
df_descending_ref.index = np.linspace(0, len(df_descending_ref.V), len(df_descending_ref.V)) / len(df_descending_ref.V) * 100
df_descending_2050GL = df_volume_2050GL.sort_values('V', ascending=False)
df_descending_2050GL.index = np.linspace(0, len(df_descending_2050GL.V), len(df_descending_2050GL.V)) / len(df_descending_2050GL.V) * 100
df_descending_2050WH = df_volume_2050WH.sort_values('V', ascending=False)
df_descending_2050WH.index = np.linspace(0, len(df_descending_2050WH.V), len(df_descending_2050WH.V)) / len(df_descending_2050WH.V) * 100
plt.figure(figsize=(13, 8))
plt.ylabel('Volume in \n Rijnstrangen [Mm$^3$]')
plt.xlabel('Percentage of time volume is exceeded')
plt.plot(df_descending_hist.V, color='darkslategrey', label='Historical', linewidth=2)
plt.plot(df_descending_ref.V, color='royalblue', label='Reference', linewidth=2)
plt.plot(df_descending_2050GL.V, color='olivedrab', label='2050 G$_L$', linewidth=2)
plt.plot(df_descending_2050WH.V, color='firebrick', label='2050 W$_H$', linewidth=2)
plt.ylim(-3, 62)
plt.axhline(y=V_max, xmin=0.05, xmax=0.95, color='darksalmon', linestyle='--', label='Max. volume')
leg = plt.legend(loc='best')
leg.get_lines()[0].set_linewidth(6)
leg.get_lines()[1].set_linewidth(6)
leg.get_lines()[2].set_linewidth(6)
leg.get_lines()[3].set_linewidth(6)
leg.get_lines()[4].set_linewidth(3)
## plot whole water balance with fluxes:
fig, axs = plt.subplots(2, 1, figsize=(16, 9), gridspec_kw={'height_ratios': [1, 2]})
axs[0].set_ylabel('Volume in \n Rijnstrangen [Mm$^3$]')
axs[0].plot(df_volume_hist.V, color='darkslategrey', label='Historical')
axs[0].plot(df_volume_ref.V, color='royalblue', label='Reference')
axs[0].plot(df_volume_2050GL.V, color='olivedrab', label='2050 G$_L$')
axs[0].plot(df_volume_2050WH.V, color='firebrick', label='2050 W$_H$')
axs[0].set_ylim(-3, 63)
axs[0].axhline(y=V_max, xmin=0.05, xmax=0.95, color='darksalmon', linestyle='--', label='Max. volume')
leg = axs[0].legend(loc='lower right')
leg.get_lines()[0].set_linewidth(6)
leg.get_lines()[1].set_linewidth(6)
leg.get_lines()[2].set_linewidth(6)
leg.get_lines()[3].set_linewidth(6)
leg.get_lines()[4].set_linewidth(3)
axs[1].set_ylabel('Fluxes [mm/day]')
axs[1].plot(df_volume_hist.P * 1000000 / A_tot * 1000, color='lightblue', label='Q$_{Precipitation}$')
axs[1].plot(-df_volume_hist.E0 * 1000000 / A_tot * 1000, color='darkgreen', label='Q$_{Evaporation}$')
axs[1].plot((df_volume_hist.Q - df_volume_hist.Outflow) * 1000000 / A_tot * 1000 / 50, color='blue', label='Q$_{Rhine,net}$ / 50')
axs[1].plot(-df_volume_hist.GW * 1000000 / A_tot * 1000, color='black', label='Q$_{Groundwater}$')
leg = axs[1].legend(loc='upper right')
leg.get_lines()[0].set_linewidth(6)
leg.get_lines()[1].set_linewidth(6)
leg.get_lines()[2].set_linewidth(6)
leg.get_lines()[3].set_linewidth(6)
plt.show()
return
def plot_volume_narratives(V_max, Q_GW, date_from, date_to):
df_volume = volume_Rijnstrangen(scenario='2050GL', V_max=V_max, weir_width=weir_width, weir_height=weir_height,
date_from=date_from, date_to=date_to, Q_GWleft=Q_GW, Q_GWright=Q_GW)
params = {'legend.fontsize': 14,
'axes.labelsize': 14,
'xtick.labelsize': 14,
'ytick.labelsize': 14}
pylab.rcParams.update(params)
## plot water levels:
plt.figure()
plt.plot(df_volume.h)
plt.title('Water level')
## plot percentage of extracted gw coming from surroundings
plt.figure()
plt.plot(df_volume.perc_reg)
plt.ylim(-0.05, 1.05)
plt.title('Fraction of extracted water \n coming from surrounding Region')
## plot whole water balance:
fig, axs = plt.subplots(3, 1, gridspec_kw={'height_ratios': [1, 1, 2]})
# print('Vmax:', V_max, 'Quse:', 2*Q_GW*365*12000, 'Average percentage from region:', round(np.mean(df_volume.perc_reg),4)*100)
axs[0].set_ylabel('Volume in \n Rijnstrangen [Mm$^3$]')
axs[0].plot(df_volume.V, color='red', label='Volume')
axs[0].axhline(y=V_max, xmin=0.05, xmax=0.95, color='darksalmon', linestyle='--', label='Max. volume')
leg = axs[0].legend(loc='lower right')
leg.get_lines()[0].set_linewidth(6)
leg.get_lines()[1].set_linewidth(4)
axs[1].plot(df_volume.perc_reg, color='purple')
axs[1].set_ylim(-0.05, 1.05)
axs[1].set_ylabel('Fraction from \n surroundings')
axs[2].set_ylabel('Fluxes [mm/day]')
axs[2].plot(df_volume.P * 1000000 / A_tot * 1000, color='lightblue', label='Q$_{Precipitation}$')
axs[2].plot(-df_volume.E0 * 1000000 / A_tot * 1000, color='darkgreen', label='Q$_{Evaporation}$')
axs[2].plot((df_volume.Q - df_volume.Outflow) * 1000000 / A_tot * 1000 / 50, color='blue',
label='Q$_{Rhine,net}$ / 50')
axs[2].plot(-df_volume.GW * 1000000 / A_tot * 1000, color='black', label='Q$_{Groundwater}$')
leg = axs[2].legend(loc='upper right')
leg.get_lines()[0].set_linewidth(6)
leg.get_lines()[1].set_linewidth(6)
leg.get_lines()[2].set_linewidth(6)
leg.get_lines()[3].set_linewidth(6)
plt.show()
## PARAMETERS (don't change):
A_tot = 28646820  ## total basin area [m2] (approx. 28.6 km2)
discharge_coeff_free = 1  ## discharge coefficient for free-flowing weir [-]
discharge_coeff_subm = 1  ## discharge coefficient for submerged weir flow [-]
c_EV_land = 0.85  ## Makkink crop factor for land evaporation [-]
c_EV_water = 1.25  ## Makkink factor for open-water evaporation [-]
## VARIABLES (change :-) ):
## time span
start = 1960
end = 2009
## weir configuration
weir_width = 500 ## 500 = neutral
weir_height = 12.5 ## 12.5 = neutral
## basin
V_max = 58 ## 58 = neutral = corresponding to water level of 14.0 m +NAP
V_start = 22300000 / 1000000 ## 22.3 = neutral = corresponding to water level of 12.5 m +NAP
## water use
Q_GWleft = -25000000/365/12000  ## -25 Mm3/year, converted to m2/d over the 12 km model strip
Q_GWright = -25000000/365/12000  ## -25 Mm3/year, converted to m2/d over the 12 km model strip
# Q_GWleft = 0
# Q_GWright = 0
# groundwater(h_Rijnstrangen=11.0, Q_GWleft=Q_GWleft, Q_GWright=Q_GWright, plot_phi=False, plot_q=True, plot_s=False)
# plot_volume_Rijnstrangen(scenario='Historical', plottype='multiple', date_from=start, date_to=end, Q_GWleft=Q_GWleft, Q_GWright=Q_GWright)
# plot_volume_narratives(V_max, Q_GW=Q_GWleft, date_from=start, date_to=end)
## This function plots the groundwater flow for multiple Quse values.
def GW_multiple_Quse_plot(h_Rijnstrangen):
for q in np.arange(50000000, 0, -12500000):
Q_GW = -q / 365 / 12000
qsum = groundwater(h_Rijnstrangen=h_Rijnstrangen, Q_GWleft=Q_GW, Q_GWright=Q_GW, plot_phi=False, plot_q=False, plot_s=False)[1]
X = np.arange(-3700, 5510, 10)
plt.plot(X, qsum, label='GW flow for Q$_{use}$ = 2 x ' + str(Q_GW*12000*365/1000000) + ' Mm$^3$/year', linestyle='-', linewidth=2)
for q in np.arange(100000000, 0, -50000000):
Q_GW = -q / 365 / 12000
qsum = groundwater(h_Rijnstrangen=h_Rijnstrangen, Q_GWleft=Q_GW, Q_GWright=0, plot_phi=False, plot_q=False, plot_s=False)[1]
X = np.arange(-3700, 5510, 10)
plt.plot(X, qsum, label='GW flow for Q$_{use, left}$ = ' + str(Q_GW*12000*365/1000000) + ' Mm$^3$/year', linestyle='-', linewidth=2)
for q in np.arange(50000000, 0, -25000000):
Q_GW = -q / 365 / 12000
qsum = groundwater(h_Rijnstrangen=h_Rijnstrangen, Q_GWleft=0, Q_GWright=Q_GW, plot_phi=False, plot_q=False, plot_s=False)[1]
X = np.arange(-3700, 5510, 10)
plt.plot(X, qsum, label='GW flow for Q$_{use, right}$ = ' + str(Q_GW*12000*365/1000000) + ' Mm$^3$/year', linestyle='-', linewidth=2)
for q in np.arange(0, -1, -1):
Q_GW = -q / 365 / 12000
qsum = groundwater(h_Rijnstrangen=h_Rijnstrangen, Q_GWleft=0, Q_GWright=Q_GW, plot_phi=False, plot_q=False, plot_s=False)[1]
X = np.arange(-3700, 5510, 10)
plt.plot(X, qsum, label='GW flow for Q$_{use}$ = ' + str(Q_GW*12000*365/1000000) + ' Mm$^3$/year', linestyle='-', linewidth=2)
plt.axvline(0, c='dimgrey', linestyle=':', label='Section separation')
plt.axvline(2000, c='dimgrey', linestyle=':')
plt.axvline(3000, c='dimgrey', linestyle=':')
plt.axvline(5500, c='dimgrey', linestyle=':')
plt.axvline(-3700, c='dimgrey', linestyle=':')
plt.axvline(-3100, c='dimgrey', linestyle=':')
v = -10
marge = v * 0.01
plt.plot([0, 2000], [v + marge, v + marge], c='grey')
plt.plot([-3700, -3100], [v + marge, v + marge], c='grey')
plt.text(1000, v, 'Rijnstrangen', color='grey', horizontalalignment='center', fontsize=14)
plt.text(-3400, v, 'Rhine', color='grey', horizontalalignment='center', fontsize=14)
plt.suptitle('Groundwater flux', fontsize=22, y=0.95)
plt.title('h$_{Rijnstrangen}$ = ' + str(h_Rijnstrangen) + ' m+NAP; positive = northward; negative = southward', fontsize=18)
plt.xlabel('Distance along cross-section [m]', size=14)
    plt.ylabel('Flux [m$^2$/d]', size=14)
plt.legend(fontsize=14, loc='best')
plt.show()
# GW_multiple_Quse_plot(12.5)
## This function plots the water balance for multiple Quse values.
def WB_multiple_Quse_plot():
params = {'legend.fontsize': 14,
'axes.labelsize': 14,
'xtick.labelsize': 14,
'ytick.labelsize': 14}
pylab.rcParams.update(params)
## With fluxes:
fig, axs = plt.subplots(2, 1, figsize=(16, 9), gridspec_kw={'height_ratios': [1, 2]})
df_volume = volume_Rijnstrangen(scenario='Historical', V_max=V_max, weir_width=weir_width, weir_height=weir_height, date_from=start, date_to=end, Q_GWleft=Q_GWleft, Q_GWright=Q_GWright)
axs[1].plot(df_volume.P * 1000000 / A_tot * 1000, color='lightblue', label='Q$_{Precipitation}$')
axs[1].plot(-df_volume.E0 * 1000000 / A_tot * 1000, color='darkgreen', label='Q$_{Evaporation}$')
axs[1].plot((df_volume.Q - df_volume.Outflow) * 1000000 / A_tot * 1000 / 50, color='blue',
label='Q$_{Rhine,net}$ / 50')
for q in np.arange(50000000, -1, -10000000):
Q_GW = -q/365/12000
df_volume = volume_Rijnstrangen(scenario='Historical', V_max=V_max, weir_width=weir_width, weir_height=weir_height, date_from=start, date_to=end, Q_GWleft=Q_GW, Q_GWright=Q_GW)
axs[0].set_ylabel('Volume in \n Rijnstrangen [Mm$^3$]', size=14)
axs[0].plot(df_volume.V, label = 'Q$_{use}$ = 2 x ' + str(q/1000000) + ' Mm$^3$/y')
axs[1].plot(-df_volume.GW * 1000000 / A_tot * 1000, label='Q$_{GW}$ for Q$_{use}$ = 2 x ' + str(q/1000000) + ' Mm$^3$/y')
leg1 = axs[0].legend(loc='lower right', fontsize=14)
leg1.get_lines()[0].set_linewidth(4)
leg1.get_lines()[1].set_linewidth(4)
leg1.get_lines()[2].set_linewidth(4)
leg1.get_lines()[3].set_linewidth(4)
leg1.get_lines()[4].set_linewidth(4)
leg1.get_lines()[5].set_linewidth(4)
axs[1].set_ylabel('Fluxes [mm/day]', size=14)
axs[0].set_ylim(-3, 63)
leg = plt.legend(loc='upper right', fontsize=14)
leg.get_lines()[0].set_linewidth(4)
leg.get_lines()[1].set_linewidth(4)
leg.get_lines()[2].set_linewidth(4)
leg.get_lines()[3].set_linewidth(4)
leg.get_lines()[4].set_linewidth(4)
leg.get_lines()[5].set_linewidth(4)
leg.get_lines()[6].set_linewidth(4)
leg.get_lines()[7].set_linewidth(4)
leg.get_lines()[8].set_linewidth(4)
## Without fluxes:
plt.figure(figsize=(16, 4))
plt.ylabel('Volume in \n Rijnstrangen [Mm$^3$]')
plt.ylim(-3, 63)
for q in np.arange(50000000, -1, -10000000):
Q_GW = -q/365/12000
df_volume = volume_Rijnstrangen(scenario='Historical', V_max=V_max, weir_width=weir_width, weir_height=weir_height, date_from=start, date_to=end, Q_GWleft=Q_GW, Q_GWright=Q_GW)
plt.plot(df_volume.V, label = 'Q$_{use}$ = 2 x ' + str(q/1000000) + ' Mm$^3$/y')
leg1 = plt.legend(loc='lower right', fontsize=14)
leg1.get_lines()[0].set_linewidth(4)
leg1.get_lines()[1].set_linewidth(4)
leg1.get_lines()[2].set_linewidth(4)
leg1.get_lines()[3].set_linewidth(4)
leg1.get_lines()[4].set_linewidth(4)
leg1.get_lines()[5].set_linewidth(4)
## Duration line
plt.figure(figsize=(13, 8))
for q in np.arange(50000000, -1, -10000000):
Q_GW = -q/365/12000
df_volume = volume_Rijnstrangen(scenario='Historical', V_max=V_max, weir_width=weir_width, weir_height=weir_height, date_from=start, date_to=end, Q_GWleft=Q_GW, Q_GWright=Q_GW)
df_descending = df_volume.sort_values('V', ascending=False)
df_descending.index = np.linspace(0, len(df_descending.V), len(df_descending.V)) / len(df_descending.V) * 100
plt.plot(df_descending.V, label='Q$_{use}$ = 2 x ' + str(q/1000000) + ' Mm$^3$/y', linewidth=2)
plt.ylabel('Volume in \n Rijnstrangen [Mm$^3$]')
plt.xlabel('Percentage of time volume is exceeded')
plt.ylim(-3, 62)
plt.axhline(y=V_max, xmin=0.05, xmax=0.95, color='darksalmon', linestyle='--', label='Max. volume')
leg = plt.legend(loc='best')
leg.get_lines()[0].set_linewidth(6)
leg.get_lines()[1].set_linewidth(6)
leg.get_lines()[2].set_linewidth(6)
leg.get_lines()[3].set_linewidth(6)
leg.get_lines()[4].set_linewidth(6)
leg.get_lines()[5].set_linewidth(6)
leg.get_lines()[6].set_linewidth(3)
plt.show()
# WB_multiple_Quse_plot()
## This function plots the water balance for multiple Vmax values.
def WB_multiple_Vmax_plot():
params = {'legend.fontsize': 18,
'axes.labelsize': 18,
'xtick.labelsize': 18,
'ytick.labelsize': 18}
pylab.rcParams.update(params)
## With fluxes:
fig, axs = plt.subplots(2, 1, figsize=(16, 9), gridspec_kw={'height_ratios': [1, 2]})
df_volume = volume_Rijnstrangen(scenario='Historical', V_max=V_max, weir_width=weir_width, weir_height=weir_height, date_from=start, date_to=end, Q_GWleft=Q_GWleft, Q_GWright=Q_GWright)
axs[1].plot(df_volume.P * 1000000 / A_tot * 1000, color='lightblue', label='Q$_{Precipitation}$')
axs[1].plot(-df_volume.E0 * 1000000 / A_tot * 1000, color='darkgreen', label='Q$_{Evaporation}$')
axs[1].plot((df_volume.Q - df_volume.Outflow) * 1000000 / A_tot * 1000 / 50, color='blue', label='Q$_{Rhine,net}$ / 50')
for Vmax in np.arange(100, 0, -20):
df_volume = volume_Rijnstrangen(scenario='Historical', V_max=Vmax, weir_width=weir_width, weir_height=weir_height, date_from=start, date_to=end, Q_GWleft=Q_GWleft, Q_GWright=Q_GWright)
axs[0].set_ylabel('Volume in \n Rijnstrangen [Mm$^3$]', size=14)
axs[0].plot(df_volume.V, label = 'V$_{max}$ = ' + str(Vmax) + ' Mm$^3$')
axs[1].plot(-df_volume.GW * 1000000 / A_tot * 1000, label='Q$_{GW}$ for V$_{max}$ = ' + str(Vmax) + ' Mm$^3$')
leg1 = axs[0].legend(loc='lower right', fontsize=14)
leg1.get_lines()[0].set_linewidth(4)
leg1.get_lines()[1].set_linewidth(4)
leg1.get_lines()[2].set_linewidth(4)
leg1.get_lines()[3].set_linewidth(4)
leg1.get_lines()[4].set_linewidth(4)
axs[1].set_ylabel('Fluxes [mm/day]', size=14)
# axs[1].set_ylim(-12, 50)
axs[0].set_ylim(-5, 105)
leg = plt.legend(loc='upper right', fontsize=14)
leg.get_lines()[0].set_linewidth(4)
leg.get_lines()[1].set_linewidth(4)
leg.get_lines()[2].set_linewidth(4)
leg.get_lines()[3].set_linewidth(4)
leg.get_lines()[4].set_linewidth(4)
leg.get_lines()[5].set_linewidth(4)
leg.get_lines()[6].set_linewidth(4)
leg.get_lines()[7].set_linewidth(4)
## Without fluxes:
plt.figure(figsize=(16, 4))
plt.ylabel('Volume in \n Rijnstrangen [Mm$^3$]')
for Vmax in np.arange(100, 0, -20):
df_volume = volume_Rijnstrangen(scenario='Historical', V_max=Vmax, weir_width=weir_width, weir_height=weir_height, date_from=start, date_to=end, Q_GWleft=Q_GWleft, Q_GWright=Q_GWright)
plt.plot(df_volume.V, label = 'V$_{max}$ = ' + str(Vmax) + ' Mm$^3$')
leg1 = plt.legend(loc='lower right', fontsize=14)
leg1.get_lines()[0].set_linewidth(4)
leg1.get_lines()[1].set_linewidth(4)
leg1.get_lines()[2].set_linewidth(4)
leg1.get_lines()[3].set_linewidth(4)
leg1.get_lines()[4].set_linewidth(4)
## Duration line
plt.figure(figsize=(13, 8))
for Vmax in np.arange(100, 0, -20):
df_volume = volume_Rijnstrangen(scenario='Historical', V_max=Vmax, weir_width=weir_width, weir_height=weir_height, date_from=start, date_to=end, Q_GWleft=Q_GWleft, Q_GWright=Q_GWright)
df_descending = df_volume.sort_values('V', ascending=False)
df_descending.index = np.linspace(0, len(df_descending.V), len(df_descending.V)) / len(df_descending.V) * 100
plt.plot(df_descending.loc[0:100, 'V'], label='V$_{max}$ = ' + str(Vmax) + ' Mm$^3$', linewidth=3)
plt.ylabel('Volume in \n Rijnstrangen [Mm$^3$]')
plt.xlabel('Percentage of time volume is exceeded')
plt.ylim(-3, 62)
plt.axhline(y=V_max, xmin=0.05, xmax=0.95, color='darksalmon', linestyle='--', label='Max. volume')
leg = plt.legend(loc='best')
leg.get_lines()[0].set_linewidth(6)
leg.get_lines()[1].set_linewidth(6)
leg.get_lines()[2].set_linewidth(6)
leg.get_lines()[3].set_linewidth(6)
leg.get_lines()[4].set_linewidth(6)
leg.get_lines()[5].set_linewidth(6)
leg.get_lines()[6].set_linewidth(3)
plt.show()
# WB_multiple_Vmax_plot()
## This function plots the water balance for multiple weir widths.
def WB_multiple_width_plot():
params = {'legend.fontsize': 14,
'axes.labelsize': 14,
'xtick.labelsize': 14,
'ytick.labelsize': 14}
pylab.rcParams.update(params)
## with fluxes:
fig, axs = plt.subplots(2, 1, figsize=(16, 9), gridspec_kw={'height_ratios': [1, 2]})
df_volume = volume_Rijnstrangen(scenario='Historical', V_max=V_max, weir_width=weir_width, weir_height=weir_height, date_from=start, date_to=end, Q_GWleft=Q_GWleft, Q_GWright=Q_GWright)
axs[1].plot(df_volume.P * 1000000 / A_tot * 1000, color='lightblue', label='Q$_{Precipitation}$')
axs[1].plot(-df_volume.E0 * 1000000 / A_tot * 1000, color='darkgreen', label='Q$_{Evaporation}$')
axs[1].plot((df_volume.Q - df_volume.Outflow) * 1000000 / A_tot * 1000 / 50, color='blue', label='Q$_{Rhine,net}$ / 50')
axs[1].plot(-df_volume.GW * 1000000 / A_tot * 1000, color='black', label='Q$_{groundwater}$')
for width in np.arange(1200, 0, -200):
df_volume = volume_Rijnstrangen(scenario='Historical', V_max=V_max, weir_width=width, weir_height=weir_height, date_from=start, date_to=end, Q_GWleft=Q_GWleft, Q_GWright=Q_GWright)
axs[0].set_ylabel('Volume in \n Rijnstrangen [Mm$^3$]', size=14)
axs[0].plot(df_volume.V, label = 'Weir width = ' + str(width) + ' m')
leg1 = axs[0].legend(loc='lower right', fontsize=14)
leg1.get_lines()[0].set_linewidth(4)
leg1.get_lines()[1].set_linewidth(4)
leg1.get_lines()[2].set_linewidth(4)
leg1.get_lines()[3].set_linewidth(4)
leg1.get_lines()[4].set_linewidth(4)
leg1.get_lines()[5].set_linewidth(4)
axs[1].set_ylabel('Fluxes [mm/day]', size=14)
# axs[1].set_ylim(-12, 50)
axs[0].set_ylim(-3, 63)
leg = plt.legend(loc='upper right', fontsize=14)
leg.get_lines()[0].set_linewidth(4)
leg.get_lines()[1].set_linewidth(4)
leg.get_lines()[2].set_linewidth(4)
leg.get_lines()[3].set_linewidth(4)
## Without fluxes:
plt.figure(figsize=(16, 4))
plt.ylabel('Volume in \n Rijnstrangen [Mm$^3$]')
plt.ylim(-3, 63)
for width in [250, 200, 150, 100, 50]:
df_volume = volume_Rijnstrangen(scenario='Historical', V_max=V_max, weir_width=width, weir_height=weir_height, date_from=start, date_to=end, Q_GWleft=Q_GWleft, Q_GWright=Q_GWright)
plt.plot(df_volume.V, label = 'Weir width = ' + str(width) + ' m')
leg1 = plt.legend(loc='lower right', fontsize=14)
leg1.get_lines()[0].set_linewidth(4)
leg1.get_lines()[1].set_linewidth(4)
leg1.get_lines()[2].set_linewidth(4)
leg1.get_lines()[3].set_linewidth(4)
leg1.get_lines()[4].set_linewidth(4)
leg1.get_lines()[5].set_linewidth(4)
## Duration line
plt.figure(figsize=(13, 8))
for width in [500, 250, 100, 50, 25, 10]:
df_volume = volume_Rijnstrangen(scenario='Historical', V_max=V_max, weir_width=width, weir_height=weir_height, date_from=start, date_to=end, Q_GWleft=Q_GWleft, Q_GWright=Q_GWright)
df_descending = df_volume.sort_values('V', ascending=False)
df_descending.index = np.linspace(0, len(df_descending.V), len(df_descending.V)) / len(df_descending.V) * 100
plt.plot(df_descending.loc[0:100, 'V'], label='Weir width = ' + str(width) + ' m', linewidth=3)
plt.ylabel('Volume in \n Rijnstrangen [Mm$^3$]')
plt.xlabel('Percentage of time volume is exceeded')
plt.ylim(-3, 62)
plt.axhline(y=V_max, xmin=0.05, xmax=0.95, color='darksalmon', linestyle='--', label='Max. volume')
leg = plt.legend(loc='best')
leg.get_lines()[0].set_linewidth(6)
leg.get_lines()[1].set_linewidth(6)
leg.get_lines()[2].set_linewidth(6)
leg.get_lines()[3].set_linewidth(6)
leg.get_lines()[4].set_linewidth(6)
leg.get_lines()[5].set_linewidth(6)
leg.get_lines()[6].set_linewidth(3)
plt.show()
# WB_multiple_width_plot()
## This function plots the water balance for multiple weir heights.
def WB_multiple_height_plot():
params = {'legend.fontsize': 14,
'axes.labelsize': 14,
'xtick.labelsize': 14,
'ytick.labelsize': 14}
pylab.rcParams.update(params)
## With fluxes:
fig, axs = plt.subplots(2, 1, figsize=(16, 9), gridspec_kw={'height_ratios': [1, 2]})
df_volume = volume_Rijnstrangen(scenario='Historical', V_max=V_max, weir_width=weir_width, weir_height=weir_height, date_from=start, date_to=end, Q_GWleft=Q_GWleft, Q_GWright=Q_GWright)
axs[1].plot(df_volume.P * 1000000 / A_tot * 1000, color='lightblue', label='Q$_{Precipitation}$')
axs[1].plot(-df_volume.E0 * 1000000 / A_tot * 1000, color='darkgreen', label='Q$_{Evaporation}$')
axs[1].plot((df_volume.Q - df_volume.Outflow) * 1000000 / A_tot * 1000 / 50, color='blue', label='Q$_{Rhine,net}$ / 50')
axs[1].plot(-df_volume.GW * 1000000 / A_tot * 1000, color='black', label='Q$_{groundwater}$')
for height in np.arange(15, 9, -1):
df_volume = volume_Rijnstrangen(scenario='Historical', V_max=V_max, weir_width=weir_width, weir_height=height, date_from=start, date_to=end, Q_GWleft=Q_GWleft, Q_GWright=Q_GWright)
axs[0].set_ylabel('Volume in \n Rijnstrangen [Mm$^3$]', size=14)
axs[0].plot(df_volume.V, label = 'Weir height = ' + str(height) + ' m')
leg1 = axs[0].legend(loc='lower right', fontsize=14)
leg1.get_lines()[0].set_linewidth(4)
leg1.get_lines()[1].set_linewidth(4)
leg1.get_lines()[2].set_linewidth(4)
leg1.get_lines()[3].set_linewidth(4)
leg1.get_lines()[4].set_linewidth(4)
leg1.get_lines()[5].set_linewidth(4)
axs[1].set_ylabel('Fluxes [mm/day]', size=14)
axs[0].set_ylim(-3, 63)
leg = plt.legend(loc='upper right', fontsize=14)
leg.get_lines()[0].set_linewidth(4)
leg.get_lines()[1].set_linewidth(4)
leg.get_lines()[2].set_linewidth(4)
leg.get_lines()[3].set_linewidth(4)
## Without fluxes:
plt.figure(figsize=(16, 4))
plt.ylabel('Volume in \n Rijnstrangen [Mm$^3$]')
plt.ylim(-3, 63)
for height in np.arange(15, 9, -1):
df_volume = volume_Rijnstrangen(scenario='Historical', V_max=V_max, weir_width=weir_width, weir_height=height, date_from=start, date_to=end, Q_GWleft=Q_GWleft, Q_GWright=Q_GWright)
plt.plot(df_volume.V, label = 'Weir height = ' + str(height) + ' m')
leg1 = plt.legend(loc='lower right', fontsize=14)
leg1.get_lines()[0].set_linewidth(4)
leg1.get_lines()[1].set_linewidth(4)
leg1.get_lines()[2].set_linewidth(4)
leg1.get_lines()[3].set_linewidth(4)
leg1.get_lines()[4].set_linewidth(4)
leg1.get_lines()[5].set_linewidth(4)
## Duration line
plt.figure(figsize=(13, 8))
for height in np.arange(15, 9, -1):
df_volume = volume_Rijnstrangen(scenario='Historical', V_max=V_max, weir_width=weir_width, weir_height=height, date_from=start, date_to=end, Q_GWleft=Q_GWleft, Q_GWright=Q_GWright)
df_descending = df_volume.sort_values('V', ascending=False)
df_descending.index = np.linspace(0, len(df_descending.V), len(df_descending.V)) / len(df_descending.V) * 100
plt.plot(df_descending.loc[0:100, 'V'], label='Weir height = ' + str(height) + ' m', linewidth=3)
plt.ylabel('Volume in \n Rijnstrangen [Mm$^3$]')
plt.xlabel('Percentage of time volume is exceeded')
plt.ylim(-3, 62)
plt.axhline(y=V_max, xmin=0.05, xmax=0.95, color='darksalmon', linestyle='--', label='Max. volume')
leg = plt.legend(loc='best')
leg.get_lines()[0].set_linewidth(6)
leg.get_lines()[1].set_linewidth(6)
leg.get_lines()[2].set_linewidth(6)
leg.get_lines()[3].set_linewidth(6)
leg.get_lines()[4].set_linewidth(6)
leg.get_lines()[5].set_linewidth(6)
leg.get_lines()[6].set_linewidth(3)
plt.show()
# WB_multiple_height_plot()
## This function prints or plots the percentages of the extraction originating from the region around the Rijnstrangen.
def function_percentages():
## PRINT
for V_max in [0.56, 1.21, 3.6, 7.66, 13.9, 22.32, 32.9, 45.12, 58.4, 72.1, 86.0, 100]:
for Q_GW in [-2500000/365/12000, -5000000/365/12000, -10000000/365/12000, -25000000/365/12000, -50000000/365/12000]:
plot_volume_narratives(V_max, Q_GW, date_from=start, date_to=end) ## comment everything except the "print" part in the plot_volume_narratives function
## PLOT
# V_max = 7.7
# Q_GW = -2000000/365/12000
# print(V_max, Q_GW)
# for width in [500, 400, 300, 200, 100]:
# for height in [15, 14, 13, 12, 11, 10]:
# weir_width = width
# weir_height = height
# plot_volume_narratives(V_max, Q_GW, start,end)
#
# V_max = 22.3
# Q_GW = -25000000/365/12000
# print(V_max, Q_GW)
# for width in [500, 400, 300, 200, 100]:
# for height in [15, 14, 13, 12, 11, 10]:
# weir_width = width
# weir_height = height
# plot_volume_narratives(V_max, Q_GW, start,end)
#
# V_max = 72
# Q_GW = -57000000/365/12000
# print(V_max, Q_GW)
# for width in [500, 400, 300, 200, 100]:
# for height in [15, 14, 13, 12, 11, 10]:
# weir_width = width
# weir_height = height
# plot_volume_narratives(V_max, Q_GW, start,end)
# function_percentages()
## This function plots the relation between the percentage extracted from the region and the water level for various extraction volumes.
def Perc_multiple_Quse_plot():
params = {'legend.fontsize': 14,
'axes.labelsize': 14,
'xtick.labelsize': 14,
'ytick.labelsize': 14}
pylab.rcParams.update(params)
perc_list = []
h_list = []
for h in np.arange(9.75, 15.51, 0.25):
Q_GWleft = -10000000 / 365 / 12000
Q_GWright = -10000000 / 365 / 12000
perc_reg = groundwater(h_Rijnstrangen=h, Q_GWleft=Q_GWleft, Q_GWright=Q_GWright, plot_phi=False, plot_q=False, plot_s=False)[4]
perc_list.append(perc_reg)
h_list.append(h)
plt.plot(h_list, perc_list, 'x:', markersize=10, color='dodgerblue', label='Q$_{tot}$ = -20 Mm$^3$/year, Q$_{left}$ = -10 Mm$^3$/year, Q$_{right}$ = -10 Mm$^3$/year')
# plt.plot(15, 0, 'o', color='blue', label='Q$_{tot}$ = -20 Mm$^3$/year, Q$_{left}$ = -10 Mm$^3$/year, Q$_{right}$ = -10 Mm$^3$/year')
perc_list = []
h_list = []
for h in np.arange(9.75, 15.51, 0.25):
Q_GWleft = -20000000 / 365 / 12000
Q_GWright = 0
perc_reg = groundwater(h_Rijnstrangen=h, Q_GWleft=Q_GWleft, Q_GWright=Q_GWright, plot_phi=False, plot_q=False, plot_s=False)[4]
perc_list.append(perc_reg)
h_list.append(h)
plt.plot(h_list, perc_list, '*:', markersize=10, color='dodgerblue', label='Q$_{tot}$ = -20 Mm$^3$/year, Q$_{left}$ = -20 Mm$^3$/year, Q$_{right}$ = 0.0 Mm$^3$/year')
# plt.plot(15, 0, 'o', color='green', label='Q$_{tot}$ = -20 Mm$^3$/year, Q$_{left}$ = -20 Mm$^3$/year, Q$_{right}$ = 0.0 Mm$^3$/year')
perc_list = []
h_list = []
for h in np.arange(9.75, 15.51, 0.25):
Q_GWleft = 0
Q_GWright = -20000000 / 365 / 12000
perc_reg = groundwater(h_Rijnstrangen=h, Q_GWleft=Q_GWleft, Q_GWright=Q_GWright, plot_phi=False, plot_q=False, plot_s=False)[4]
perc_list.append(perc_reg)
h_list.append(h)
plt.plot(h_list, perc_list, 'o:', markersize=10, color='dodgerblue', label='Q$_{tot}$ = -20 Mm$^3$/year, Q$_{left}$ = 0.0 Mm$^3$/year, Q$_{right}$ = -20 Mm$^3$/year')
# plt.plot(15, 0, 'o', color='dodgerblue', label='Q$_{tot}$ = -20 Mm$^3$/year, Q$_{left}$ = 0.0 Mm$^3$/year, Q$_{right}$ = -20 Mm$^3$/year')
perc_list = []
h_list = []
for h in np.arange(9.75, 15.51, 0.25):
Q_GWleft = -25000000 / 365 / 12000
Q_GWright = -25000000 / 365 / 12000
perc_reg = groundwater(h_Rijnstrangen=h, Q_GWleft=Q_GWleft, Q_GWright=Q_GWright, plot_phi=False, plot_q=False, plot_s=False)[4]
perc_list.append(perc_reg)
h_list.append(h)
plt.plot(h_list, perc_list, 'x:', markersize=10, color='purple', label='Q$_{tot}$ = -50 Mm$^3$/year, Q$_{left}$ = -25 Mm$^3$/year, Q$_{right}$ = -25 Mm$^3$/year')
# plt.plot(15, 0, 'o', color='darkorange', label='Q$_{tot}$ = -50 Mm$^3$/year, Q$_{left}$ = -25 Mm$^3$/year, Q$_{right}$ = -25 Mm$^3$/year')
perc_list = []
h_list = []
for h in np.arange(9.75, 15.51, 0.25):
Q_GWleft = 0
Q_GWright = -50000000 / 365 / 12000
perc_reg = groundwater(h_Rijnstrangen=h, Q_GWleft=Q_GWleft, Q_GWright=Q_GWright, plot_phi=False, plot_q=False, plot_s=False)[4]
perc_list.append(perc_reg)
h_list.append(h)
plt.plot(h_list, perc_list, '*:', markersize=10, color='purple', label='Q$_{tot}$ = -50 Mm$^3$/year, Q$_{left}$ = 0.0 Mm$^3$/year, Q$_{right}$ = -50 Mm$^3$/year')
# plt.plot(15, 0, 'o', color='red', label='Q$_{tot}$ = -50 Mm$^3$/year, Q$_{left}$ = 0.0 Mm$^3$/year, Q$_{right}$ = -50 Mm$^3$/year')
perc_list = []
h_list = []
for h in np.arange(9.75, 15.51, 0.25):
Q_GWleft = -50000000 / 365 / 12000
Q_GWright = 0
perc_reg = groundwater(h_Rijnstrangen=h, Q_GWleft=Q_GWleft, Q_GWright=Q_GWright, plot_phi=False, plot_q=False, plot_s=False)[4]
perc_list.append(perc_reg)
h_list.append(h)
plt.plot(h_list, perc_list, 'o:', markersize=10, color='purple', label='Q$_{tot}$ = -50 Mm$^3$/year, Q$_{left}$ = -50 Mm$^3$/year, Q$_{right}$ = 0.0 Mm$^3$/year')
# plt.plot(15, 0, 'o', color='purple', label='Q$_{tot}$ = -50 Mm$^3$/year, Q$_{left}$ = -50 Mm$^3$/year, Q$_{right}$ = 0.0 Mm$^3$/year')
perc_list = []
h_list = []
for h in np.arange(9.75, 15.51, 0.25):
Q_GWleft = -50000000 / 365 / 12000
Q_GWright = -50000000 / 365 / 12000
perc_reg = \
groundwater(h_Rijnstrangen=h, Q_GWleft=Q_GWleft, Q_GWright=Q_GWright, plot_phi=False, plot_q=False, plot_s=False)[4]
perc_list.append(perc_reg)
h_list.append(h)
plt.plot(h_list, perc_list, 'x:', markersize=10, color='darkorange',
label='Q$_{tot}$ = -100 Mm$^3$/year, Q$_{left}$ = -50 Mm$^3$/year, Q$_{right}$ = -50 Mm$^3$/year')
# plt.plot(15, 0, 'o', color='darkorange', label='Q$_{tot}$ = -50 Mm$^3$/year, Q$_{left}$ = -25 Mm$^3$/year, Q$_{right}$ = -25 Mm$^3$/year')
perc_list = []
h_list = []
for h in np.arange(9.75, 15.51, 0.25):
Q_GWleft = 0
Q_GWright = -100000000 / 365 / 12000
perc_reg = \
groundwater(h_Rijnstrangen=h, Q_GWleft=Q_GWleft, Q_GWright=Q_GWright, plot_phi=False, plot_q=False, plot_s=False)[4]
perc_list.append(perc_reg)
h_list.append(h)
plt.plot(h_list, perc_list, '*:', markersize=10, color='darkorange',
label='Q$_{tot}$ = -100 Mm$^3$/year, Q$_{left}$ = 0.0 Mm$^3$/year, Q$_{right}$ = -100 Mm$^3$/year')
# plt.plot(15, 0, 'o', color='red', label='Q$_{tot}$ = -50 Mm$^3$/year, Q$_{left}$ = 0.0 Mm$^3$/year, Q$_{right}$ = -50 Mm$^3$/year')
perc_list = []
h_list = []
for h in np.arange(9.75, 15.51, 0.25):
Q_GWleft = -100000000 / 365 / 12000
Q_GWright = 0
perc_reg = \
groundwater(h_Rijnstrangen=h, Q_GWleft=Q_GWleft, Q_GWright=Q_GWright, plot_phi=False, plot_q=False, plot_s=False)[4]
perc_list.append(perc_reg)
h_list.append(h)
plt.plot(h_list, perc_list, 'o:', markersize=10, color='darkorange',
label='Q$_{tot}$ = -100 Mm$^3$/year, Q$_{left}$ = -100 Mm$^3$/year, Q$_{right}$ = 0.0 Mm$^3$/year')
# plt.plot(15, 0, 'o', color='purple', label='Q$_{tot}$ = -50 Mm$^3$/year, Q$_{left}$ = -50 Mm$^3$/year, Q$_{right}$ = 0.0 Mm$^3$/year')
plt.xlabel('h in Rijnstrangen [m]')
    plt.ylabel('Fraction of extracted water from the region (not from the reservoir)')
plt.legend(loc='best')
plt.show()
# Perc_multiple_Quse_plot() |
py | 1a3745438448c0730678fa30e3c892f45c6e4e7c | try:
import simplejson as json
except ImportError:
import json
from http.cookiejar import LWPCookieJar
from urllib.parse import urlencode
from urllib.error import HTTPError
from urllib.request import urlopen, Request, build_opener, HTTPCookieProcessor
import logging
import getpass
import time
import os
import os.path
from pprint import pformat
from . import version
__version__ = version.VERSION
logger = logging.getLogger('dimclient')
PROTOCOL_VERSION = 17
class DimError(Exception):
def __init__(self, message, code=1):
Exception.__init__(self, message)
self.code = code
    # Defined so that str(DimError) returns the message instead of blowing up
def __str__(self):
return self.args[0]
class ProtocolError(DimError):
pass
class DimClient(object):
def __init__(self, server_url, cookie_file=None, cookie_umask=None):
self.server_url = server_url
self.cookie_jar = LWPCookieJar()
self.session = build_opener(HTTPCookieProcessor(self.cookie_jar))
if cookie_file:
self._use_cookie_file(cookie_file, cookie_umask)
def login(self, username, password, permanent_session=False):
try:
self.session.open(self.server_url + '/login',
urlencode(dict(username=username,
password=password,
permanent_session=permanent_session)).encode('utf8'))
self.check_protocol_version()
self._update_cookie_file()
return True
except HTTPError as e:
logger.error("Login failed: " + str(e))
return False
@property
def logged_in(self):
try:
self.get_username()
# update cookie file with refreshed cookie(s) from response
self._update_cookie_file()
return True
except HTTPError as e:
if e.code == 403:
return False
else:
raise
except DimError as e:
# InvalidUserError
# valid cookie, but user is missing in DIM
if e.code == 8:
return False
else:
raise
def _update_cookie_file(self):
if self.cookie_jar.filename and self.save_cookie:
# Use umask when saving cookie
if self.cookie_umask is not None:
old_mask = os.umask(self.cookie_umask)
self.cookie_jar.save()
if self.cookie_umask is not None:
os.umask(old_mask)
def _use_cookie_file(self, cookie_file, cookie_umask, save_cookie=True):
self.cookie_jar.filename = cookie_file
self.save_cookie = save_cookie
self.cookie_umask = cookie_umask
try:
self.cookie_jar.load()
        except Exception:
            # ignore a missing or unreadable cookie file and start with an empty jar
            pass
def login_prompt(self, username=None, password=None, permanent_session=False, ignore_cookie=False):
if not ignore_cookie and self.logged_in:
return True
else:
if username is None:
username = input('Username: ')
if password is None:
password = getpass.getpass()
return self.login(username, password, permanent_session)
def check_protocol_version(self):
try:
server_protocol = self.protocol_version()
except Exception as e:
raise ProtocolError("The server does not have the JSONRPC interface enabled (%s)" % e)
if server_protocol != PROTOCOL_VERSION:
raise ProtocolError("Server protocol version (%s) does not match client protocol version (%s)" %
(server_protocol, PROTOCOL_VERSION))
def raw_call(self, function, *args):
url = self.server_url + "/jsonrpc"
json_call = json.dumps(dict(jsonrpc='2.0',
method=function,
params=args,
id=None))
logger.debug('dim call: %s(%s)' % (function, ', '.join([repr(x) for x in args])))
start = time.time()
request = Request(url, data=json_call.encode('utf8'), headers={'Content-Type': 'application/json'})
response = self.session.open(request).read()
rpc_response = json.loads(response.decode('utf8'))
logger.debug('time taken: %.3f' % (time.time() - start))
if 'error' in rpc_response:
logger.debug('dim error: ' + str(rpc_response['error']))
raise DimError(message=rpc_response['error']['message'],
code=rpc_response['error']['code'])
else:
if logger.isEnabledFor(logging.DEBUG):
logger.debug('dim result: ' + pformat(rpc_response['result']))
return rpc_response['result']
def call(self, function, *args, **kwargs):
'''
Instead of passing the last argument as a dictionary (usually called
"options"), you can use keyword arguments.
.. note::
Keyword arguments cannot be used for positional jsonrpc arguments.
'''
passed_args = args
if kwargs:
passed_args += (kwargs,)
return self.raw_call(function, *passed_args)
def __getattr__(self, name):
return lambda *args, **kwargs: self.call(name, *args, **kwargs)
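    # Example (illustrative; 'ip_list' and its parameters are placeholders for real DIM RPC methods):
    #   client.call('ip_list', pool='example', limit=5)
    #   client.ip_list(pool='example', limit=5)   # equivalent shorthand provided by __getattr__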
def ip_list_all(self, **options):
        total = options.setdefault('limit', 10)  ## ensure 'limit' is present, since it is decremented below
result = []
while len(result) < total:
batch = self.ip_list(**options)
if len(batch) == 0:
break
result.extend(batch)
options['limit'] -= len(batch)
options['after'] = batch[-1]['ip']
return result
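# Example usage (illustrative sketch; the server URL, credentials and RPC parameters below are
# placeholders and not part of this module):
# if __name__ == '__main__':
#     client = DimClient('https://dim.example.com')
#     if client.login_prompt(permanent_session=True):
#         print('server protocol version:', client.protocol_version())
#         print(client.ip_list_all(pool='example-pool', limit=25))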
|
py | 1a37455b3389587571a8501a8e58a11aa8d807da | """Common methods used across tests for Bond."""
from asyncio import TimeoutError as AsyncIOTimeoutError
from contextlib import nullcontext
from datetime import timedelta
from typing import Any, Dict, Optional
from homeassistant import core
from homeassistant.components.bond.const import DOMAIN as BOND_DOMAIN
from homeassistant.const import CONF_ACCESS_TOKEN, CONF_HOST, STATE_UNAVAILABLE
from homeassistant.setup import async_setup_component
from homeassistant.util import utcnow
from tests.async_mock import patch
from tests.common import MockConfigEntry, async_fire_time_changed
def patch_setup_entry(domain: str, *, enabled: bool = True):
"""Patch async_setup_entry for specified domain."""
if not enabled:
return nullcontext()
return patch(f"homeassistant.components.bond.{domain}.async_setup_entry")
async def setup_bond_entity(
hass: core.HomeAssistant,
config_entry: MockConfigEntry,
*,
patch_version=False,
patch_device_ids=False,
patch_platforms=False,
):
"""Set up Bond entity."""
config_entry.add_to_hass(hass)
with patch_bond_version(enabled=patch_version), patch_bond_device_ids(
enabled=patch_device_ids
), patch_setup_entry("cover", enabled=patch_platforms), patch_setup_entry(
"fan", enabled=patch_platforms
), patch_setup_entry(
"light", enabled=patch_platforms
), patch_setup_entry(
"switch", enabled=patch_platforms
):
return await hass.config_entries.async_setup(config_entry.entry_id)
async def setup_platform(
hass: core.HomeAssistant,
platform: str,
discovered_device: Dict[str, Any],
bond_device_id: str = "bond-device-id",
props: Dict[str, Any] = None,
):
"""Set up the specified Bond platform."""
mock_entry = MockConfigEntry(
domain=BOND_DOMAIN,
data={CONF_HOST: "1.1.1.1", CONF_ACCESS_TOKEN: "test-token"},
)
mock_entry.add_to_hass(hass)
with patch("homeassistant.components.bond.PLATFORMS", [platform]):
with patch_bond_version(), patch_bond_device_ids(
return_value=[bond_device_id]
), patch_bond_device(
return_value=discovered_device
        ), patch_bond_device_state(), patch_bond_device_properties(
            return_value=props
        ):
assert await async_setup_component(hass, BOND_DOMAIN, {})
await hass.async_block_till_done()
return mock_entry
def patch_bond_version(
enabled: bool = True, return_value: Optional[dict] = None, side_effect=None
):
"""Patch Bond API version endpoint."""
if not enabled:
return nullcontext()
if return_value is None:
return_value = {"bondid": "test-bond-id"}
return patch(
"homeassistant.components.bond.Bond.version",
return_value=return_value,
side_effect=side_effect,
)
def patch_bond_device_ids(enabled: bool = True, return_value=None, side_effect=None):
"""Patch Bond API devices endpoint."""
if not enabled:
return nullcontext()
if return_value is None:
return_value = []
return patch(
"homeassistant.components.bond.Bond.devices",
return_value=return_value,
side_effect=side_effect,
)
def patch_bond_device(return_value=None):
"""Patch Bond API device endpoint."""
return patch(
"homeassistant.components.bond.Bond.device", return_value=return_value,
)
def patch_bond_action():
"""Patch Bond API action endpoint."""
return patch("homeassistant.components.bond.Bond.action")
def patch_bond_device_properties(return_value=None):
"""Patch Bond API device properties endpoint."""
if return_value is None:
return_value = {}
return patch(
"homeassistant.components.bond.Bond.device_properties",
return_value=return_value,
)
def patch_bond_device_state(return_value=None, side_effect=None):
"""Patch Bond API device state endpoint."""
if return_value is None:
return_value = {}
return patch(
"homeassistant.components.bond.Bond.device_state",
return_value=return_value,
side_effect=side_effect,
)
async def help_test_entity_available(
hass: core.HomeAssistant, domain: str, device: Dict[str, Any], entity_id: str
):
"""Run common test to verify available property."""
await setup_platform(hass, domain, device)
assert hass.states.get(entity_id).state != STATE_UNAVAILABLE
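    # firing a time-changed event makes the integration poll device state again, so the
    # patched side effect / return value below decides whether the entity stays available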
with patch_bond_device_state(side_effect=AsyncIOTimeoutError()):
async_fire_time_changed(hass, utcnow() + timedelta(seconds=30))
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == STATE_UNAVAILABLE
with patch_bond_device_state(return_value={}):
async_fire_time_changed(hass, utcnow() + timedelta(seconds=30))
await hass.async_block_till_done()
assert hass.states.get(entity_id).state != STATE_UNAVAILABLE
|
py | 1a37455e637b2e9571622945804bce9102c88028 | import discord
import datetime
import pytz
from discord.ext import commands
from utils.bot import EpicBot
from config import MAIN_COLOR
from utils.time import convert_int_to_weekday
from utils.custom_checks import mutual_guild
from handler import slash_command, InteractionContext
stream_schedule = {
0: True, # Monday
1: False, # Tuesday
2: True, # Wednesday
    3: True, # Thursday
4: False, # Friday
5: True, # Saturday
6: False # Sunday
}
live_text = "Ramaziz will be live today!"
not_live_text = "Ramaziz will not be live today!"
be_sure = "Be sure to check <#762550256918724640> in case of any stream cancellations!"
class RamTimeView(discord.ui.View):
def __init__(self, author_id: int, time_embed: discord.Embed, current_time: datetime.datetime):
super().__init__(timeout=None)
self.author_id = author_id
self.time_embed = time_embed
self.current_time = current_time
@discord.ui.button(label="Time", emoji='⏰', style=discord.ButtonStyle.blurple, disabled=True)
async def time(self, button: discord.ui.Button, interaction: discord.Interaction):
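        # re-enable every button, then disable the one that was just pressed so the
        # page currently being shown cannot be selected again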
for item in self.children:
item.disabled = False
button.disabled = True
await interaction.message.edit(embed=self.time_embed, view=self)
@discord.ui.button(label="Stream Schedule", emoji='📝', style=discord.ButtonStyle.blurple)
async def stream_schedule(self, button: discord.ui.Button, interaction: discord.Interaction):
for item in self.children:
item.disabled = False
button.disabled = True
stream_schedule_embed = discord.Embed(
title="Stream Schedule",
description="Ramaziz's twitch stream schedule: **[Go follow!](https://twitch.tv/ramaziz)**",
color=MAIN_COLOR
).add_field(
name="Current Stream",
value=f"{live_text if stream_schedule[self.current_time.weekday()] else not_live_text}\n{be_sure}",
inline=False
).add_field(
name="Schedule",
value='\n'.join([f"**{convert_int_to_weekday(i)}** • {stream_schedule[i]}" for i in stream_schedule]),
inline=False
)
await interaction.message.edit(embed=stream_schedule_embed, view=self)
@discord.ui.button(label="Close menu", emoji='⏹️', style=discord.ButtonStyle.danger)
async def close(self, button: discord.ui.Button, interaction: discord.Interaction):
await interaction.message.delete()
async def interaction_check(self, interaction: discord.Interaction):
if interaction.user.id == self.author_id:
return True
else:
return await interaction.response.send_message("Not your command o_o", ephemeral=True)
class PrivateCmds(commands.Cog):
def __init__(self, client: EpicBot):
self.client = client
@commands.command(
aliases=['ram-time', 'time-ram', 'timeram', 'time_ram', 'ramaziztime', 'ramaziz_time', 'ramaziz-time', 'ramtime'],
help="Ever wonder what time is it for Ramaziz?"
)
@mutual_guild(719157704467152977)
@slash_command(name='ramtime', guild_ids=[719157704467152977, 749996055369875456], help="Check what time it is for Ramaziz!")
async def ram_time(self, ctx: InteractionContext):
dt_utc = datetime.datetime.now(tz=pytz.UTC)
dt_nzt = dt_utc.astimezone(pytz.timezone("NZ"))
time_embed = discord.Embed(title="⏰ Ram Time", color=MAIN_COLOR)
time_embed.add_field(name="Time", value=f"{dt_nzt.strftime('%I : %M : %S %p')}", inline=False)
time_embed.add_field(name="Date", value=f"{convert_int_to_weekday(dt_nzt.weekday())} | {dt_nzt.day} / {dt_nzt.month} / {dt_nzt.year}", inline=False)
view = RamTimeView(ctx.author.id, time_embed, dt_nzt)
await ctx.reply(embed=time_embed, view=view)
@slash_command(guild_ids=[746202728031584358], help="Very very secret command, don't tell Kitten btw! 👀")
async def kitten(self, ctx: InteractionContext):
await ctx.reply("Don't tell kitten 👀 but dogs are kinda cute uwu", ephemeral=True)
def setup(client: EpicBot):
client.add_cog(PrivateCmds(client))
|
py | 1a374568d1674ef0809ad94034a481abfc997f7a | class Restaurant:
def __init__(self, name, cuisine_type):
self.name = name
self.type = cuisine_type
def describe_restaurant(self):
print(f"{self.name} is a restaurant that sells {self.type} food")
def open_restaurant(self):
print("The restaurant is open")
bob = Restaurant("Bob's Burgers", "American")
joe = Restaurant("Joe's Salami", "Croatian")
steve = Restaurant("Steve's Calamari", "Greek")
joe.describe_restaurant()
steve.describe_restaurant()
bob.describe_restaurant()
|
py | 1a37473fc1f21535b28b1230f62c22debec74483 | # coding: utf-8
# /*##########################################################################
#
# Copyright (c) 2017 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
__authors__ = ["M Glass - ESRF ISDD Advanced Analysis and Modelling"]
__license__ = "MIT"
__date__ = "20/04/2017"
import numpy as np
from comsyl.autocorrelation.PhaseSpaceDensity import PhaseSpaceDensity
from comsyl.autocorrelation.AutocorrelationBuilderStrategies import BuilderStrategyConvolution, BuilderStrategyPython
from comsyl.utils.Logger import log
class AutocorrelationBuilder(object):
def __init__(self, N_e, sigma_matrix, weighted_fields, x_coordinates, y_coordinates, k, strategy=None):
self._N_e = N_e
self._sigma_matrix = sigma_matrix
self._density = PhaseSpaceDensity(sigma_matrix, k)
self._weighted_fields = weighted_fields.copy()
self._field_x_coordinates = x_coordinates.copy()
self._field_y_coordinates = y_coordinates.copy()
self._x_coordinates = x_coordinates.copy() # self._minkowskiSum(x_coordinates)
self._y_coordinates = y_coordinates.copy() # self._minkowskiSum(y_coordinates)
if strategy is None:
if self._density.isAlphaZero():
strategy = BuilderStrategyConvolution
else:
log("Found alpha not equal to zero. Can not use convolutions.")
strategy = BuilderStrategyPython
self.setStrategy(strategy)
self.setAllCoordinates(self._field_x_coordinates, self._field_y_coordinates)
self.setDoNotUseConvolutions(False)
def setStrategy(self, strategy):
log("Setting autocorrelation strategy: %s" % str(strategy.__name__))
self._strategy = strategy(self._x_coordinates, self._y_coordinates,
self._density,
self._field_x_coordinates, self._field_y_coordinates,
self._weighted_fields)
def _minkowskiSum(self, coordinates):
delta_coordinate = coordinates[1] - coordinates[0]
interval = delta_coordinate * coordinates.shape[0]
mink_sum = np.linspace(-interval, interval, coordinates.shape[0] * 1)#2-1)
return mink_sum
def xCoordinates(self):
return self._x_coordinates
def yCoordinates(self):
return self._y_coordinates
def staticElectronDensity(self):
return self._strategy._rho
def setAllCoordinates(self, x_coordinates, y_coordinates):
self._strategy.setAllCoordinates(x_coordinates, y_coordinates)
def evaluate(self, r_1, r_2):
return self._strategy.evaluate(r_1, r_2)
def evaluateAllR_2(self, r_1):
return self._strategy.evaluateAllR_2(r_1)
def calculateIntensity(self):
return self._strategy.calculateIntensity()
def setDoNotUseConvolutions(self, do_not_use_convolutions):
self._strategy.setDoNotUseConvolutions(do_not_use_convolutions) |
py | 1a3749d263e1cc9e2750b96a3d66d2a7346cb790 | #!/usr/bin/env python3
"""Using chain()
"""
# end_pymotw_header
from itertools import *
for i in chain([1, 2, 3], ["a", "b", "c"]):
print(i, end=" ")
print()
|
py | 1a374b4cb1a3a25fbd4d6cc61f57ca27b60156fb | #
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Infrastructure of options for Koalas.
"""
from contextlib import contextmanager
import json
from typing import Union, Any, Tuple, Callable, List, Dict
from pyspark._globals import _NoValue, _NoValueType
from databricks.koalas.utils import default_session
__all__ = ["get_option", "set_option", "reset_option", "options", "option_context"]
class Option:
"""
Option class that defines an option with related properties.
This class holds all information relevant to the one option. Also,
Its instance can validate if the given value is acceptable or not.
It is currently for internal usage only.
Parameters
----------
key: str, keyword-only argument
the option name to use.
doc: str, keyword-only argument
the documentation for the current option.
default: Any, keyword-only argument
default value for this option.
types: Union[Tuple[type, ...], type], keyword-only argument
default is str. It defines the expected types for this option. It is
used with `isinstance` to validate the given value to this option.
check_func: Tuple[Callable[[Any], bool], str], keyword-only argument
        default is a function that always returns `True` with an empty string.
It defines:
- a function to check the given value to this option
- the error message to show when this check is failed
When new value is set to this option, this function is called to check
if the given value is valid.
Examples
--------
>>> option = Option(
... key='option.name',
... doc="this is a test option",
... default="default",
... types=(float, int),
... check_func=(lambda v: v > 0, "should be a positive float"))
>>> option.validate('abc') # doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
ValueError: The value for option 'option.name' was <class 'str'>;
however, expected types are [(<class 'float'>, <class 'int'>)].
>>> option.validate(-1.1)
Traceback (most recent call last):
...
ValueError: should be a positive float
>>> option.validate(1.1)
"""
def __init__(
self,
*,
key: str,
doc: str,
default: Any,
types: Union[Tuple[type, ...], type] = str,
check_func: Tuple[Callable[[Any], bool], str] = (lambda v: True, "")
):
self.key = key
self.doc = doc
self.default = default
self.types = types
self.check_func = check_func
def validate(self, v: Any) -> None:
"""
Validate the given value and throw an exception with related information such as key.
"""
if not isinstance(v, self.types):
raise ValueError(
"The value for option '%s' was %s; however, expected types are "
"[%s]." % (self.key, type(v), str(self.types))
)
if not self.check_func[0](v):
raise ValueError(self.check_func[1])
# Available options.
#
# NOTE: if you are fixing or adding an option here, make sure you execute `show_options()` and
# copy & paste the results into show_options 'docs/source/user_guide/options.rst' as well.
# See the examples below:
# >>> from databricks.koalas.config import show_options
# >>> show_options()
_options = [
Option(
key="display.max_rows",
doc=(
"This sets the maximum number of rows Koalas should output when printing out "
"various output. For example, this value determines the number of rows to be "
"shown at the repr() in a dataframe. Set `None` to unlimit the input length. "
"Default is 1000."
),
default=1000,
types=(int, type(None)),
check_func=(
lambda v: v is None or v >= 0,
"'display.max_rows' should be greater than or equal to 0.",
),
),
Option(
key="compute.max_rows",
doc=(
"'compute.max_rows' sets the limit of the current DataFrame. Set `None` to unlimit "
"the input length. When the limit is set, it is executed by the shortcut by "
"collecting the data into driver side, and then using pandas API. If the limit is "
"unset, the operation is executed by PySpark. Default is 1000."
),
default=1000,
types=(int, type(None)),
check_func=(
lambda v: v is None or v >= 0,
"'compute.max_rows' should be greater than or equal to 0.",
),
),
Option(
key="compute.shortcut_limit",
doc=(
"'compute.shortcut_limit' sets the limit for a shortcut. "
"It computes specified number of rows and use its schema. When the dataframe "
"length is larger than this limit, Koalas uses PySpark to compute."
),
default=1000,
types=int,
check_func=(
lambda v: v >= 0,
"'compute.shortcut_limit' should be greater than or equal to 0.",
),
),
Option(
key="compute.ops_on_diff_frames",
doc=(
"This determines whether or not to operate between two different dataframes. "
"For example, 'combine_frames' function internally performs a join operation which "
"can be expensive in general. So, if `compute.ops_on_diff_frames` variable is not "
"True, that method throws an exception."
),
default=False,
types=bool,
),
Option(
key="compute.default_index_type",
doc=("This sets the default index type: sequence, distributed and distributed-sequence."),
default="sequence",
types=str,
check_func=(
lambda v: v in ("sequence", "distributed", "distributed-sequence"),
"Index type should be one of 'sequence', 'distributed', 'distributed-sequence'.",
),
),
Option(
key="compute.ordered_head",
doc=(
"'compute.ordered_head' sets whether or not to operate head with natural ordering. "
"Koalas does not guarantee the row ordering so `head` could return some rows from "
"distributed partitions. If 'compute.ordered_head' is set to True, Koalas performs "
"natural ordering beforehand, but it will cause a performance overhead."
),
default=False,
types=bool,
),
Option(
key="plotting.max_rows",
doc=(
"'plotting.max_rows' sets the visual limit on top-n-based plots such as `plot.bar` "
"and `plot.pie`. If it is set to 1000, the first 1000 data points will be used "
"for plotting. Default is 1000."
),
default=1000,
types=int,
check_func=(
            lambda v: v >= 0,
"'plotting.max_rows' should be greater than or equal to 0.",
),
),
Option(
key="plotting.sample_ratio",
doc=(
"'plotting.sample_ratio' sets the proportion of data that will be plotted for sample-"
"based plots such as `plot.line` and `plot.area`. "
"This option defaults to 'plotting.max_rows' option."
),
default=None,
types=(float, type(None)),
check_func=(
lambda v: v is None or 1 >= v >= 0,
"'plotting.sample_ratio' should be 1.0 >= value >= 0.0.",
),
),
Option(
key="plotting.backend",
doc=(
"Backend to use for plotting. Default is matplotlib. "
"Supports any package that has a top-level `.plot` method. "
"Some options are: [matplotlib, plotly, pandas_bokeh, pandas_altair]."
),
default="matplotlib",
types=str,
),
] # type: List[Option]
_options_dict = dict(zip((option.key for option in _options), _options)) # type: Dict[str, Option]
_key_format = "koalas.{}".format
class OptionError(AttributeError, KeyError):
pass
def show_options():
"""
Make a pretty table that can be copied and pasted into public documentation.
This is currently for an internal purpose.
Examples
--------
>>> show_options() # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
================... =======... =====================...
Option Default Description
================... =======... =====================...
display.max_rows 1000 This sets the maximum...
...
================... =======... =====================...
"""
import textwrap
header = ["Option", "Default", "Description"]
row_format = "{:<31} {:<14} {:<53}"
print(row_format.format("=" * 31, "=" * 14, "=" * 53))
print(row_format.format(*header))
print(row_format.format("=" * 31, "=" * 14, "=" * 53))
for option in _options:
doc = textwrap.fill(option.doc, 53)
formatted = "".join([line + "\n" + (" " * 47) for line in doc.split("\n")]).rstrip()
print(row_format.format(option.key, repr(option.default), formatted))
print(row_format.format("=" * 31, "=" * 14, "=" * 53))
def get_option(key: str, default: Union[Any, _NoValueType] = _NoValue) -> Any:
"""
Retrieves the value of the specified option.
Parameters
----------
key : str
The key which should match a single option.
default : object
The default value if the option is not set yet. The value should be JSON serializable.
Returns
-------
result : the value of the option
Raises
------
OptionError : if no such option exists and the default is not provided
"""
_check_option(key)
if default is _NoValue:
default = _options_dict[key].default
_options_dict[key].validate(default)
return json.loads(default_session().conf.get(_key_format(key), default=json.dumps(default)))
def set_option(key: str, value: Any) -> None:
"""
Sets the value of the specified option.
Parameters
----------
key : str
The key which should match a single option.
value : object
New value of option. The value should be JSON serializable.
Returns
-------
None
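    Examples
    --------
    >>> set_option('display.max_rows', 2000)
    >>> get_option('display.max_rows')
    2000
    >>> reset_option('display.max_rows')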
"""
_check_option(key)
_options_dict[key].validate(value)
default_session().conf.set(_key_format(key), json.dumps(value))
def reset_option(key: str) -> None:
"""
Reset one option to their default value.
Pass "all" as argument to reset all options.
Parameters
----------
key : str
If specified only option will be reset.
Returns
-------
None
"""
_check_option(key)
default_session().conf.unset(_key_format(key))
@contextmanager
def option_context(*args):
"""
Context manager to temporarily set options in the `with` statement context.
You need to invoke as ``option_context(pat, val, [(pat, val), ...])``.
Examples
--------
>>> with option_context('display.max_rows', 10, 'compute.max_rows', 5):
... print(get_option('display.max_rows'), get_option('compute.max_rows'))
10 5
>>> print(get_option('display.max_rows'), get_option('compute.max_rows'))
1000 1000
"""
if len(args) == 0 or len(args) % 2 != 0:
raise ValueError("Need to invoke as option_context(pat, val, [(pat, val), ...]).")
opts = dict(zip(args[::2], args[1::2]))
orig_opts = {key: get_option(key) for key in opts}
try:
for key, value in opts.items():
set_option(key, value)
yield
finally:
for key, value in orig_opts.items():
set_option(key, value)
def _check_option(key: str) -> None:
if key not in _options_dict:
raise OptionError(
"No such option: '{}'. Available options are [{}]".format(
key, ", ".join(list(_options_dict.keys()))
)
)
class DictWrapper:
""" provide attribute-style access to a nested dict"""
def __init__(self, d, prefix=""):
object.__setattr__(self, "d", d)
object.__setattr__(self, "prefix", prefix)
def __setattr__(self, key, val):
prefix = object.__getattribute__(self, "prefix")
d = object.__getattribute__(self, "d")
if prefix:
prefix += "."
canonical_key = prefix + key
candidates = [
k for k in d.keys() if all(x in k.split(".") for x in canonical_key.split("."))
]
if len(candidates) == 1 and candidates[0] == canonical_key:
return set_option(canonical_key, val)
else:
raise OptionError(
"No such option: '{}'. Available options are [{}]".format(
key, ", ".join(list(_options_dict.keys()))
)
)
def __getattr__(self, key):
prefix = object.__getattribute__(self, "prefix")
d = object.__getattribute__(self, "d")
if prefix:
prefix += "."
canonical_key = prefix + key
candidates = [
k for k in d.keys() if all(x in k.split(".") for x in canonical_key.split("."))
]
if len(candidates) == 1 and candidates[0] == canonical_key:
return get_option(canonical_key)
elif len(candidates) == 0:
raise OptionError(
"No such option: '{}'. Available options are [{}]".format(
key, ", ".join(list(_options_dict.keys()))
)
)
else:
return DictWrapper(d, canonical_key)
def __dir__(self):
prefix = object.__getattribute__(self, "prefix")
d = object.__getattribute__(self, "d")
if prefix == "":
candidates = d.keys()
offset = 0
else:
candidates = [k for k in d.keys() if all(x in k.split(".") for x in prefix.split("."))]
offset = len(prefix) + 1 # prefix (e.g. "compute.") to trim.
return [c[offset:] for c in candidates]
options = DictWrapper(_options_dict)
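# attribute-style shortcut: e.g. `options.display.max_rows = 100` is equivalent to
# set_option("display.max_rows", 100)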
|
py | 1a374c5ba2bfe146a8fc7f6c7c3a43db8b6679cb | #
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
from setuptools import find_packages, setup
MAIN_REQUIREMENTS = [
"airbyte-protocol",
"base-python",
"backoff==1.10.0",
"pendulum==1.2.0",
"requests==2.25.1",
]
TEST_REQUIREMENTS = ["pytest", "requests_mock==1.8.0"]
setup(
name="source_zendesk_talk",
description="Source implementation for Zendesk Talk.",
author="Airbyte",
author_email="[email protected]",
packages=find_packages(),
install_requires=MAIN_REQUIREMENTS + TEST_REQUIREMENTS,
package_data={"": ["*.json", "schemas/*.json"]},
)
|
py | 1a374cb3dff27a95ad5e4e402541d09db6bfaf5d | """
ASGI config for jimmyapp project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/dev/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'jimmyapp.settings')
application = get_asgi_application()
|
py | 1a374ddbc632401f29ed515527c24286f6a4c47a | #!/usr/bin/env python
"""
This application presents a 'console' prompt to the user asking for read commands
which create ReadPropertyRequest PDUs, then lines up the coorresponding ReadPropertyACK
and prints the value.
"""
import sys
from bacpypes.debugging import bacpypes_debugging, ModuleLogger
from bacpypes.consolelogging import ConfigArgumentParser
from bacpypes.consolecmd import ConsoleCmd
from bacpypes.core import run, enable_sleeping
from bacpypes.iocb import IOCB
from bacpypes.pdu import Address
from bacpypes.apdu import ReadPropertyRequest, ReadPropertyACK
from bacpypes.primitivedata import Unsigned
from bacpypes.constructeddata import Array
from bacpypes.app import BIPSimpleApplication
from bacpypes.object import get_object_class, get_datatype
from bacpypes.local.device import LocalDeviceObject
# some debugging
_debug = 0
_log = ModuleLogger(globals())
# globals
this_application = None
#
# ReadPropertyConsoleCmd
#
class ReadPropertyConsoleCmd(ConsoleCmd):
def do_read(self, args):
"""read <addr> <type> <inst> <prop> [ <indx> ]"""
args = args.split()
if _debug: ReadPropertyConsoleCmd._debug("do_read %r", args)
try:
addr, obj_type, obj_inst, prop_id = args[:4]
if obj_type.isdigit():
obj_type = int(obj_type)
elif not get_object_class(obj_type):
raise ValueError("unknown object type")
obj_inst = int(obj_inst)
datatype = get_datatype(obj_type, prop_id)
if not datatype:
raise ValueError("invalid property for object type")
# build a request
request = ReadPropertyRequest(
objectIdentifier=(obj_type, obj_inst),
propertyIdentifier=prop_id,
)
request.pduDestination = Address(addr)
if len(args) == 5:
request.propertyArrayIndex = int(args[4])
if _debug: ReadPropertyConsoleCmd._debug(" - request: %r", request)
# make an IOCB
iocb = IOCB(request)
if _debug: ReadPropertyConsoleCmd._debug(" - iocb: %r", iocb)
# give it to the application
this_application.request_io(iocb)
# wait for it to complete
iocb.wait()
# do something for success
if iocb.ioResponse:
apdu = iocb.ioResponse
# should be an ack
if not isinstance(apdu, ReadPropertyACK):
if _debug: ReadPropertyConsoleCmd._debug(" - not an ack")
return
# find the datatype
datatype = get_datatype(apdu.objectIdentifier[0], apdu.propertyIdentifier)
if _debug: ReadPropertyConsoleCmd._debug(" - datatype: %r", datatype)
if not datatype:
raise TypeError("unknown datatype")
# special case for array parts, others are managed by cast_out
if issubclass(datatype, Array) and (apdu.propertyArrayIndex is not None):
if apdu.propertyArrayIndex == 0:
value = apdu.propertyValue.cast_out(Unsigned)
else:
value = apdu.propertyValue.cast_out(datatype.subtype)
else:
value = apdu.propertyValue.cast_out(datatype)
if _debug: ReadPropertyConsoleCmd._debug(" - value: %r", value)
sys.stdout.write(str(value) + '\n')
if hasattr(value, 'debug_contents'):
value.debug_contents(stream=sys.stdout)
sys.stdout.flush()
# do something for error/reject/abort
if iocb.ioError:
sys.stdout.write(str(iocb.ioError) + '\n')
except Exception, error:
ReadPropertyConsoleCmd._exception("exception: %r", error)
def do_rtn(self, args):
"""rtn <addr> <net> ... """
args = args.split()
if _debug: ReadPropertyConsoleCmd._debug("do_rtn %r", args)
# safe to assume only one adapter
adapter = this_application.nsap.adapters[0]
if _debug: ReadPropertyConsoleCmd._debug(" - adapter: %r", adapter)
# provide the address and a list of network numbers
router_address = Address(args[0])
network_list = [int(arg) for arg in args[1:]]
# pass along to the service access point
this_application.nsap.add_router_references(adapter, router_address, network_list)
bacpypes_debugging(ReadPropertyConsoleCmd)
#
# __main__
#
def main():
global this_application
# check the version
if (sys.version_info[:2] != (2, 5)):
sys.stderr.write("Python 2.5 only\n")
sys.exit(1)
# parse the command line arguments
args = ConfigArgumentParser(description=__doc__).parse_args()
if _debug: _log.debug("initialization")
if _debug: _log.debug(" - args: %r", args)
# make a device object
this_device = LocalDeviceObject(
objectName=args.ini.objectname,
objectIdentifier=int(args.ini.objectidentifier),
maxApduLengthAccepted=int(args.ini.maxapdulengthaccepted),
segmentationSupported=args.ini.segmentationsupported,
vendorIdentifier=int(args.ini.vendoridentifier),
)
# make a simple application
this_application = BIPSimpleApplication(this_device, args.ini.address)
# get the services supported
services_supported = this_application.get_services_supported()
if _debug: _log.debug(" - services_supported: %r", services_supported)
# let the device object know
this_device.protocolServicesSupported = services_supported.value
# make a console
this_console = ReadPropertyConsoleCmd()
if _debug: _log.debug(" - this_console: %r", this_console)
# enable sleeping will help with threads
enable_sleeping()
_log.debug("running")
run()
_log.debug("fini")
if __name__ == "__main__":
main()
|
py | 1a374e44cf09556bb87679f57736df2344d9c6ce | from flask_cors import CORS
from flask import Flask, render_template, request
from app.config.middleware import checkLogin
from app.controllers import misc, user, dataset, klasifikasi, algoritma, username
import os
## the modules used by the app are imported above
app = Flask(__name__)
CORS(app)
##Support Vector Machine
@app.route("/algoritma/svm")
@checkLogin ## checks whether the user has logged in or not
def svm_index():
return algoritma.supportVectorMachine(request.args)
##Backpropagation
@app.route("/algoritma/backpro")
@checkLogin
def backpro_index():
return algoritma.backpropagation(request.args)
##crud
##Pengguna
@app.route("/users")
@checkLogin
def user_index():
    return user.index() ## this runs the index function from the user module; the module is imported above
## methods:
# GET = fetch data
# POST = store data
# PUT = update data
# DELETE = delete data
@app.route("/user/store", methods=['POST'])
@checkLogin
def user_store():
return user.store(request.form)
@app.route("/user/<int:id>/update", methods=['POST'])
@checkLogin
def user_update(id):
return user.update(request.form, id)
@app.route("/user/<int:id>/delete", methods=['POST'])
@checkLogin
def user_delete(id):
return user.delete(id)
##klasifikasi
@app.route("/klasifikasi")
@checkLogin
def klasifikasi_index():
return klasifikasi.index()
@app.route("/klasifikasi/store", methods=['POST'])
@checkLogin
def klasifikasi_store():
return klasifikasi.store(request.form)
@app.route("/klasifikasi/<int:id>/update", methods=['POST', 'PUT'])
@checkLogin
def klasifikasi_update(id):
return klasifikasi.update(request.form, id)
@app.route("/klasifikasi/<int:id>/delete", methods=['POST', 'DELETE'])
@checkLogin
def klasifikasi_delete(id):
return klasifikasi.delete(id)
##username
@app.route("/username")
@checkLogin
def username_index():
return username.index()
@app.route("/username/store", methods=['POST'])
@checkLogin
def username_store():
return username.store(request.form)
@app.route("/username/<int:id>/update", methods=['POST', 'PUT'])
@checkLogin
def username_update(id):
return username.update(request.form, id)
@app.route("/username/<int:id>/delete", methods=['POST', 'DELETE'])
@checkLogin
def username_delete(id):
return username.delete(id)
##Dataset
@app.route("/dataset")
@checkLogin
def dataset_index():
return dataset.index()
@app.route("/dataset/tanggal-scraping", methods=['GET'])
@checkLogin
def dataset_tanggal():
return dataset.updateTanggal()
@app.route("/dataset/scrape", methods=['GET'])
@checkLogin
def dataset_scrape():
return dataset.scrape()
@app.route("/dataset/store", methods=['POST'])
@checkLogin
def dataset_store():
return dataset.store(request.form)
@app.route("/dataset/change-class", methods=["GET"])
@checkLogin
def dataset_change_class():
return dataset.changeClass(request.args)
@app.route("/dataset/<int:id>/delete", methods=['POST', 'DELETE'])
@checkLogin
def dataset_delete(id):
return dataset.delete(id)
##MISC
@app.route("/")
def index():
return misc.index()
##MISC
@app.route("/input-caption", methods=["POST"])
def input_caption():
return algoritma.prediksi_caption(request.form)
@app.route("/login")
def login():
return misc.login()
@app.route("/doLogin", methods=['POST'])
def doLogin():
return misc.doLogin(request.form)
@app.route("/logout")
def logout():
return misc.logout()
app.secret_key = '3RDLwwtFttGSxkaDHyFTmvGytBJ2MxWT8ynWm2y79G8jm9ugYxFFDPdHcBBnHp6E'
app.config['SESSION_TYPE'] = 'filesystem'
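## exposes the app metadata below (name, author, title, logo) from environment variables to every rendered template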
@app.context_processor
def inject_stage_and_region():
return dict(APP_NAME=os.environ.get("APP_NAME"),
APP_AUTHOR=os.environ.get("APP_AUTHOR"),
APP_TITLE=os.environ.get("APP_TITLE"),
APP_LOGO=os.environ.get("APP_LOGO"))
if __name__ == "__main__":
app.run(host='0.0.0.0', port=5299) |
py | 1a374f1051f5a991390b2a9a5392e08a134893b3 | # Copyright (c) 2015 Cloudera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''This module generates a docker environment for a job'''
from __future__ import division
from fabric.api import sudo, run, settings
from logging import getLogger
from os.path import join as join_path
from time import sleep
from tests.comparison.leopard.controller import (
SHOULD_BUILD_IMPALA,
SHOULD_LOAD_DATA,
SHOULD_PULL_DOCKER_IMAGE)
import random
import os
IMPALA_HOME = '/home/dev/Impala'
CORE_PATH = '/tmp/core_files'
DEFAULT_BRANCH_NAME = 'origin/cdh5-trunk'
DEFAULT_DOCKER_IMAGE_NAME = 'impala-desktop.ca.cloudera.com:5000/ubuntu-14.04:cdh5-trunk'
DOCKER_USER_NAME = 'dev'
NUM_START_ATTEMPTS = 50
NUM_FABRIC_ATTEMPTS = 50
LOG = getLogger('ImpalaDockerEnv')
def retry(func):
'''Retry decorator.'''
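  # retries the wrapped callable up to NUM_FABRIC_ATTEMPTS times, sleeping a random
  # 1..attempt_num seconds between failed attempts before re-raising the last error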
def wrapper(*args, **kwargs):
attempt_num = 0
while True:
attempt_num += 1
try:
return func(*args, **kwargs)
except:
LOG.exception('{0} exception [{1}] (try: {2})'.format(
func.__name__, args[0], attempt_num))
if attempt_num == NUM_FABRIC_ATTEMPTS:
raise
sleep_time = random.randint(1, attempt_num)
sleep(sleep_time)
return wrapper
class ImpalaDockerEnv(object):
'''Represents an Impala environemnt inside a Docker container. Used for starting
Impala, getting stack traces after a crash and keeping track of the ports on which SSH,
Postgres and Impala are running.
'''
def __init__(self, git_command):
self.ssh_port = None
self.impala_port = None
self.postgres_port = None
self.container_id = None
self.git_command = git_command
self.host = os.environ['TARGET_HOST']
self.host_username = os.environ['TARGET_HOST_USERNAME']
self.docker_image_name = os.environ.get(
'DOCKER_IMAGE_NAME', DEFAULT_DOCKER_IMAGE_NAME)
def stop_docker(self):
with settings(warn_only = True, host_string = self.host, user = self.host_username):
retry(sudo)('docker stop {0}'.format(self.container_id), pty=True)
retry(sudo)('docker rm {0}'.format(self.container_id), pty=True)
def start_new_container(self):
'''Starts a container with port forwarding for ssh, impala and postgres. '''
for _ in range(NUM_START_ATTEMPTS):
with settings(warn_only = True, host_string = self.host, user = self.host_username):
set_core_dump_location_command = \
"echo '/tmp/core_files/core.%e.%p' | sudo tee /proc/sys/kernel/core_pattern"
sudo(set_core_dump_location_command, pty=True)
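        # a single random offset yields matching ssh (55xxx), impala (56xxx) and postgres (57xxx)
        # host ports, so several containers can run on the same host without clashing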
port = random.randint(0, 999)
self.ssh_port = 55000 + port
self.impala_port = 56000 + port
self.postgres_port = 57000 + port
start_command = ''
if SHOULD_PULL_DOCKER_IMAGE:
start_command = 'docker pull {docker_image_name} && '.format(
docker_image_name = self.docker_image_name)
start_command += (
'docker run -d -t -p {postgres_port}:5432 -p {ssh_port}:22 '
'-p {impala_port}:21050 {docker_image_name} /bin/docker-boot-daemon').format(
ssh_port = self.ssh_port,
impala_port = self.impala_port,
postgres_port = self.postgres_port,
docker_image_name = self.docker_image_name)
try:
self.container_id = sudo(start_command, pty=True)
except:
LOG.exception('start_new_container')
if self.container_id is not None:
break
else:
LOG.error('Container failed to start after {0} attempts'.format(NUM_START_ATTEMPTS))
def get_git_hash(self):
'''Returns Git hash if the current commit. '''
with settings(
warn_only = True,
host_string = '{0}@{1}:{2}'.format(DOCKER_USER_NAME, self.host, self.ssh_port),
password = os.environ['DOCKER_PASSWORD']):
git_hash = retry(run)('cd {IMPALA_HOME} && git rev-parse --short HEAD'.format(
IMPALA_HOME = IMPALA_HOME))
return git_hash
def run_all(self):
with settings(
warn_only = True,
host_string = '{0}@{1}:{2}'.format(DOCKER_USER_NAME, self.host, self.ssh_port),
password = os.environ['DOCKER_PASSWORD']):
run_all_command = (
'mkdir -p {CORE_PATH} && chmod 777 {CORE_PATH} && cd {IMPALA_HOME} '
'&& source {IMPALA_HOME}/bin/impala-config.sh '
'&& {IMPALA_HOME}/bin/create-test-configuration.sh '
'&& {IMPALA_HOME}/testdata/bin/run-all.sh').format(
IMPALA_HOME = IMPALA_HOME,
CORE_PATH=CORE_PATH)
retry(run)(run_all_command, pty=False)
def build_impala(self):
'''Fetches and Builds Impala. If git_command is not present the latest version is
fetched by default. '''
build_command = None
if self.git_command:
build_command = (
'docker-boot && cd {IMPALA_HOME} && {git_command} '
'&& source {IMPALA_HOME}/bin/impala-config.sh '
'&& {IMPALA_HOME}/buildall.sh -notests').format(
git_command = self.git_command,
IMPALA_HOME = IMPALA_HOME,
CORE_PATH = CORE_PATH)
elif SHOULD_BUILD_IMPALA:
build_command = (
'docker-boot && cd {IMPALA_HOME} '
'&& git fetch --all && git checkout origin/cdh5-trunk '
'&& source {IMPALA_HOME}/bin/impala-config.sh '
'&& {IMPALA_HOME}/buildall.sh -notests').format(
IMPALA_HOME = IMPALA_HOME,
CORE_PATH = CORE_PATH)
if build_command:
with settings(
warn_only = True,
host_string = '{0}@{1}:{2}'.format(DOCKER_USER_NAME, self.host, self.ssh_port),
password = os.environ['DOCKER_PASSWORD']):
result = retry(run)(build_command, pty=False)
LOG.info('Build Complete, Result: {0}'.format(result))
def load_data(self):
if SHOULD_LOAD_DATA:
with settings(
warn_only = True,
host_string = '{0}@{1}:{2}'.format(DOCKER_USER_NAME, self.host, self.ssh_port),
password = os.environ['DOCKER_PASSWORD']):
self.start_impala()
load_command = '''cd {IMPALA_HOME} \
&& source bin/impala-config.sh \
&& ./tests/comparison/data_generator.py \
--use-postgresql --db-name=functional \
--migrate-table-names=alltypes,alltypestiny,alltypesagg migrate \
&& ./tests/comparison/data_generator.py --use-postgresql'''.format(
IMPALA_HOME=IMPALA_HOME)
result = retry(run)(load_command, pty=False)
def start_impala(self):
with settings(
warn_only = True,
host_string = '{0}@{1}:{2}'.format(DOCKER_USER_NAME, self.host, self.ssh_port),
password = os.environ['DOCKER_PASSWORD']):
start_command = ('source {IMPALA_HOME}/bin/impala-config.sh '
'&& {IMPALA_HOME}/bin/start-impala-cluster.py').format(IMPALA_HOME = IMPALA_HOME)
result = retry(run)(start_command, pty=False)
return result
def is_impala_running(self):
'''Check that exactly 3 impalads are running inside the docker instance.'''
with settings(
warn_only = True,
host_string = '{0}@{1}:{2}'.format(DOCKER_USER_NAME, self.host, self.ssh_port),
password = os.environ['DOCKER_PASSWORD']):
return retry(run)('ps aux | grep impalad').count('/service/impalad') == 3
def get_stack(self):
'''Finds the newest core file and extracts the stack trace from it using gdb. '''
IMPALAD_PATH = '{IMPALA_HOME}/be/build/debug/service/impalad'.format(
IMPALA_HOME = IMPALA_HOME)
with settings(
warn_only = True,
host_string = '{0}@{1}:{2}'.format(DOCKER_USER_NAME, self.host, self.ssh_port),
password = os.environ['DOCKER_PASSWORD']):
core_file_name = retry(run)('ls {0} -t1 | head -1'.format(CORE_PATH))
LOG.info('Core File Name: {0}'.format(core_file_name))
if 'core' not in core_file_name:
return None
core_full_path = join_path(CORE_PATH, core_file_name)
stack_trace = retry(run)('gdb {0} {1} --batch --quiet --eval-command=bt'.format(
IMPALAD_PATH, core_full_path))
self.delete_core_files()
return stack_trace
def delete_core_files(self):
'''Delete all core files. This is usually done after the stack was extracted.'''
with settings(
warn_only = True,
host_string = '{0}@{1}:{2}'.format(DOCKER_USER_NAME, self.host, self.ssh_port),
password = os.environ['DOCKER_PASSWORD']):
retry(run)('rm -f {0}/core.*'.format(CORE_PATH))
def prepare(self):
'''Create a new Impala Environment. Starts a docker container and builds Impala in it.
'''
self.start_new_container()
LOG.info('Container Started')
# Wait for the SSH service to start inside the docker instance. Usually takes 1
# second. This is simple and reliable. An alternative implementation is to poll with
# timeout if SSH was started.
sleep(10)
self.build_impala()
try:
result = self.run_all()
    except Exception:
      result = None
      LOG.info('run_all exception')
    LOG.info('Run All Complete, Result: {0}'.format(result))
self.load_data()
|
py | 1a374f755d4b8bb61a89545fba19fa568c726995 | import sys
class VirtualMachine:
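    # Simple virtual machine model: tracks its resources (ram, cpu, hdd), a status flag
    # (0 = stopped, 1 = running, 2 = suspended) and the list of processes it runs.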
def __init__(self, name, ram=1, cpu=1.3, hdd=100, os="debian"):
self.name = name
self.ram = ram
self.cpu = cpu
self.hdd = hdd
self.os = os
self.status = 0
self.proc = []
def stop(self):
self.status = 0
self.proc = []
def start(self):
self.status = 1
def suspend(self):
self.status = 2
def reboot(self):
self.stop()
self.start()
def run(self, pid, ram, cpu, hdd):
self.proc.append({
'pid' : pid,
'ram' : ram,
'cpu' : cpu,
'hdd' : hdd
}
)
        print(f' -Running process {pid}')
def ram_usage(self):
uso_ram = 0
for proceso in self.proc:
uso_ram += proceso['ram']
return round(uso_ram / self.ram * 100, 2)
def cpu_usage (self):
uso_cpu = 0
for proceso in self.proc:
uso_cpu += proceso['cpu']
return round(uso_cpu / self.cpu * 100, 2)
def hdd_usage (self):
uso_hdd = 0
for proceso in self.proc:
uso_hdd += proceso['hdd']
return round(uso_hdd / self.hdd * 100, 2)
def __str__(self):
estado = ''
if self.status == 0:
estado = 'Stopped'
elif self.status == 1:
estado = 'Running'
else:
estado = 'Suspended'
        return f'Name: {self.name} | OS: {self.os} | {estado} | RAM: {self.ram} | CPU: {self.cpu} | HDD: {self.hdd} | {self.ram_usage()}% RAM used | {self.cpu_usage()}% CPU used | {self.hdd_usage()}% HDD used'
if __name__ == '__main__':
print('═════════════════')
    print('Virtual machine 1')
print('═════════════════')
    print('1. Create the virtual machine Minas Tirith')
vm1 = VirtualMachine('Minas Tirith', 8, 2.3, 380, 'ubuntu')
print(vm1)
    print('2. Start the virtual machine')
vm1.start()
print(vm1)
    print('3. Launch processes 1, 4 and 7')
vm1.run(1, 1.7, 0.3, 20)
vm1.run(4, 4, 0.9, 100)
vm1.run(7, 0.4, 1.1, 250)
print(vm1)
    print('4. Stop the virtual machine')
vm1.stop()
print(vm1)
print(' ')
print('═════════════════')
    print('Virtual machine 2')
print('═════════════════')
    print('1. Create the virtual machine Rohan')
vm2 = VirtualMachine ('Rohan', 6, 1.9, 250, 'debian')
print(vm2)
    print('2. Start the virtual machine')
vm2.start()
print(vm2)
    print('3. Launch processes 2, 5 and 8')
vm2.run(2, 0.6, 0.7, 50)
vm2.run(5, 2.1, 0.2, 75)
vm2.run(8, 2.5, 0.4, 30)
print(vm2)
    print('4. Stop the virtual machine')
vm2.stop()
print(vm2)
print(' ')
print('═════════════════')
    print('Virtual machine 3')
print('═════════════════')
    print('1. Create the virtual machine Rivendel')
vm3 = VirtualMachine ('Rivendel', 16, 3, 1000, 'opensuse')
print(vm3)
    print('2. Start the virtual machine')
vm3.start()
print(vm3)
    print('3. Launch processes 3, 6 and 9')
vm3.run(3, 2, 1, 25)
vm3.run(6, 0.3, 0.5, 12)
vm3.run(9, 1.4, 0.8, 65)
print(vm3)
    print('4. Stop the virtual machine')
vm3.stop()
print(vm3)
|
py | 1a375047bb3d101c723fc9a47594e28aef54284d | # -*- coding: utf-8 -*-
"""
:codeauthor: Jayesh Kariya <[email protected]>
"""
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
# Import Salt Libs
import salt.modules.sdb as sdb
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase
class SdbTestCase(TestCase, LoaderModuleMockMixin):
"""
Test cases for salt.modules.sdb
"""
def setup_loader_modules(self):
return {sdb: {}}
# 'get' function tests: 1
def test_get(self):
"""
Test if it gets a value from a db, using a uri in the form of
sdb://<profile>/<key>
"""
self.assertEqual(sdb.get("sdb://salt/foo"), "sdb://salt/foo")
# 'set_' function tests: 1
def test_set(self):
"""
Test if it sets a value from a db, using a uri in the form of
sdb://<profile>/<key>
"""
self.assertFalse(sdb.set_("sdb://mymemcached/foo", "bar"))
|
py | 1a375053d1f43683c3841556092d8d1153fb199d | """
This file offers the methods to automatically retrieve the graph friendster.
The graph is automatically retrieved from the NetworkRepository repository.
References
---------------------
Please cite the following if you use the data:
```bib
@inproceedings{nr,
title = {The Network Data Repository with Interactive Graph Analytics and Visualization},
author={Ryan A. Rossi and Nesreen K. Ahmed},
booktitle = {AAAI},
url={http://networkrepository.com},
year={2015}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def Friendster(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/networkrepository",
version: str = "latest",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the friendster graph.
The graph is automatically retrieved from the NetworkRepository repository.
Parameters
-------------------
directed: bool = False
        Whether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
    load_nodes: bool = True
        Whether to load the nodes vocabulary or treat the nodes
        simply as a numeric range.
    verbose: int = 2
        Whether to show loading bars during the retrieval and building
        of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "latest"
The version of the graph to retrieve.
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
    Instance of friendster graph.
References
---------------------
Please cite the following if you use the data:
```bib
@inproceedings{nr,
title = {The Network Data Repository with Interactive Graph Analytics and Visualization},
author={Ryan A. Rossi and Nesreen K. Ahmed},
booktitle = {AAAI},
url={http://networkrepository.com},
year={2015}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="Friendster",
repository="networkrepository",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
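# Typical usage (hypothetical, assuming the package and a writable cache directory are available):
#   graph = Friendster()  # downloads the dump if needed and returns an ensmallen Graph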
|
py | 1a375290bb4ccf8c5538e58bf245331daa96f408 | """
Common options for ``minidcos docker`` commands.
"""
from typing import Callable
import click
from dcos_e2e.backends import Docker
from dcos_e2e.node import Transport
def node_transport_option(command: Callable[..., None]) -> Callable[..., None]:
"""
An option decorator for node transport options.
"""
transports = {
'ssh': Transport.SSH,
'docker-exec': Transport.DOCKER_EXEC,
}
backend_default = Docker().transport
[default_option] = [
transport for transport in transports
if transports[transport] == backend_default
]
function = click.option(
'--transport',
type=click.Choice(sorted(transports.keys())),
callback=lambda ctx, param, value: transports[value],
default=default_option,
show_default=True,
envvar='MINIDCOS_DOCKER_TRANSPORT',
help=(
'The communication transport to use. '
'On macOS the SSH transport requires IP routing to be set up. '
'See "minidcos docker setup-mac-network". '
'It also requires the "ssh" command to be available. '
'This can be provided by setting the `MINIDCOS_DOCKER_TRANSPORT` '
'environment variable. '
'When using a TTY, different transports may use different line '
'endings.'
),
)(command) # type: Callable[..., None]
return function
def wait_for_dcos_option(command: Callable[..., None]) -> Callable[..., None]:
"""
An option decorator for waiting for DC/OS to be up.
"""
function = click.option(
'--wait-for-dcos',
is_flag=True,
help=(
'Wait for DC/OS after creating the cluster. '
'This is equivalent to using "minidcos docker wait" after this '
'command. '
'"minidcos docker wait" has various options available and so may '
'be more appropriate for your use case. '
'If the chosen transport is "docker-exec", this will skip HTTP '
'checks and so the cluster may not be fully ready.'
),
)(command) # type: Callable[..., None]
return function
|
py | 1a3753b87c131952e1dceef32b45dcf952114b6c | from django.core.urlresolvers import reverse
from django.core import mail
from oscar.apps.customer.models import CommunicationEventType
from oscar.test.factories import UserFactory
from oscar.test.testcases import WebTestCase
class TestAnAdmin(WebTestCase):
def setUp(self):
self.staff = UserFactory(is_staff=True, username='1234')
self.commtype = CommunicationEventType.objects.create(
name="Password reset",
category=CommunicationEventType.USER_RELATED)
def test_can_preview_an_email(self):
list_page = self.app.get(reverse('dashboard:comms-list'),
user=self.staff)
update_page = list_page.click('Edit')
form = update_page.form
form['email_subject_template'] = 'Hello {{ user.username }}'
form['email_body_template'] = 'Hello {{ user.username }}'
form['email_body_html_template'] = 'Hello {{ user.username }}'
preview = form.submit('show_preview')
self.assertTrue('Hello 1234' in preview.content.decode('utf8'))
def test_can_send_a_preview_email(self):
list_page = self.app.get(reverse('dashboard:comms-list'),
user=self.staff)
update_page = list_page.click('Edit')
form = update_page.form
form['email_subject_template'] = 'Hello {{ user.username }}'
form['email_body_template'] = 'Hello {{ user.username }}'
form['email_body_html_template'] = 'Hello {{ user.username }}'
form['preview_email'] = '[email protected]'
form.submit('send_preview')
self.assertEqual(len(mail.outbox), 1)
|
py | 1a3753e8ecbc0641e8f39ce3652864939559fff8 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import itertools
import json
import six
from heat.api.aws import utils as aws_utils
from heat.common import exception
from heat.engine import function
from heat.engine import resource
class FindInMap(function.Function):
'''
A function for resolving keys in the template mappings.
Takes the form::
{ "Fn::FindInMap" : [ "mapping",
"key",
"value" ] }
'''
def __init__(self, stack, fn_name, args):
super(FindInMap, self).__init__(stack, fn_name, args)
try:
self._mapname, self._mapkey, self._mapvalue = self.args
except ValueError as ex:
raise KeyError(six.text_type(ex))
def result(self):
mapping = self.stack.t.maps[function.resolve(self._mapname)]
key = function.resolve(self._mapkey)
value = function.resolve(self._mapvalue)
return mapping[key][value]
class GetAZs(function.Function):
'''
A function for retrieving the availability zones.
Takes the form::
{ "Fn::GetAZs" : "<region>" }
'''
def result(self):
# TODO(therve): Implement region scoping
#region = function.resolve(self.args)
if self.stack is None:
return ['nova']
else:
return self.stack.get_availability_zones()
class ParamRef(function.Function):
'''
A function for resolving parameter references.
Takes the form::
{ "Ref" : "<param_name>" }
'''
def __init__(self, stack, fn_name, args):
super(ParamRef, self).__init__(stack, fn_name, args)
self.parameters = self.stack.parameters
def result(self):
param_name = function.resolve(self.args)
try:
return self.parameters[param_name]
except KeyError:
raise exception.InvalidTemplateReference(resource=param_name,
key='unknown')
class ResourceRef(function.Function):
'''
A function for resolving resource references.
Takes the form::
{ "Ref" : "<resource_name>" }
'''
def _resource(self, path='unknown'):
resource_name = function.resolve(self.args)
try:
return self.stack[resource_name]
except KeyError:
raise exception.InvalidTemplateReference(resource=resource_name,
key=path)
def dependencies(self, path):
return itertools.chain(super(ResourceRef, self).dependencies(path),
[self._resource(path)])
def result(self):
return self._resource().FnGetRefId()
def Ref(stack, fn_name, args):
'''
A function for resolving parameters or resource references.
Takes the form::
{ "Ref" : "<param_name>" }
or::
{ "Ref" : "<resource_name>" }
'''
if args in stack:
RefClass = ResourceRef
else:
RefClass = ParamRef
return RefClass(stack, fn_name, args)
class GetAtt(function.Function):
'''
A function for resolving resource attributes.
Takes the form::
{ "Fn::GetAtt" : [ "<resource_name>",
"<attribute_name" ] }
'''
def __init__(self, stack, fn_name, args):
super(GetAtt, self).__init__(stack, fn_name, args)
self._resource_name, self._attribute = self._parse_args()
def _parse_args(self):
try:
resource_name, attribute = self.args
except ValueError:
raise ValueError(_('Arguments to "%s" must be of the form '
'[resource_name, attribute]') % self.fn_name)
return resource_name, attribute
def _resource(self, path='unknown'):
resource_name = function.resolve(self._resource_name)
try:
return self.stack[resource_name]
except KeyError:
raise exception.InvalidTemplateReference(resource=resource_name,
key=path)
def dependencies(self, path):
return itertools.chain(super(GetAtt, self).dependencies(path),
[self._resource(path)])
def validate(self):
super(GetAtt, self).validate()
res = self._resource()
attr = function.resolve(self._attribute)
if (type(res).FnGetAtt == resource.Resource.FnGetAtt and
attr not in res.attributes_schema.keys()):
raise exception.InvalidTemplateAttribute(
resource=self._resource_name, key=attr)
def result(self):
attribute = function.resolve(self._attribute)
r = self._resource()
if (r.action in (r.CREATE, r.ADOPT, r.SUSPEND, r.RESUME, r.UPDATE)):
return r.FnGetAtt(attribute)
else:
return None
class Select(function.Function):
'''
A function for selecting an item from a list or map.
Takes the form (for a list lookup)::
{ "Fn::Select" : [ "<index>", [ "<value_1>", "<value_2>", ... ] ] }
Takes the form (for a map lookup)::
{ "Fn::Select" : [ "<index>", { "<key_1>": "<value_1>", ... } ] }
If the selected index is not found, this function resolves to an empty
string.
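    For example, { "Fn::Select" : [ "1", [ "apples", "grapes" ] ] } resolves
    to "grapes".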
'''
def __init__(self, stack, fn_name, args):
super(Select, self).__init__(stack, fn_name, args)
try:
self._lookup, self._strings = self.args
except ValueError:
raise ValueError(_('Arguments to "%s" must be of the form '
'[index, collection]') % self.fn_name)
def result(self):
index = function.resolve(self._lookup)
try:
index = int(index)
except (ValueError, TypeError):
pass
strings = function.resolve(self._strings)
if strings == '':
# an empty string is a common response from other
# functions when result is not currently available.
# Handle by returning an empty string
return ''
if isinstance(strings, basestring):
# might be serialized json.
try:
strings = json.loads(strings)
except ValueError as json_ex:
fmt_data = {'fn_name': self.fn_name,
'err': json_ex}
raise ValueError(_('"%(fn_name)s": %(err)s') % fmt_data)
if isinstance(strings, collections.Mapping):
if not isinstance(index, basestring):
raise TypeError(_('Index to "%s" must be a string') %
self.fn_name)
return strings.get(index, '')
if (isinstance(strings, collections.Sequence) and
not isinstance(strings, basestring)):
if not isinstance(index, (int, long)):
raise TypeError(_('Index to "%s" must be an integer') %
self.fn_name)
try:
return strings[index]
except IndexError:
return ''
if strings is None:
return ''
raise TypeError(_('Arguments to %s not fully resolved') %
self.fn_name)
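# Added illustrative sketch (not in the original module): the resolution
# behaviour documented above, shown with hypothetical template snippets.
#   { "Fn::Select" : [ "1", [ "apples", "grapes", "oranges" ] ] }   resolves to "grapes"
#   { "Fn::Select" : [ "red", { "red": "a", "blue": "b" } ] }       resolves to "a"
#   An out-of-range index or a missing key resolves to the empty string "".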
class Join(function.Function):
'''
A function for joining strings.
Takes the form::
{ "Fn::Join" : [ "<delim>", [ "<string_1>", "<string_2>", ... ] }
And resolves to::
"<string_1><delim><string_2><delim>..."
'''
def __init__(self, stack, fn_name, args):
super(Join, self).__init__(stack, fn_name, args)
example = '"%s" : [ " ", [ "str1", "str2"]]' % self.fn_name
fmt_data = {'fn_name': self.fn_name,
'example': example}
if isinstance(self.args, (basestring, collections.Mapping)):
raise TypeError(_('Incorrect arguments to "%(fn_name)s" '
'should be: %(example)s') % fmt_data)
try:
self._delim, self._strings = self.args
except ValueError:
raise ValueError(_('Incorrect arguments to "%(fn_name)s" '
'should be: %(example)s') % fmt_data)
def result(self):
strings = function.resolve(self._strings)
if strings is None:
strings = []
if (isinstance(strings, basestring) or
not isinstance(strings, collections.Sequence)):
raise TypeError(_('"%s" must operate on a list') % self.fn_name)
delim = function.resolve(self._delim)
if not isinstance(delim, basestring):
raise TypeError(_('"%s" delimiter must be a string') %
self.fn_name)
def ensure_string(s):
if s is None:
return ''
if not isinstance(s, basestring):
raise TypeError(
_('Items to join must be strings %s') % (repr(s)[:200]))
return s
return delim.join(ensure_string(s) for s in strings)
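# Added illustrative sketch (not in the original module): a hypothetical
# Fn::Join and its resolution.
#   { "Fn::Join" : [ ", ", [ "one", "two", "three" ] ] }  resolves to "one, two, three"
#   None items are coerced to empty strings, so joining [ "a", null, "b" ]
#   with delimiter "-" resolves to "a--b".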
class Split(function.Function):
'''
A function for splitting strings.
Takes the form::
{ "Fn::Split" : [ "<delim>", "<string_1><delim><string_2>..." ] }
And resolves to::
[ "<string_1>", "<string_2>", ... ]
'''
def __init__(self, stack, fn_name, args):
super(Split, self).__init__(stack, fn_name, args)
example = '"%s" : [ ",", "str1,str2"]]' % self.fn_name
fmt_data = {'fn_name': self.fn_name,
'example': example}
if isinstance(self.args, (basestring, collections.Mapping)):
raise TypeError(_('Incorrect arguments to "%(fn_name)s" '
'should be: %(example)s') % fmt_data)
try:
self._delim, self._strings = self.args
except ValueError:
raise ValueError(_('Incorrect arguments to "%(fn_name)s" '
'should be: %(example)s') % fmt_data)
def result(self):
strings = function.resolve(self._strings)
if not isinstance(self._delim, basestring):
raise TypeError(_("Delimiter for %s must be string") %
self.fn_name)
if not isinstance(strings, basestring):
raise TypeError(_("String to split must be string; got %s") %
type(strings))
return strings.split(self._delim)
class Replace(function.Function):
'''
A function for performing string substitutions.
Takes the form::
{ "Fn::Replace" : [
{ "<key_1>": "<value_1>", "<key_2>": "<value_2>", ... },
"<key_1> <key_2>"
] }
And resolves to::
"<value_1> <value_2>"
This is implemented using python str.replace on each key. The order in
which replacements are performed is undefined.
'''
def __init__(self, stack, fn_name, args):
super(Replace, self).__init__(stack, fn_name, args)
self._mapping, self._string = self._parse_args()
if not isinstance(self._mapping, collections.Mapping):
raise TypeError(_('"%s" parameters must be a mapping') %
self.fn_name)
def _parse_args(self):
example = ('{"%s": '
'[ {"$var1": "foo", "%%var2%%": "bar"}, '
'"$var1 is %%var2%%"]}' % self.fn_name)
fmt_data = {'fn_name': self.fn_name,
'example': example}
if isinstance(self.args, (basestring, collections.Mapping)):
raise TypeError(_('Incorrect arguments to "%(fn_name)s" '
'should be: %(example)s') % fmt_data)
try:
mapping, string = self.args
except ValueError:
raise ValueError(_('Incorrect arguments to "%(fn_name)s" '
'should be: %(example)s') % fmt_data)
else:
return mapping, string
def result(self):
template = function.resolve(self._string)
mapping = function.resolve(self._mapping)
if not isinstance(template, basestring):
raise TypeError(_('"%s" template must be a string') % self.fn_name)
if not isinstance(mapping, collections.Mapping):
raise TypeError(_('"%s" params must be a map') % self.fn_name)
def replace(string, change):
placeholder, value = change
if not isinstance(placeholder, basestring):
raise TypeError(_('"%s" param placeholders must be strings') %
self.fn_name)
if value is None:
value = ''
if not isinstance(value, (basestring, int, long, float, bool)):
raise TypeError(_('"%s" params must be strings or numbers') %
self.fn_name)
return string.replace(placeholder, unicode(value))
return reduce(replace, six.iteritems(mapping), template)
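# Added illustrative sketch (not in the original module): a hypothetical
# Fn::Replace and its resolution.
#   { "Fn::Replace" : [ { "$var1": "foo", "%var2%": "bar" }, "$var1 is %var2%" ] }
#   resolves to "foo is bar". Numeric values are converted to strings and
#   None values are substituted as empty strings.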
class Base64(function.Function):
'''
A placeholder function for converting to base64.
Takes the form::
{ "Fn::Base64" : "<string>" }
This function actually performs no conversion. It is included for the
benefit of templates that convert UserData to Base64. Heat accepts UserData
in plain text.
'''
def result(self):
resolved = function.resolve(self.args)
if not isinstance(resolved, basestring):
raise TypeError(_('"%s" argument must be a string') % self.fn_name)
return resolved
class MemberListToMap(function.Function):
'''
A function for converting lists containing enumerated keys and values to
a mapping.
Takes the form::
{ 'Fn::MemberListToMap' : [ 'Name',
'Value',
[ '.member.0.Name=<key_0>',
'.member.0.Value=<value_0>',
... ] ] }
And resolves to::
{ "<key_0>" : "<value_0>", ... }
The first two arguments are the names of the key and value.
'''
def __init__(self, stack, fn_name, args):
super(MemberListToMap, self).__init__(stack, fn_name, args)
try:
self._keyname, self._valuename, self._list = self.args
except ValueError:
correct = '''
{'Fn::MemberListToMap': ['Name', 'Value',
['.member.0.Name=key',
'.member.0.Value=door']]}
'''
raise TypeError(_('Wrong Arguments try: "%s"') % correct)
if not isinstance(self._keyname, basestring):
raise TypeError(_('%s Key Name must be a string') % self.fn_name)
if not isinstance(self._valuename, basestring):
raise TypeError(_('%s Value Name must be a string') % self.fn_name)
def result(self):
member_list = function.resolve(self._list)
if not isinstance(member_list, collections.Iterable):
raise TypeError(_('Member list must be a list'))
def item(s):
if not isinstance(s, basestring):
raise TypeError(_("Member list items must be strings"))
return s.split('=', 1)
partials = dict(item(s) for s in member_list)
return aws_utils.extract_param_pairs(partials,
prefix='',
keyname=self._keyname,
valuename=self._valuename)
class ResourceFacade(function.Function):
'''
A function for obtaining data from the facade resource from within the
corresponding provider template.
Takes the form::
{ "Fn::ResourceFacade": "<attribute_type>" }
where the valid attribute types are "Metadata", "DeletionPolicy" and
"UpdatePolicy".
'''
_RESOURCE_ATTRIBUTES = (
METADATA, DELETION_POLICY, UPDATE_POLICY,
) = (
'Metadata', 'DeletionPolicy', 'UpdatePolicy'
)
def __init__(self, stack, fn_name, args):
super(ResourceFacade, self).__init__(stack, fn_name, args)
if self.args not in self._RESOURCE_ATTRIBUTES:
fmt_data = {'fn_name': self.fn_name,
'allowed': ', '.join(self._RESOURCE_ATTRIBUTES)}
raise ValueError(_('Incorrect arguments to "%(fn_name)s" '
'should be one of: %(allowed)s') % fmt_data)
def result(self):
attr = function.resolve(self.args)
if attr == self.METADATA:
return self.stack.parent_resource.metadata_get()
elif attr == self.UPDATE_POLICY:
up = self.stack.parent_resource.t.get('UpdatePolicy', {})
return function.resolve(up)
elif attr == self.DELETION_POLICY:
dp = self.stack.parent_resource.t.deletion_policy()
return function.resolve(dp)
|
py | 1a37555e7f09b70747389d72133cd5f215d7d985 | import hashlib
import itertools
import logging
from collections import OrderedDict
from typing import Any, Generator, Iterable, List, Mapping, Type, Union
logger = logging.getLogger(__name__)
def make_hash(data: Union[List, OrderedDict]) -> str:
return hashlib.md5(str(data).encode()).hexdigest()
def ensure_list(value: Any) -> List[Any]:
""" Convert or unpack any iterable into a list, with the exception of mappings.
If the passed value is either a mapping or not an iterable, it is returned
wrapped in a list.
Example:
>>> iterable = [1,2,3]
>>> ensure_iterable(iterable)
>>> [1,2,3]
>>> mapping = {"a": 1, "b": 2}
>>> ensure_iterable(mapping)
>>> [{"a": 1, "b": 2}]
>>> scalar = "hello world!"
>>> ensure_iterable(scalar)
>>> ["hello world!"]
"""
if isinstance(value, (Mapping, str)): # do not unpack dictionaries
return [value]
elif isinstance(value, Iterable):
return list(value)
else:
return [value]
def reduce(values: Iterable) -> Union[Iterable, Any]:
""" Reduce an iterable to a scalar if length is 1. Returns None if iterable
is empty. """
try:
while isinstance(values, Iterable) and not isinstance(values, (Mapping, str)):
values = list(values)
if len(values) <= 1:
values = values[0]
else:
break
return values
except IndexError:
return None
def chunks(iterable: Iterable, n: int = 1000, cls: Type = list) -> Generator:
""" Slice and unpack a nested iterable into a flat iterable containing a
maximum of n elements.
Arguments:
iterable {Iterable} -- items to process
Keyword Arguments:
n {int} -- max number of elements per chunk (default: 1000)
cls {Type} -- iterable type in which to cast chunks (default: list)
Yields:
Generator -- generator of iterables
"""
it = iter(iterable)
while True:
chunked = itertools.islice(it, n)
try:
first_element = next(chunked)
except StopIteration:
return
yield cls(itertools.chain((first_element,), chunked))
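# Hedged usage sketch (added for illustration; not part of the original
# module). The sample data below is made up.
if __name__ == "__main__":
    print(list(chunks(range(10), n=4)))   # [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]
    print(ensure_list("hello"))           # ['hello']
    print(reduce([[42]]))                 # 42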
|
py | 1a37574a1fc2cc7dcd83edfd186118bd2b724402 | import discord
from discord.ext import commands
from logger import getLogger
l = getLogger("main")
class Curation(commands.Cog, description="Information about curating games for Flashpoint."):
def __init__(self, bot):
self.bot = bot
@commands.command(name="curation", aliases=["ct", "curation-tutorial"], brief="Curation tutorial.",
description="Curation tutorial.")
async def curation_tutorial(self, ctx: discord.ext.commands.Context):
l.debug(
f"curation tutorial command invoked from {ctx.author.id} in channel {ctx.channel.id} - {ctx.message.jump_url}")
await ctx.channel.send("Curation tutorial:\n"
"🔗 <https://bluemaxima.org/flashpoint/datahub/Curation_Tutorial>")
@commands.command(name="not-accepted", aliases=["notaccepted", "disallowed", "blacklist", "blacklisted", "na"],
brief="Not accepted curations.", description="A list of curations not accepted in Flashpoint.")
async def not_accepted(self, ctx: discord.ext.commands.Context):
l.debug(
f"not-accepted command invoked from {ctx.author.id} in channel {ctx.channel.id} - {ctx.message.jump_url}")
await ctx.channel.send("These are games/animations not allowed in Flashpoint for any reason:\n"
"🔗 <https://bluemaxima.org/flashpoint/datahub/Not_Accepted_Curations>")
@commands.command(name="meta", aliases=["curation-format", "format", "metadata", "cf"], brief="Metadata file.")
async def meta(self, ctx: discord.ext.commands.Context):
l.debug(f"meta command invoked from {ctx.author.id} in channel {ctx.channel.id} - {ctx.message.jump_url}")
await ctx.channel.send("List of Metadata Fields:\n"
"🔗 <https://bluemaxima.org/flashpoint/datahub/Curation_Format#List_of_Metadata_Fields>")
@commands.command(name="tags", brief="Tags in Flashpoint.", description="A list of tags in Flashpoint.")
async def tags(self, ctx: discord.ext.commands.Context):
l.debug(f"tags command invoked from {ctx.author.id} in channel {ctx.channel.id} - {ctx.message.jump_url}")
await ctx.channel.send("List of Tags:\n"
"🔗 <https://bluemaxima.org/flashpoint/datahub/Tags>")
@commands.command(name="lang", aliases=["langs", "languages"], brief="Language codes.",
description="A list of ISO631-1 codes from Wikipedia.")
async def lang(self, ctx: discord.ext.commands.Context):
l.debug(f"lang command invoked from {ctx.author.id} in channel {ctx.channel.id} - {ctx.message.jump_url}")
await ctx.channel.send("List of Language Codes:\n"
"🔗 <https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes>")
@commands.command(name="edits", aliases=["pending", "fixes", "pendies"], brief="Pending fixes.",
description="Information about making metadata fixes.")
async def edits(self, ctx: discord.ext.commands.Context):
l.debug(f"edits command invoked from {ctx.author.id} in channel {ctx.channel.id} - {ctx.message.jump_url}")
await ctx.channel.send("Making metadata edits:\n"
"🔗 <https://bluemaxima.org/flashpoint/datahub/Metadata_Edits>")
def setup(bot: commands.Bot):
bot.add_cog(Curation(bot))
|
py | 1a375779c4d9bbf35e36e390af6c30d895a9d4f6 | """
Using a while loop with an and conditional
"""
if __name__ == "__main__":
i = 0
while( (input("Enter your name: ")!= "your name") and i < 10):
print("Nope sorry try again")
i = i + 1
print("Done with program") |
py | 1a3757c16fbae761cfe22f6b7b8060134a40cb77 | # -*- coding: utf-8 -*-
from __future__ import print_function
import collections
from operator import itemgetter
import os.path
import oursql
import phpserialize as php
import wmflabs
def is_autopatrol(log_params):
p = php.loads(log_params)
return p["6::auto"] == 1
def get_patrol_stats(db, oldest_ts):
with db.cursor() as c:
c.execute("""select log_user_text, log_params from logging
where log_action = 'patrol' and log_timestamp > ?
order by log_timestamp
desc""",
params=[oldest_ts])
patrols = collections.Counter()
for log_user_text, log_params in c:
if not is_autopatrol(log_params):
patrols[log_user_text.decode("utf-8")] += 1
patrols_sorted = collections.OrderedDict(reversed(sorted(patrols.items(), key=itemgetter(1))))
db.close()
return patrols_sorted
def connect(db_name, addr=None):
"""
Connect to database.
Args:
db_name (str): name of db without '_p' suffix
addr (tuple): tuple like (host, port)
"""
if addr is None:
return wmflabs.db.connect(db_name)
host, port = addr
port = int(port)
return oursql.connect(db=db_name + '_p',
host=host,
port=port,
read_default_file=os.path.expanduser("~/replica.my.cnf"),
charset=None,
use_unicode=False,
)
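# Hedged usage sketch (added; not in the original script): connecting to a
# Wikimedia replica database and counting manual patrols since a timestamp.
# The database name and timestamp are illustrative only.
#   db = connect("enwiki")
#   stats = get_patrol_stats(db, "20240101000000")
#   print(stats)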
|
py | 1a37581e63d9db5a364bca8d70810b71511d2fad | """
sentry.runner.commands.config
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2015 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
import click
from sentry.runner.decorators import configuration
@click.group()
def config():
"Manage runtime config options."
@config.command()
@click.option('--silent', default=False, is_flag=True, help='Suppress extraneous output.')
@click.argument('option')
@configuration
def get(option, silent):
"Get a configuration option."
from django.conf import settings
from sentry.options import default_manager as manager
from sentry.options.manager import UnknownOption
try:
key = manager.lookup_key(option)
except UnknownOption:
raise click.ClickException('unknown option: %s' % option)
value = manager.get(key.name)
if silent:
click.echo(value)
return
# TODO(mattrobenolt): Add help to option keys
# if key.help:
# click.echo(key.help + '\n')
click.echo(' from config: %s' % settings.SENTRY_OPTIONS.get(key.name, '<not set>'))
click.echo(' current: %s' % value)
@config.command()
@click.argument('option')
@click.argument('value')
@configuration
def set(option, value):
"Set a configuration option to a new value."
from sentry import options
from sentry.options.manager import UnknownOption
try:
options.set(option, value)
except UnknownOption:
raise click.ClickException('unknown option: %s' % option)
except TypeError as e:
raise click.ClickException(unicode(e))
@config.command()
@click.option('--no-input', default=False, is_flag=True, help='Do not show confirmation.')
@click.argument('option')
@configuration
def delete(option, no_input):
"Delete/unset a configuration option."
from sentry import options
from sentry.options.manager import UnknownOption
if not no_input:
click.confirm('Are you sure you want to delete "%s"?' % option, default=False, abort=True)
try:
options.delete(option)
except UnknownOption:
raise click.ClickException('unknown option: %s' % option)
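# Hedged usage sketch (added): assuming this click group is registered under
# the `sentry` command-line entry point, the commands above would be invoked
# roughly as below; the option name is only an example.
#   sentry config get system.admin-email
#   sentry config set system.admin-email [email protected]
#   sentry config delete --no-input system.admin-email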
|
py | 1a375873f1063658aa1ff5f9a85624b295bab76b | # This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" PauliSumOp Class """
import logging
from typing import Dict, List, Optional, Set, Tuple, Union, cast
import numpy as np
from scipy.sparse import spmatrix
from qiskit.circuit import Instruction, ParameterExpression
from qiskit.quantum_info import Pauli, SparsePauliOp
from qiskit.quantum_info.operators.symplectic.pauli_table import PauliTable
from qiskit.quantum_info.operators.custom_iterator import CustomIterator
from ..exceptions import OpflowError
from ..list_ops.summed_op import SummedOp
from ..list_ops.tensored_op import TensoredOp
from ..operator_base import OperatorBase
from .primitive_op import PrimitiveOp
logger = logging.getLogger(__name__)
class PauliSumOp(PrimitiveOp):
"""Class for Operators backend by Terra's ``SparsePauliOp`` class."""
def __init__(
self,
primitive: SparsePauliOp,
coeff: Union[int, float, complex, ParameterExpression] = 1.0,
) -> None:
"""
Args:
primitive: The SparsePauliOp which defines the behavior of the underlying function.
coeff: A coefficient multiplying the primitive.
Raises:
TypeError: invalid parameters.
"""
if not isinstance(primitive, SparsePauliOp):
raise TypeError(
f"PauliSumOp can only be instantiated with SparsePauliOp, not {type(primitive)}"
)
super().__init__(primitive, coeff=coeff)
def primitive_strings(self) -> Set[str]:
return {"SparsePauliOp"}
@property
def num_qubits(self) -> int:
return self.primitive.num_qubits # type: ignore
@property
def coeffs(self):
"""Return the Pauli coefficients."""
return self.coeff * self.primitive.coeffs
def matrix_iter(self, sparse=False):
"""Return a matrix representation iterator.
This is a lazy iterator that converts each term in the PauliSumOp
into a matrix as it is used. To convert to a single matrix use the
:meth:`to_matrix` method.
Args:
sparse (bool): optionally return sparse CSR matrices if True,
otherwise return Numpy array matrices
(Default: False)
Returns:
MatrixIterator: matrix iterator object for the PauliTable.
"""
class MatrixIterator(CustomIterator):
"""Matrix representation iteration and item access."""
def __repr__(self):
return "<PauliSumOp_matrix_iterator at {}>".format(hex(id(self)))
def __getitem__(self, key):
sumopcoeff = self.obj.coeff * self.obj.primitive.coeffs[key]
mat = PauliTable._to_matrix(self.obj.primitive.table.array[key],
sparse=sparse)
return sumopcoeff * mat
return MatrixIterator(self)
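# Added usage sketch (illustrative, not part of the original class):
# iterating term-by-term over a made-up two-term operator.
#   op = PauliSumOp(SparsePauliOp.from_list([("XI", 1.0), ("ZZ", 0.5)]))
#   for term_matrix in op.matrix_iter(sparse=True):
#       ...  # each term is yielded as a matrix scaled by its coefficient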
def add(self, other: OperatorBase) -> OperatorBase:
if not self.num_qubits == other.num_qubits:
raise ValueError(
f"Sum of operators with different numbers of qubits, {self.num_qubits} and "
f"{other.num_qubits}, is not well defined"
)
if isinstance(other, PauliSumOp):
return PauliSumOp(
self.coeff * self.primitive + other.coeff * other.primitive, coeff=1 # type: ignore
)
from .pauli_op import PauliOp
if isinstance(other, PauliOp):
return PauliSumOp(
self.coeff * self.primitive # type: ignore
+ other.coeff * SparsePauliOp(other.primitive)
)
return SummedOp([self, other])
def mul(self, scalar: Union[int, float, complex, ParameterExpression]) -> OperatorBase:
if isinstance(scalar, (int, float, complex)) and scalar != 0:
return PauliSumOp(scalar * self.primitive, coeff=self.coeff) # type: ignore
return super().mul(scalar)
def adjoint(self) -> OperatorBase:
return PauliSumOp(
self.primitive.conjugate(), coeff=self.coeff.conjugate() # type:ignore
)
def equals(self, other: OperatorBase) -> bool:
self_reduced, other_reduced = self.reduce(), other.reduce()
if not isinstance(other_reduced, PauliSumOp):
return False
if isinstance(self_reduced.coeff, ParameterExpression) or isinstance(
other_reduced.coeff, ParameterExpression
):
return (
self_reduced.coeff == other_reduced.coeff
and self_reduced.primitive == other_reduced.primitive # type:ignore
)
return (
len(self_reduced) == len(other_reduced)
and self_reduced.primitive == other_reduced.primitive
)
def _expand_dim(self, num_qubits: int) -> "PauliSumOp":
return PauliSumOp(
self.primitive.tensor( # type:ignore
SparsePauliOp(Pauli("I" * num_qubits))
),
coeff=self.coeff,
)
def tensor(self, other: OperatorBase) -> OperatorBase:
if isinstance(other, PauliSumOp):
return PauliSumOp(
self.primitive.tensor(other.primitive), # type:ignore
coeff=self.coeff * other.coeff,
)
return TensoredOp([self, other])
def permute(self, permutation: List[int]) -> "PauliSumOp":
"""Permutes the sequence of ``PauliSumOp``.
Args:
permutation: A list defining where each Pauli should be permuted. The Pauli at index
j of the primitive should be permuted to position permutation[j].
Returns:
A new PauliSumOp representing the permuted operator. For operator (X ^ Y ^ Z) and
indices=[1,2,4], it returns (X ^ I ^ Y ^ Z ^ I).
Raises:
OpflowError: if indices do not define a new index for each qubit.
"""
if len(permutation) != self.num_qubits:
raise OpflowError("List of indices to permute must have the "
"same size as Pauli Operator")
length = max(permutation) + 1
spop = self.primitive.tensor( # type:ignore
SparsePauliOp(Pauli("I" * (length - self.num_qubits)))
)
permutation = [i for i in range(length) if i not in permutation] + permutation
permutation = np.arange(length)[np.argsort(permutation)]
permutation = np.hstack([permutation, permutation + length]) # type: ignore
spop.table.array = spop.table.array[:, permutation]
return PauliSumOp(spop, self.coeff)
def compose(
self,
other: OperatorBase,
permutation: Optional[List[int]] = None,
front: bool = False,
) -> OperatorBase:
new_self, other = self._expand_shorter_operator_and_permute(other, permutation)
new_self = cast(PauliSumOp, new_self)
if front:
return other.compose(new_self)
# If self is identity, just return other.
if not np.any(new_self.primitive.table.array): # type: ignore
return other * new_self.coeff * sum(new_self.coeffs) # type: ignore
# Both PauliSumOps
if isinstance(other, PauliSumOp):
return PauliSumOp(
new_self.primitive * other.primitive, # type:ignore
coeff=new_self.coeff * other.coeff,
)
# TODO: implement compose with PauliOp
# pylint: disable=cyclic-import,import-outside-toplevel
from ..state_fns.circuit_state_fn import CircuitStateFn
from .circuit_op import CircuitOp
if isinstance(other, (CircuitOp, CircuitStateFn)):
return new_self.to_pauli_op().to_circuit_op().compose(other) # type: ignore
return super(PauliSumOp, new_self).compose(other)
def to_matrix(self, massive: bool = False) -> np.ndarray:
OperatorBase._check_massive("to_matrix", True, self.num_qubits, massive)
if isinstance(self.coeff, ParameterExpression):
return (self.primitive.to_matrix(sparse=True)).toarray() * self.coeff # type: ignore
return (self.primitive.to_matrix(sparse=True) * self.coeff).toarray() # type: ignore
def __str__(self) -> str:
def format_sign(x):
return x.real if np.isreal(x) else x
def format_number(x):
x = format_sign(x)
if isinstance(x, (int, float)) and x < 0:
return f"- {-x}"
return f"+ {x}"
indent = "" if self.coeff == 1 else " "
prim_list = self.primitive.to_list() # type: ignore
if prim_list:
first = prim_list[0]
if isinstance(first[1], (int, float)) and first[1] < 0:
main_string = indent + f"- {-first[1].real} * {first[0]}"
else:
main_string = indent + f"{format_sign(first[1])} * {first[0]}"
main_string += "".join([f"\n{indent}{format_number(c)} * {p}" for p, c in prim_list[1:]])
return f"{main_string}" if self.coeff == 1 else f"{self.coeff} * (\n{main_string}\n)"
def eval(
self,
front: Optional[Union[str, Dict[str, complex], np.ndarray, OperatorBase]] = None,
) -> Union[OperatorBase, float, complex]:
if front is None:
return self.to_matrix_op()
# pylint: disable=import-outside-toplevel,cyclic-import
from ..list_ops.list_op import ListOp
from ..state_fns.circuit_state_fn import CircuitStateFn
from ..state_fns.dict_state_fn import DictStateFn
from ..state_fns.state_fn import StateFn
from .circuit_op import CircuitOp
from .pauli_op import PauliOp
# For now, always do this. If it's not performant, we can be more granular.
if not isinstance(front, OperatorBase):
front = StateFn(front, is_measurement=False)
if isinstance(front, ListOp) and front.distributive:
return front.combo_fn(
[self.eval(front.coeff * front_elem) for front_elem in front.oplist] # type: ignore
)
else:
if self.num_qubits != front.num_qubits:
raise ValueError(
"eval does not support operands with differing numbers of qubits, "
"{} and {}, respectively.".format(self.num_qubits, front.num_qubits)
)
if isinstance(front, DictStateFn):
new_dict = {} # type: Dict
corrected_x_bits = self.primitive.table.X[::-1] # type: ignore
corrected_z_bits = self.primitive.table.Z[::-1] # type: ignore
coeffs = self.primitive.coeffs # type:ignore
for bstr, v in front.primitive.items():
bitstr = np.asarray(list(bstr)).astype(np.int).astype(np.bool)
new_b_str = np.logical_xor(bitstr, corrected_x_bits)
new_str = ["".join(map(str, 1 * bs)) for bs in new_b_str]
z_factor = np.product(1 - 2 * np.logical_and(bitstr, corrected_z_bits), axis=1)
y_factor = np.product(
np.sqrt(1 - 2 * np.logical_and(corrected_x_bits, corrected_z_bits) + 0j),
axis=1,
)
for i, n_str in enumerate(new_str):
new_dict[n_str] = (
v * z_factor[i] * y_factor[i] * coeffs[i]
) + new_dict.get(n_str, 0)
return DictStateFn(new_dict, coeff=self.coeff * front.coeff)
elif isinstance(front, StateFn) and front.is_measurement:
raise ValueError("Operator composed with a measurement is undefined.")
# Composable types with PauliOp
elif isinstance(front, (PauliSumOp, PauliOp, CircuitOp, CircuitStateFn)):
return self.compose(front).eval() # type: ignore
# Covers VectorStateFn and OperatorStateFn
return self.to_matrix_op().eval(front.to_matrix_op()) # type: ignore
def exp_i(self) -> OperatorBase:
""" Return a ``CircuitOp`` equivalent to e^-iH for this operator H. """
# TODO: optimize for some special cases
from ..evolutions.evolved_op import EvolvedOp
return EvolvedOp(self)
def to_instruction(self) -> Instruction:
return self.to_matrix_op().to_circuit().to_instruction() # type: ignore
def to_pauli_op(self, massive: bool = False) -> OperatorBase:
from .pauli_op import PauliOp
def to_real(x):
return x.real if np.isreal(x) else x
def to_native(x):
return x.item() if isinstance(x, np.generic) else x
if len(self.primitive) == 1:
return PauliOp(
Pauli((self.primitive.table.Z[0], self.primitive.table.X[0])), # type: ignore
to_native(to_real(self.primitive.coeffs[0])) * self.coeff, # type: ignore
)
return SummedOp(
[
PauliOp(
Pauli((s.table.Z[0], s.table.X[0])),
to_native(to_real(s.coeffs[0])),
)
for s in self.primitive
],
coeff=self.coeff,
)
def __getitem__(self, offset: Union[int, slice]) -> "PauliSumOp":
"""Allows array-indexing style access to the ``PauliSumOp``.
Args:
offset: The index of ``PauliSumOp``.
Returns:
The ``PauliSumOp`` at index ``offset``,
"""
return PauliSumOp(self.primitive[offset], self.coeff)
def __len__(self) -> int:
"""Length of ``SparsePauliOp``.
Returns:
An int equal to the length of SparsePauliOp.
"""
return len(self.primitive)
# pylint: disable=arguments-differ
def reduce(self, atol: Optional[float] = None, rtol: Optional[float] = None) -> "PauliSumOp":
"""Simplify the primitive ``SparsePauliOp``.
Args:
atol: Absolute tolerance for checking if coefficients are zero (Default: 1e-8).
rtol: Relative tolerance for checking if coefficients are zero (Default: 1e-5).
Returns:
The simplified ``PauliSumOp``.
"""
if isinstance(self.coeff, (int, float, complex)):
primitive = self.coeff * self.primitive # type: ignore
return PauliSumOp(primitive.simplify(atol=atol, rtol=rtol)) # type: ignore
return PauliSumOp(self.primitive.simplify(atol=atol, rtol=rtol), self.coeff) # type: ignore
def to_spmatrix(self) -> spmatrix:
"""Returns SciPy sparse matrix representation of the ``PauliSumOp``.
Returns:
CSR sparse matrix representation of the ``PauliSumOp``.
Raises:
ValueError: invalid parameters.
"""
return self.primitive.to_matrix(sparse=True) * self.coeff # type: ignore
@classmethod
def from_list(
cls,
pauli_list: List[Tuple[str, Union[int, float, complex]]],
coeff: Union[int, float, complex, ParameterExpression] = 1.0,
) -> "PauliSumOp":
"""Construct from a pauli_list with the form [(pauli_str, coeffs)]
Args:
pauli_list: A list of Tuple of pauli_str and coefficient.
coeff: A coefficient multiplying the primitive.
Returns:
The PauliSumOp constructed from the pauli_list.
"""
return cls(SparsePauliOp.from_list(pauli_list), coeff=coeff)
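# Added usage sketch (illustrative, not part of the original class): building
# a small two-qubit operator from Pauli labels; the coefficients are made up.
#   op = PauliSumOp.from_list([("II", -1.05), ("ZI", 0.39), ("IZ", -0.39), ("ZZ", 0.18)])
#   op.num_qubits        # 2
#   (op + op).reduce()   # coefficients are summed and simplified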
|
py | 1a3759355500ac11f6b881f2c3a389e77ae86f57 | from setuptools import setup, find_packages
with open('README.md') as f:
readme = f.read()
with open('LICENSE') as f:
license = f.read()
setup(
name='sample',
version='0.1.0',
description='RPyOpenCL',
long_description=readme,
author='Shazz',
author_email='[email protected]',
url='https://github.com/shazz/DistributedOpenCL',
license=license,
packages=find_packages(exclude=('tests', 'docs'))
)
|
py | 1a375948ec3668a2df7e97da0ac611cdc4df4d81 | from game import constants
import os
class HighScore():
"""
Loads the current high score from a text file when constructed.
"""
def __init__(self):
self.scoreFile = open(os.path.join(constants.PATH, "./highScore/highScore.txt"), "r")
self.highScoreRead = self.scoreFile.read()
self.scoreFile.close()
def newHighScore(self, score):
"""
Changes the high score if reached
"""
self.highScore = str(score)
self.scoreFile = open(os.path.join(constants.PATH, "./highScore/highScore.txt"), "w")
#self.scoreFile = open("highScore/highScore.txt", "w")
self.scoreFile.write(self.highScore)
self.scoreFile.close() |
py | 1a3759605556abcb4e46ebc76d43aeec3eef789a | from common import * # NOQA
from gdapi import ApiError
from requests.auth import AuthBase
import requests
from cattle import from_env
class LocalAuth(AuthBase):
def __init__(self, jwt, prj_id=None):
# setup any auth-related data here
self.jwt = jwt
self.prj_id = prj_id
def __call__(self, r):
# modify and return the request
r.headers['Authorization'] = 'Bearer ' + self.jwt
if self.prj_id is not None:
r.headers['X-API-Project-Id'] = self.prj_id
return r
@pytest.fixture(scope='module', autouse=True)
def turn_on_off_local_auth(request, admin_user_client):
username = random_str()
password = random_str()
admin_user_client.create_localAuthConfig(enabled=True,
username=username,
password=password)
def fin():
admin_user_client.create_localAuthConfig(enabled=None,
username=username,
password=password)
# Proves auth is off: these keys are invalid and would otherwise be rejected
assert from_env(access_key='bad_key', secret_key='bad_key2').valid()
request.addfinalizer(fin)
def make_user_and_client(admin_user_client, name_base='user ',
username=None, password=None):
if username is None:
username = name_base + random_str()
if password is None:
password = 'password ' + random_str()
account = admin_user_client.create_account(name=name_base + random_str(),
kind="user")
admin_user_client.wait_success(account)
login = admin_user_client.create_password(publicValue=username,
secretValue=password,
accountId=account.id)
admin_user_client.wait_success(login)
start_client = make_client(admin_user_client, account, username, password)
return start_client, account, username, password
def make_client(admin_user_client, account, username, password):
key = admin_user_client.create_apiKey()
admin_user_client.wait_success(key)
start_client = from_env(url=cattle_url(),
access_key=key.publicValue,
secret_key=key.secretValue)
token = requests.post(base_url() + 'token', {
'code': username + ':' + password
})
token = token.json()
assert token['type'] != 'error'
jwt = token['jwt']
user = token['userIdentity']
assert user['login'] == username
assert token['user'] == username
start_client._auth = LocalAuth(jwt)
start_client._access_key = None
start_client._secret_key = None
start_client.reload_schema()
start_client.valid()
identities = start_client.list_identity()
assert len(identities) == 1
assert identities[0].externalId == account.id
assert identities[0].login == username
projects = start_client.list_project()
assert len(projects) == 1
members = projects[0].projectMembers()
assert len(members) == 1
member = get_plain_member(members[0])
assert member['externalId'] == identities[0].externalId
assert member['externalIdType'] == identities[0].externalIdType
assert member['role'] == 'owner'
return start_client
@pytest.mark.nonparallel
def test_local_login(admin_user_client, request):
client, account, username, password =\
make_user_and_client(admin_user_client)
identities = client.list_identity()
projects = client.list_project()
assert len(projects) == 1
assert len(identities) == 1
assert identities[0].externalId == account.id
client, account, username, password =\
make_user_and_client(admin_user_client, password=" " + random_str())
identities = client.list_identity()
projects = client.list_project()
assert len(projects) == 1
assert len(identities) == 1
assert identities[0].externalId == account.id
@pytest.mark.nonparallel
def test_local_login_only_create1_project(admin_user_client):
client, account, username, password =\
make_user_and_client(admin_user_client)
identities = client.list_identity()
projects = client.list_project()
original_projects = len(projects)
assert original_projects != 0
assert len(identities) == 1
assert identities[0].externalId == account.id
client = make_client(admin_user_client, account, username, password)
identities = client.list_identity()
projects = client.list_project()
assert len(projects) == original_projects
assert len(identities) == 1
assert identities[0].externalId == account.id
@pytest.mark.nonparallel
def test_local_login_change_password(admin_user_client):
client, account, username, password =\
make_user_and_client(admin_user_client)
credential = client.list_password()
assert len(credential) == 1
assert credential[0].publicValue == username
newPass = random_str()
credential[0].changesecret(oldSecret=password, newSecret=newPass)
client, account, username, password =\
make_user_and_client(admin_user_client)
identities = client.list_identity()
assert len(identities) == 1
assert identities[0].externalId == account.id
@pytest.mark.nonparallel
def test_local_incorrect_login(admin_user_client):
token = requests.post(base_url() + 'token',
{
'code': random_str() + ':' + random_str()
})
assert token.status_code == 401
token = token.json()
assert token['type'] == 'error'
assert token['status'] == 401
@pytest.mark.nonparallel
def test_local_project_members(admin_user_client):
user1_client, account, username, password =\
make_user_and_client(admin_user_client)
user1_identity = None
for obj in user1_client.list_identity():
if obj.externalIdType == 'rancher_id':
user1_identity = obj
break
user2_client, account, username, password =\
make_user_and_client(admin_user_client)
user2_identity = None
for obj in user2_client.list_identity():
if obj.externalIdType == 'rancher_id':
user2_identity = obj
break
project = user1_client.create_project(members=[
idToMember(user1_identity, 'owner'),
idToMember(user2_identity, 'member')
])
admin_user_client.wait_success(project)
assert user1_client.by_id('project', project.id) is not None
assert user2_client.by_id('project', project.id) is not None
def idToMember(identity, role):
return {
'externalId': identity.externalId,
'externalIdType': identity.externalIdType,
'role': role
}
@pytest.mark.nonparallel
def test_local_project_create(admin_user_client):
user1_client, account, username, password =\
make_user_and_client(admin_user_client)
identity = None
for obj in user1_client.list_identity():
if obj.externalIdType == 'rancher_id':
identity = obj
break
members = [idToMember(identity, 'owner')]
project = user1_client.create_project(members=members)
project = user1_client.wait_success(project)
assert project is not None
user1_client.delete(project)
@pytest.mark.nonparallel
def test_get_correct_identity(admin_user_client):
name = "Identity User"
context = create_context(admin_user_client, name=name)
identities = context.user_client.list_identity()
assert len(identities) == 1
assert identities[0].name == name
@pytest.mark.nonparallel
def test_search_identity_name(admin_user_client):
usernames = []
for x in range(0, 5):
client, account, username, password =\
make_user_and_client(admin_user_client)
usernames.append(username)
user_client = create_context(admin_user_client).user_client
for username in usernames:
ids = user_client\
.list_identity(name=username)
assert len(ids) == 1
assert ids[0].login == username
identity = user_client.by_id('identity', id=ids[0].id)
assert identity.name == ids[0].name
assert identity.id == ids[0].id
assert identity.externalId == ids[0].externalId
assert identity.externalIdType == ids[0].externalIdType
@pytest.mark.nonparallel
def test_search_identity_name_like(admin_user_client, request):
name_base = random_str()
usernames = []
for x in range(0, 5):
client, account, username, password =\
make_user_and_client(admin_user_client,
name_base=name_base)
usernames.append(username)
identities = admin_user_client.list_identity(all=name_base)
assert len(identities) == 5
assert len(usernames) == 5
found = 0
for identity in identities:
for username in usernames:
if (identity.login == username):
found += 1
assert found == 5
@pytest.mark.nonparallel
def test_inactive_active_login_account(admin_user_client, request):
client, account, username, password =\
make_user_and_client(admin_user_client)
identities = client.list_identity()
projects = client.list_project()
assert len(projects) != 0
assert len(identities) == 1
assert identities[0].externalId == account.id
account = admin_user_client.by_id("account", account.id)
account.deactivate()
admin_user_client.wait_success(account)
with pytest.raises(ApiError) as e:
client.list_identity()
assert e.value.error.status == 401
token = requests.post(base_url() + 'token', {
'code': username + ':' + password
})
token = token.json()
assert token['type'] == 'error'
assert token['status'] == 401
account = admin_user_client.reload(account)
account.activate()
admin_user_client.wait_success(account)
client.reload_schema()
assert client.list_identity()[0]['login'] == username
token = requests.post(base_url() + 'token', {
'code': username + ':' + password
}).json()
assert token['user'] == username
assert token['userIdentity']['login'] == username
@pytest.mark.nonparallel
def test_deleted_account_login(admin_user_client, request):
client, account, username, password =\
make_user_and_client(admin_user_client)
identities = client.list_identity()
projects = client.list_project()
assert len(projects) == 1
assert len(identities) == 1
assert identities[0].externalId == account.id
account = admin_user_client.by_id("account", account.id)
account.deactivate()
admin_user_client.wait_success(account)
admin_user_client.delete(account)
with pytest.raises(ApiError) as e:
client.list_identity()
assert e.value.error.status == 401
token = requests.post(base_url() + 'token', {
'code': username + ':' + password
})
token = token.json()
assert token['type'] == 'error'
assert token['status'] == 401
account = admin_user_client.wait_success(account)
account.purge()
admin_user_client.wait_success(account)
client, account, username, password =\
make_user_and_client(admin_user_client,
username=username,
password=password)
assert client.list_identity()[0]['login'] == username
token = requests.post(base_url() + 'token', {
'code': username + ':' + password
}).json()
assert token['user'] == username
assert token['userIdentity']['login'] == username
new_projects = client.list_project()
assert len(new_projects) == 1
assert new_projects[0].id != projects[0].id
@pytest.mark.nonparallel
def test_list_members_inactive_deleted_member(admin_user_client):
user1_client, account, username, password =\
make_user_and_client(admin_user_client)
user2_client, account2, username, password =\
make_user_and_client(admin_user_client)
members = get_plain_members(user1_client.list_identity())
members[0]['role'] = 'owner'
project = user1_client.create_project(members=members)
project = user1_client.wait_success(project)
members = [
get_plain_member(user1_client.list_identity()[0]),
get_plain_member(user2_client.list_identity()[0])
]
members[0]['role'] = 'owner'
members[1]['role'] = 'member'
project.setmembers(members=members)
account2 = admin_user_client.by_id("account", account2.id)
account2.deactivate()
account2 = admin_user_client.wait_success(account2)
account2.remove()
account2 = admin_user_client.wait_success(account2)
account2.purge()
admin_user_client.wait_success(account2)
project = user1_client.by_id("project", project.id)
assert len(project.projectMembers()) == 1
@pytest.mark.nonparallel
def test_cant_create_multiple_users_same_login(admin_user_client):
user1_client, account, username, password =\
make_user_and_client(admin_user_client)
with pytest.raises(ApiError) as e:
make_user_and_client(admin_user_client,
username=username, password=password)
assert e.value.error.status == 422
assert e.value.error.code == 'NotUnique'
assert e.value.error.fieldName == 'publicValue'
@pytest.mark.nonparallel
def test_passwords_non_alpha_numeric_characters(admin_user_client):
chars = [':', ';', '@', '!', '#', '$', '%', '^', '&', '*', '(', ')',
'+', '/', '<', '>', '?']
name = random_str()
username = random_str()
account = admin_user_client.create_account(name=name,
kind="user")
admin_user_client.wait_success(account)
assert account.name == name
for char in chars:
password = 'the{}Ran{}pa22'.format(char, char)
key = admin_user_client.create_password(publicValue=username,
secretValue=password,
accountId=account.id)
key = admin_user_client.wait_success(key)
make_client(admin_user_client, account, username, password)
admin_user_client.wait_success(key.deactivate())
admin_user_client.delete(key)
key = admin_user_client.wait_success(key)
key.purge()
|
py | 1a375aa0e013b3a27c8e3385870df94c224595d1 | from .. import stats
from . import base
__all__ = ['Accuracy', 'RollingAccuracy']
class BaseAccuracy(base.MultiClassMetric):
@property
def bigger_is_better(self):
return True
@property
def requires_labels(self):
return True
class Accuracy(stats.Mean, BaseAccuracy):
"""Accuracy score, which is the percentage of exact matches.
Example:
::
>>> import math
>>> from creme import metrics
>>> from sklearn.metrics import accuracy_score
>>> y_true = [True, False, True, True, True]
>>> y_pred = [True, True, False, True, True]
>>> metric = metrics.Accuracy()
>>> for i, (y_t, y_p) in enumerate(zip(y_true, y_pred)):
... metric = metric.update(y_t, y_p)
... assert math.isclose(metric.get(), accuracy_score(y_true[:i+1], y_pred[:i+1]))
>>> metric
Accuracy: 0.6
"""
def update(self, y_true, y_pred):
return super().update(y_true == y_pred)
class RollingAccuracy(stats.RollingMean, BaseAccuracy):
"""Rolling accuracy score, which is the percentage of exact matches over a window.
Example:
::
>>> from creme import metrics
>>> y_true = [True, False, True, True, True]
>>> y_pred = [True, True, False, True, True]
>>> metric = metrics.RollingAccuracy(window_size=3)
>>> for y_t, y_p in zip(y_true, y_pred):
... print(metric.update(y_t, y_p))
RollingAccuracy: 1.
RollingAccuracy: 0.5
RollingAccuracy: 0.333333
RollingAccuracy: 0.333333
RollingAccuracy: 0.666667
"""
def update(self, y_true, y_pred):
return super().update(y_true == y_pred)
|
py | 1a375b18744d06bd7e2bb092dde20e7aad51ac01 | import utils as util
import os
import ImgSplit_multi_process
import SplitOnlyImage_multi_process
import shutil
from multiprocessing import Pool
from DOTA2COCO import DOTA2COCOTest, DOTA2COCOTrain
import argparse
wordname_5 = ['1', '2', '3', '4', '5']
def parse_args():
parser = argparse.ArgumentParser(description='prepare dota1')
parser.add_argument('--srcpath', default='/media/adminer/data/Rocketforce/Little/')
parser.add_argument('--dstpath', default=r'/media/adminer/data/Rocketforce/Little_mmdet/',
help='prepare data')
args = parser.parse_args()
return args
def single_copy(src_dst_tuple):
shutil.copyfile(*src_dst_tuple)
def filecopy(srcpath, dstpath, num_process=32):
pool = Pool(num_process)
filelist = util.GetFileFromThisRootDir(srcpath)
name_pairs = []
for file in filelist:
basename = os.path.basename(file.strip())
dstname = os.path.join(dstpath, basename)
name_tuple = (file, dstname)
name_pairs.append(name_tuple)
pool.map(single_copy, name_pairs)
def singel_move(src_dst_tuple):
shutil.move(*src_dst_tuple)
def filemove(srcpath, dstpath, num_process=32):
pool = Pool(num_process)
filelist = util.GetFileFromThisRootDir(srcpath)
name_pairs = []
for file in filelist:
basename = os.path.basename(file.strip())
dstname = os.path.join(dstpath, basename)
name_tuple = (file, dstname)
name_pairs.append(name_tuple)
pool.map(filemove, name_pairs)
def getnamelist(srcpath, dstfile):
filelist = util.GetFileFromThisRootDir(srcpath)
with open(dstfile, 'w') as f_out:
for file in filelist:
basename = util.mybasename(file)
f_out.write(basename + '\n')
def prepare(srcpath, dstpath):
"""
:param srcpath: train, val, test
train --> trainval1024, val --> trainval1024, test --> test1024
:return:
"""
if not os.path.exists(os.path.join(dstpath, 'test1024_2')):
os.mkdir(os.path.join(dstpath, 'test1024_2'))
if not os.path.exists(os.path.join(dstpath, 'trainval1024')):
os.mkdir(os.path.join(dstpath, 'trainval1024'))
split_train = ImgSplit_multi_process.splitbase(os.path.join(srcpath, 'train'),
os.path.join(dstpath, 'trainval1024'),
gap=200,
subsize=1024,
num_process=32,
ext='.tif'
)
split_train.splitdata(1)
split_val = ImgSplit_multi_process.splitbase(os.path.join(srcpath, 'val'),
os.path.join(dstpath, 'trainval1024'),
gap=200,
subsize=1024,
num_process=32,
ext='.tif'
)
split_val.splitdata(1)
# split_test = SplitOnlyImage_multi_process.splitbase(os.path.join(srcpath, 'test2', 'images'),
# os.path.join(dstpath, 'test1024_2', 'images'),
# gap=200,
# subsize=1024,
# num_process=32,
# ext='.tif'
# )
# split_test.splitdata(1)
DOTA2COCOTrain(os.path.join(dstpath, 'trainval1024'), os.path.join(dstpath, 'trainval1024', 'DOTA_trainval1024.json'), wordname_5, difficult='-1')
# DOTA2COCOTest(os.path.join(dstpath, 'test1024_2'), os.path.join(dstpath, 'test1024_2', 'DOTA_test1024_2.json'), wordname_5)
if __name__ == '__main__':
args = parse_args()
srcpath = args.srcpath
dstpath = args.dstpath
prepare(srcpath, dstpath) |
py | 1a375b2d68028a33bf8020038e4fa6a703acc9c7 | #coding=utf-8
class Round(object):
pass
class Battle(object):
def __init__(self, avatar_list):
assert(len(avatar_list) > 1)
self.avatar_list = avatar_list
self.history_rounds = []
self.cur_round = None
self.cur_turn = -1
def on_battle_begin(self):
for x in self.avatar_list:
x.battle = self
def on_battle_end(self):
pass
def is_end(self):
l = [x for x in self.avatar_list if x.is_alive()]
return len(l) <= 1
def new_round(self):
self.cur_round = Round()
self.cur_turn = (self.cur_turn + 1) % len(self.avatar_list)
def on_round_begin(self):
for x in self.avatar_list:
x.on_round_begin()
def on_round_end(self):
for x in self.avatar_list:
x.on_round_end()
self.history_rounds.insert(0, self.cur_round)
def start(self):
self.on_battle_begin()
while not self.is_end():
self.new_round()
self.on_round_begin()
action_player = self.avatar_list[self.cur_turn]
action_player.action()
self.on_round_end()
self.on_battle_end()
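# Hedged usage sketch (added for illustration): Battle expects avatar objects
# that expose is_alive(), action(), on_round_begin() and on_round_end(); the
# DummyAvatar below is hypothetical and only shows the expected interface.
if __name__ == '__main__':
    class DummyAvatar(object):
        def __init__(self, name, hp):
            self.name, self.hp = name, hp
        def is_alive(self):
            return self.hp > 0
        def on_round_begin(self):
            pass
        def on_round_end(self):
            pass
        def action(self):
            # hit the first living opponent (illustrative logic only)
            for other in self.battle.avatar_list:
                if other is not self and other.is_alive():
                    other.hp -= 1
                    break
    Battle([DummyAvatar('a', 2), DummyAvatar('b', 2)]).start()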
|
py | 1a375c47f0686ee715d22b19806302034631dda6 | import json
import logging
import os
import boto3
from botocore.exceptions import ClientError
def _generate_cloudwatch_rule_name(job_id: int, stage: str) -> str:
return f"{stage}-job-{job_id}"
def schedule(cron_schedule: str, job_id: int, is_active: bool) -> str:
"""
TODO: do not use project_path as an identifier for events
"""
cloudwatch_rule_name = _generate_cloudwatch_rule_name(job_id, os.getenv('STAGE', 'local'))
events = boto3.client('events', region_name=os.getenv('AWS_REGION_NAME'))
logging.info(f"Scheduling job (id:{job_id}): {cron_schedule} (active: {is_active})")
result = events.put_rule(
Name=cloudwatch_rule_name,
ScheduleExpression=f"cron({cron_schedule})", # TODO: convert default cron to AWS cron
State='ENABLED' if is_active else 'DISABLED'
)
rule_arn = result['RuleArn']
logging.info(f"Cloudwatch Event Rule was configured succesfully. Rule ARN: {rule_arn}")
res = events.put_targets(
Rule=cloudwatch_rule_name,
Targets=[
{
'Id': os.getenv('LAMBDA_PROXY_NAME'),
'Arn': os.getenv('LAMBDA_PROXY_ARN'),
'Input': json.dumps({'job_id': job_id}),
}
]
)
logging.info(f"Configured target for CW rule: {res}")
return rule_arn # TODO: store it somewhere
def enable_job_schedule(job_id: int):
events = boto3.client('events', region_name=os.getenv('AWS_REGION_NAME'))
rule_name = _generate_cloudwatch_rule_name(job_id, os.getenv('STAGE', 'local'))
events.enable_rule(Name=rule_name)
logging.info(f"Schedule rule {rule_name} enabled")
def disable_job_schedule(job_id: int):
events = boto3.client('events', region_name=os.getenv('AWS_REGION_NAME'))
rule_name = _generate_cloudwatch_rule_name(job_id, os.getenv('STAGE', 'local'))
events.disable_rule(Name=rule_name)
logging.info(f"Schedule rule {rule_name} disabled")
def remove_job_schedule(job_id: int):
events = boto3.client('events', region_name=os.getenv('AWS_REGION_NAME'))
cloudwatch_rule_name = _generate_cloudwatch_rule_name(job_id, os.getenv('STAGE', 'local'))
try:
events.remove_targets(Rule=cloudwatch_rule_name,
Ids=[os.getenv('LAMBDA_PROXY_NAME')])
events.delete_rule(Name=cloudwatch_rule_name)
logging.info(f"Schedule of job {job_id} removed from CloudWatch rules")
except ClientError as e:
error_code = e.response.get("Error", {}).get("Code")
if error_code == "ResourceNotFoundException":
logging.info(f"Schedule of job {job_id} was not removed from CloudWatch rules because it's not there")
else:
raise e
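# Hedged usage sketch (added; not in the original module). These helpers read
# AWS_REGION_NAME, LAMBDA_PROXY_NAME, LAMBDA_PROXY_ARN and STAGE from the
# environment; the cron expression and job id below are made up.
#   rule_arn = schedule("0 12 * * ? *", job_id=42, is_active=True)
#   disable_job_schedule(42)
#   enable_job_schedule(42)
#   remove_job_schedule(42)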
|
py | 1a375d80432aa914e65a0bb2dc29384153b3b122 | """
ASGI config for buttonpython project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'buttonpython.settings')
application = get_asgi_application()
|
py | 1a37614f1e99b74f61623060949eeb0078230fb3 | import pandas as pd
from os import path
import vowpalwabbit
import unittest
import platform
import math
import re
def helper_get_test_dir():
curr_path = path.dirname(path.realpath(__file__))
return path.join(path.dirname(path.dirname(curr_path)), "test")
def helper_get_data():
train_data = [
{
"action": 1,
"cost": 2,
"probability": 0.4,
"feature1": "a",
"feature2": "c",
"feature3": "",
},
{
"action": 3,
"cost": 0,
"probability": 0.2,
"feature1": "b",
"feature2": "d",
"feature3": "",
},
{
"action": 4,
"cost": 1,
"probability": 0.5,
"feature1": "a",
"feature2": "b",
"feature3": "",
},
{
"action": 2,
"cost": 1,
"probability": 0.3,
"feature1": "a",
"feature2": "b",
"feature3": "c",
},
{
"action": 3,
"cost": 1,
"probability": 0.7,
"feature1": "a",
"feature2": "d",
"feature3": "",
},
]
train_df = pd.DataFrame(train_data)
train_df["index"] = range(1, len(train_df) + 1)
train_df = train_df.set_index("index")
test_data = [
{"feature1": "b", "feature2": "c", "feature3": ""},
{"feature1": "a", "feature2": "", "feature3": "b"},
{"feature1": "b", "feature2": "b", "feature3": ""},
{"feature1": "a", "feature2": "", "feature3": "b"},
]
test_df = pd.DataFrame(test_data)
# Add index to data frame
test_df["index"] = range(1, len(test_df) + 1)
test_df = test_df.set_index("index")
return train_df, test_df
def test_getting_started_example_cb():
return helper_getting_started_example("--cb")
def test_getting_started_example_legacy_cb():
return helper_getting_started_example("--cb_force_legacy --cb")
# Returns true if they are close enough to be considered equal.
def are_floats_equal(float_one_str: str, float_two_str: str, epsilon: float) -> bool:
float_one = float(float_one_str)
float_two = float(float_two_str)
# Special case handle these two as they will not be equal when checking absolute difference.
# But for the purposes of comparing the diff they are equal.
if math.isinf(float_one) and math.isinf(float_two):
return True
if math.isnan(float_one) and math.isnan(float_two):
return True
delta = abs(float_one - float_two)
if delta < epsilon:
return True
# Large number comparison code migrated from Perl RunTests
# We have a 'big enough' difference, but this difference
# may still not be meaningful in all contexts. Big numbers should be compared by ratio rather than
# by difference
# Must ensure we can divide (avoid div-by-0)
# If numbers are so small (close to zero),
# ($delta > $Epsilon) suffices for deciding that
# the numbers are meaningfully different
if abs(float_two) <= 1.0:
return False
# Now we can safely divide (since abs($word2) > 0) and determine the ratio difference from 1.0
ratio_delta = abs(float_one / float_two - 1.0)
return ratio_delta < epsilon
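# Added worked example (illustrative): 1000000.0 and 1000000.5 differ by 0.5
# in absolute terms, but their ratio differs from 1.0 by only 5e-7, so with
# epsilon=0.001 they are treated as equal by the ratio check above.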
def is_float(value: str) -> bool:
try:
float(value)
return True
except ValueError:
return False
def is_line_different(output_line: str, ref_line: str, epsilon: float) -> bool:
output_tokens = re.split("[ \t:,@]+", output_line)
ref_tokens = re.split("[ \t:,@]+", ref_line)
if len(output_tokens) != len(ref_tokens):
return True
for output_token, ref_token in zip(output_tokens, ref_tokens):
output_is_float = is_float(output_token)
ref_is_float = is_float(ref_token)
if output_is_float and ref_is_float:
are_equal = are_floats_equal(output_token, ref_token, epsilon)
if not are_equal:
return True
else:
if output_token != ref_token:
return True
return False
@unittest.skipIf(
platform.machine() == "aarch64", "skipping due to floating-point error on aarch64"
)
def helper_getting_started_example(which_cb):
train_df, test_df = helper_get_data()
vw = vowpalwabbit.Workspace(which_cb + " 4 --log_level off", enable_logging=True)
for i in train_df.index:
action = train_df.loc[i, "action"]
cost = train_df.loc[i, "cost"]
probability = train_df.loc[i, "probability"]
feature1 = train_df.loc[i, "feature1"]
feature2 = train_df.loc[i, "feature2"]
feature3 = train_df.loc[i, "feature3"]
learn_example = (
str(action)
+ ":"
+ str(cost)
+ ":"
+ str(probability)
+ " | "
+ str(feature1)
+ " "
+ str(feature2)
+ " "
+ str(feature3)
)
vw.learn(learn_example)
assert (
vw.get_prediction_type() == vw.pMULTICLASS
), "prediction_type should be multiclass"
for j in test_df.index:
feature1 = test_df.loc[j, "feature1"]
feature2 = test_df.loc[j, "feature2"]
feature3 = test_df.loc[j, "feature3"]
choice = vw.predict(
"| " + str(feature1) + " " + str(feature2) + " " + str(feature3)
)
assert isinstance(choice, int), "choice should be int"
assert choice == 3, "predicted action should be 3 instead of " + str(choice)
# test that metrics is empty since "--extra_metrics filename" was not supplied
assert len(vw.get_learner_metrics()) == 0
vw.finish()
output = vw.get_log()
if which_cb.find("legacy") != -1:
test_file = "test-sets/ref/python_test_cb_legacy.stderr"
else:
test_file = "test-sets/ref/python_test_cb.stderr"
with open(path.join(helper_get_test_dir(), test_file), "r") as file:
expected = file.readlines()
for expected_line, output_line in zip(expected, output):
output_line = output_line.replace("...", "").strip()
expected_line = expected_line.replace("...", "").strip()
assert not is_line_different(output_line, expected_line, 0.001)
def test_getting_started_example_with():
train_df, test_df = helper_get_data()
# with syntax calls into vw.finish() automatically.
# you actually want to use 'with vowpalwabbit.Workspace("--cb 4") as vw:'
# but we need to assert on vw.finished for test purposes
vw = vowpalwabbit.Workspace("--cb 4")
with vw as vw:
for i in train_df.index:
action = train_df.loc[i, "action"]
cost = train_df.loc[i, "cost"]
probability = train_df.loc[i, "probability"]
feature1 = train_df.loc[i, "feature1"]
feature2 = train_df.loc[i, "feature2"]
feature3 = train_df.loc[i, "feature3"]
learn_example = (
str(action)
+ ":"
+ str(cost)
+ ":"
+ str(probability)
+ " | "
+ str(feature1)
+ " "
+ str(feature2)
+ " "
+ str(feature3)
)
vw.learn(learn_example)
assert (
vw.get_prediction_type() == vw.pMULTICLASS
), "prediction_type should be multiclass"
for j in test_df.index:
feature1 = test_df.loc[j, "feature1"]
feature2 = test_df.loc[j, "feature2"]
feature3 = test_df.loc[j, "feature3"]
choice = vw.predict(
"| " + str(feature1) + " " + str(feature2) + " " + str(feature3)
)
assert isinstance(choice, int), "choice should be int"
assert choice == 3, "predicted action should be 3"
assert vw.finished == True, "with syntax should finish() vw instance"
|
py | 1a3761536e914d61d0c7257781b9f6c7d3c1d8d8 | __author__ = 'shengjia'
import threading
import time
class Timer:
def __init__(self, max_time):
self.max_time = max_time
self.begin_time = time.time()
self.time_out_flag = False
self.time_out_flag_lock = threading.Lock()
self.timer_thread = threading.Thread(target=self.timer)
self.timer_thread.start()
def __del__(self):
try:
self.begin_time = 0.0
self.timer_thread.join()
except:
pass
# Timer thread that wakes up every second to check if the specified time has elapsed
def timer(self):
while True:
if time.time() - self.begin_time > self.max_time:
break
time.sleep(1.0)
self.time_out_flag_lock.acquire()
self.time_out_flag = True
self.time_out_flag_lock.release()
# Query of whether the timer has timed out
def timeout(self):
self.time_out_flag_lock.acquire()
if self.time_out_flag:
self.time_out_flag_lock.release()
return True
else:
self.time_out_flag_lock.release()
return False
# Remaining time before timeout
def time(self):
remained = self.max_time - (time.time() - self.begin_time)
if remained < 0:
return 0
else:
return remained
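# Minimal usage sketch of the Timer above: poll timeout() while working and stop
# once the budget is spent. Because the timer thread only wakes once per second,
# timeout() may flip to True up to about a second after the deadline.
if __name__ == '__main__':
    timer = Timer(5.0)  # five-second budget
    while not timer.timeout():
        print('remaining: %.1f s' % timer.time())
        time.sleep(1.0)
    print('timed out')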
|
py | 1a3763b6c5b1f62134b25b6f7173844b3effc7b5 | # Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Module for probabilistic programming features."""
from oryx.core.ppl.effect_handler import make_effect_handler
from oryx.core.ppl.transformations import block
from oryx.core.ppl.transformations import conditional
from oryx.core.ppl.transformations import graph_replace
from oryx.core.ppl.transformations import intervene
from oryx.core.ppl.transformations import joint_log_prob
from oryx.core.ppl.transformations import joint_sample
from oryx.core.ppl.transformations import log_prob
from oryx.core.ppl.transformations import LogProbFunction
from oryx.core.ppl.transformations import nest
from oryx.core.ppl.transformations import plate
from oryx.core.ppl.transformations import Program
from oryx.core.ppl.transformations import random_variable
from oryx.core.ppl.transformations import RANDOM_VARIABLE
from oryx.core.ppl.transformations import rv
from oryx.core.ppl.transformations import trace
from oryx.core.ppl.transformations import trace_log_prob
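# Minimal sketch of the core transformations exported above, assuming JAX and
# TensorFlow Probability's JAX substrate are installed: a program is a function
# of a PRNG key, random_variable wraps a distribution into such a program, and
# log_prob transforms the program into its log-density.
def _ppl_example():
    from jax import random
    import tensorflow_probability.substrates.jax as tfp
    tfd = tfp.distributions

    def model(key):
        return random_variable(tfd.Normal(0., 1.))(key)

    sample = model(random.PRNGKey(0))
    density = log_prob(model)(0.1)
    return sample, density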
|
py | 1a37667183b98eb23825ffc3210aca6c354fcff7 | # -*- coding: utf-8 -*-
from datetime import datetime
import requests
from shipane_sdk.base_quant_client import BaseQuantClient
from shipane_sdk.joinquant.transaction import JoinQuantTransaction
class JoinQuantClient(BaseQuantClient):
BASE_URL = 'https://www.joinquant.com'
def __init__(self, **kwargs):
super(JoinQuantClient, self).__init__('JoinQuant')
self._session = requests.Session()
self._username = kwargs.get('username', None)
self._password = kwargs.get('password', None)
self._backtest_id = kwargs.get('backtest_id', None)
def login(self):
self._session.headers = {
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'en-US,en;q=0.8',
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.100 Safari/537.36',
'Referer': '{}/user/login/index'.format(self.BASE_URL),
'X-Requested-With': 'XMLHttpRequest',
'Origin': self.BASE_URL,
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
}
self._session.get(self.BASE_URL)
response = self._session.post('{}/user/login/doLogin?ajax=1'.format(self.BASE_URL), data={
'CyLoginForm[username]': self._username,
'CyLoginForm[pwd]': self._password,
'ajax': 1
})
self._session.headers.update({
'cookie': response.headers['Set-Cookie']
})
super(JoinQuantClient, self).login()
def query(self):
today_str = datetime.today().strftime('%Y-%m-%d')
response = self._session.get('{}/algorithm/live/transactionDetail'.format(self.BASE_URL), params={
'backtestId': self._backtest_id,
'date': today_str,
'ajax': 1
})
transaction_detail = response.json()
raw_transactions = transaction_detail['data']['transaction']
transactions = []
for raw_transaction in raw_transactions:
transaction = JoinQuantTransaction(raw_transaction).normalize()
transactions.append(transaction)
return transactions
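# Minimal usage sketch of the client above; the credentials and backtest id are
# placeholders and a live JoinQuant account is assumed.
if __name__ == '__main__':
    client = JoinQuantClient(username='your-username',
                             password='your-password',
                             backtest_id='your-backtest-id')
    client.login()
    for transaction in client.query():
        print(transaction)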
|
py | 1a3767153fcac2b7803d6fcf534eb07320ce925d | #!/usr/bin/env python
# Description: access topcons.net via WSDL service
# Copyright Nanjiang Shu ([email protected])
from __future__ import print_function
import os
import sys
import argparse
progname = os.path.basename(sys.argv[0])
wspace = ''.join([" "]*len(progname))
no_suds_message="""\
suds is not installed!
Please install suds by
$ pip install suds (for Python2)
$ pip install suds-jurko (for Python3)
"""
try:
from suds.client import Client
except ImportError:
print(no_suds_message, file=sys.stderr)
sys.exit(1)
import urllib
MAX_FILESIZE_IN_MB = 9
MAX_FILESIZE = MAX_FILESIZE_IN_MB*1024*1024
def ReadFile(infile, mode="r"):#{{{
try:
fpin = open(infile, mode)
content = fpin.read()
fpin.close()
return content
except IOError:
print("Failed to read file %s with mode '%s'"%(infile, mode), file=sys.stderr)
return ""
#}}}
def main(g_params):#{{{
wsdl_url = "https://topcons.net/pred/api_submitseq/?wsdl"
parser = argparse.ArgumentParser(
description='Access topcons2 web-server (https://topcons.net) through WSDL service ',
#formatter_class=argparse.RawDescriptionHelpFormatter,
formatter_class=argparse.RawTextHelpFormatter,
epilog='''\
Created 2015-02-04, updated 2018-01-12, Nanjiang Shu
Examples:
# submit test.fa with jobname 'test' to the server
%s -m submit -seq test.fa -jobname test
# try to retrieve the result for jobid 'rst_TTT' and save it to the current directory
%s -m get -jobid rst_TTT
'''%(progname, progname))
parser.add_argument('-m', action='store',
dest='mode', default='submit', choices=['submit','get'], required=True,
help='Set the mode of API\nsubmit - submit a job to WSDL\nget - retrieve the result from the server')
parser.add_argument('-seq', metavar='FILE', dest='seqfile',
help='Supply input sequence in FASTA format')
parser.add_argument('-jobname', metavar='STR', dest='jobname',
help='Give the job a name')
parser.add_argument('-jobid', metavar='STR', dest='jobid',
help='Retrieve the result by supplying a valid jobid')
parser.add_argument('-email', metavar='STR', dest='email',
help='Send a notification to the email when the result is ready')
parser.add_argument('-outpath', metavar='DIR', dest='outpath',
help='Save the retrieved data to outpath, (default: ./)')
args = parser.parse_args()
mode = args.mode
jobid = ""
email = ""
jobname = ""
fixtopfile = ""
seqfile = ""
outpath = "."
if args.jobid != None:
jobid = args.jobid
if args.email != None:
email = args.email
if args.jobname != None:
jobname = args.jobname
if args.seqfile != None:
seqfile = args.seqfile
if args.outpath != None:
outpath = args.outpath
if mode == "submit":
if seqfile == "":
print("You want to submit a job but seqfile is not set. Exit!", file=sys.stderr)
return 1
elif not os.path.exists(seqfile):
print("seqfile %s does not exist. Exit!"%(seqfile),file=sys.stderr)
return 1
try:
filesize = os.path.getsize(seqfile)
except OSError:
print("failed to get the size of seqfile %s. Exit"%(seqfile), file=sys.stderr)
return 1
if filesize >= MAX_FILESIZE:
print("Your input seqfile %s exceeds the upper limit of %d Mb."%(
seqfile, MAX_FILESIZE_IN_MB), file=sys.stderr)
print("Please split your seqfile and submit again.", file=sys.stderr)
return 1
seq = ReadFile(seqfile)
fixtop = ""
if fixtopfile != "":
fixtop = ReadFile(fixtopfile)
myclient = Client(wsdl_url, cache=None)
retValue = myclient.service.submitjob(seq, fixtop, jobname, email)
if len(retValue) >= 1:
strs = retValue[0]
jobid = strs[0]
result_url = strs[1]
numseq_str = strs[2]
errinfo = strs[3]
warninfo = strs[4]
if jobid != "None" and jobid != "":
print("You have successfully submitted your job "\
"with %s sequences. jobid = %s\n"%(numseq_str, jobid))
if warninfo != "" and warninfo != "None":
print("Warning message: %s\n"%str(warninfo))
else:
print("Failed to submit job!\n")
if errinfo != "" and errinfo != "None":
print("Error message:%s\n"% str(errinfo))
if warninfo != "" and warninfo != "None":
print("Warning message:%s\n"% str(warninfo))
else:
print("Failed to submit job!")
return 1
else:
if jobid == "":
print("You want to get the result of a job but jobid is not set. Exit!", file=sys.stderr )
return 1
myclient = Client(wsdl_url, cache=None)
retValue = myclient.service.checkjob(jobid)
if len(retValue) >= 1:
strs = retValue[0]
status = strs[0]
result_url = strs[1]
errinfo = strs[2]
if status == "Failed":
print("Your job with jobid %s has failed!"%(jobid))
if errinfo != "" and errinfo != "None":
print("Error message: %s\n"%str(errinfo))
elif status == "Finished":
print("Your job with jobid %s is finished!"%(jobid))
if not os.path.exists(outpath):
try:
os.makedirs(outpath)
except OSError:
print("Failed to create the outpath %s"%(outpath))
return 1
outfile = "%s/%s.zip"%(outpath, jobid)
urllib.urlretrieve (result_url, outfile)
if os.path.exists(outfile):
print("The result file %s has been retrieved for jobid %s"%(outfile, jobid))
else:
print("Failed to retrieve result for jobid %s"%(jobid))
elif status == "None":
print("Your job with jobid %s does not exist! Please check your typing!"%(jobid))
else:
print("Your job with jobid %s is not ready, status = %s"%(jobid, status))
else:
print("Failed to get job!")
return 1
return 0
#}}}
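# Minimal sketch of the two WSDL calls wrapped by main() above, mirroring the
# indexing it uses on the returned values; the jobname and email below are
# placeholders and network access to topcons.net is assumed.
def _submit_and_poll_example(seqfile):
    client = Client("https://topcons.net/pred/api_submitseq/?wsdl", cache=None)
    seq = ReadFile(seqfile)
    strs = client.service.submitjob(seq, "", "example-job", "user@example.com")[0]
    jobid = strs[0]
    strs = client.service.checkjob(jobid)[0]
    status = strs[0]
    return jobid, status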
def InitGlobalParameter():#{{{
g_params = {}
g_params['isQuiet'] = True
return g_params
#}}}
if __name__ == '__main__' :
g_params = InitGlobalParameter()
sys.exit(main(g_params))
|
py | 1a3767f64757d1051e1924837fcda7bc39fe143f | import random
import math
import torch
from torch import nn, Tensor
import torchvision
from torch.jit.annotations import List, Tuple, Dict, Optional
from torchvision.ops import misc as misc_nn_ops
from .image_list import ImageList
from .roi_heads import paste_masks_in_image
@torch.jit.unused
def _resize_image_and_masks_onnx(image, self_min_size, self_max_size, target):
# type: (Tensor, float, float, Optional[Dict[str, Tensor]]) -> Tuple[Tensor, Optional[Dict[str, Tensor]]]
from torch.onnx import operators
im_shape = operators.shape_as_tensor(image)[-2:]
min_size = torch.min(im_shape).to(dtype=torch.float32)
max_size = torch.max(im_shape).to(dtype=torch.float32)
scale_factor = torch.min(self_min_size / min_size, self_max_size / max_size)
image = torch.nn.functional.interpolate(
image[None], scale_factor=scale_factor, mode='bilinear',
align_corners=False)[0]
if target is None:
return image, target
if "masks" in target:
mask = target["masks"]
mask = misc_nn_ops.interpolate(mask[None].float(), scale_factor=scale_factor)[0].byte()
target["masks"] = mask
return image, target
def _resize_image_and_masks(image, self_min_size, self_max_size, target):
# type: (Tensor, float, float, Optional[Dict[str, Tensor]]) -> Tuple[Tensor, Optional[Dict[str, Tensor]]]
im_shape = torch.tensor(image.shape[-2:])
min_size = float(torch.min(im_shape))
max_size = float(torch.max(im_shape))
scale_factor = self_min_size / min_size
if max_size * scale_factor > self_max_size:
scale_factor = self_max_size / max_size
image = torch.nn.functional.interpolate(
image[None], scale_factor=scale_factor, mode='bilinear',
align_corners=False)[0]
if target is None:
return image, target
if "masks" in target:
mask = target["masks"]
mask = misc_nn_ops.interpolate(mask[None].float(), scale_factor=scale_factor)[0].byte()
target["masks"] = mask
return image, target
class GeneralizedRCNNTransform(nn.Module):
"""
Performs input / target transformation before feeding the data to a GeneralizedRCNN
model.
The transformations it performs are:
- input normalization (mean subtraction and std division)
- input / target resizing to match min_size / max_size
It returns an ImageList for the inputs, and a List[Dict[Tensor]] for the targets
"""
def __init__(self, min_size, max_size, image_mean, image_std):
super(GeneralizedRCNNTransform, self).__init__()
if not isinstance(min_size, (list, tuple)):
min_size = (min_size,)
self.min_size = min_size
self.max_size = max_size
self.image_mean = image_mean
self.image_std = image_std
def forward(self,
images, # type: List[Tensor]
targets=None # type: Optional[List[Dict[str, Tensor]]]
):
# type: (...) -> Tuple[ImageList, Optional[List[Dict[str, Tensor]]]]
images = [img for img in images]
for i in range(len(images)):
image = images[i]
target_index = targets[i] if targets is not None else None
if image.dim() != 3:
raise ValueError("images is expected to be a list of 3d tensors "
"of shape [C, H, W], got {}".format(image.shape))
image = self.normalize(image)
image, target_index = self.resize(image, target_index)
images[i] = image
if targets is not None and target_index is not None:
targets[i] = target_index
image_sizes = [img.shape[-2:] for img in images]
images = self.batch_images(images)
image_sizes_list = torch.jit.annotate(List[Tuple[int, int]], [])
for image_size in image_sizes:
assert len(image_size) == 2
image_sizes_list.append((image_size[0], image_size[1]))
image_list = ImageList(images, image_sizes_list)
return image_list, targets
def normalize(self, image):
dtype, device = image.dtype, image.device
mean = torch.as_tensor(self.image_mean, dtype=dtype, device=device)
std = torch.as_tensor(self.image_std, dtype=dtype, device=device)
return (image - mean[:, None, None]) / std[:, None, None]
def torch_choice(self, k):
# type: (List[int]) -> int
"""
Implements `random.choice` via torch ops so it can be compiled with
TorchScript. Remove if https://github.com/pytorch/pytorch/issues/25803
is fixed.
"""
index = int(torch.empty(1).uniform_(0., float(len(k))).item())
return k[index]
def resize(self, image, target):
# type: (Tensor, Optional[Dict[str, Tensor]]) -> Tuple[Tensor, Optional[Dict[str, Tensor]]]
h, w = image.shape[-2:]
if self.training:
size = float(self.torch_choice(self.min_size))
else:
# FIXME assume for now that testing uses the largest scale
size = float(self.min_size[-1])
if torchvision._is_tracing():
image, target = _resize_image_and_masks_onnx(image, size, float(self.max_size), target)
else:
image, target = _resize_image_and_masks(image, size, float(self.max_size), target)
if target is None:
return image, target
bbox = target["boxes"]
bbox = resize_boxes(bbox, (h, w), image.shape[-2:])
target["boxes"] = bbox
if "keypoints" in target:
keypoints = target["keypoints"]
keypoints = resize_keypoints(keypoints, (h, w), image.shape[-2:])
target["keypoints"] = keypoints
return image, target
# _onnx_batch_images() is an implementation of
# batch_images() that is supported by ONNX tracing.
@torch.jit.unused
def _onnx_batch_images(self, images, size_divisible=32):
# type: (List[Tensor], int) -> Tensor
max_size = []
for i in range(images[0].dim()):
max_size_i = torch.max(torch.stack([img.shape[i] for img in images]).to(torch.float32)).to(torch.int64)
max_size.append(max_size_i)
stride = size_divisible
max_size[1] = (torch.ceil((max_size[1].to(torch.float32)) / stride) * stride).to(torch.int64)
max_size[2] = (torch.ceil((max_size[2].to(torch.float32)) / stride) * stride).to(torch.int64)
max_size = tuple(max_size)
# work around for
# pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
# which is not yet supported in onnx
padded_imgs = []
for img in images:
padding = [(s1 - s2) for s1, s2 in zip(max_size, tuple(img.shape))]
padded_img = torch.nn.functional.pad(img, (0, padding[2], 0, padding[1], 0, padding[0]))
padded_imgs.append(padded_img)
return torch.stack(padded_imgs)
def max_by_axis(self, the_list):
# type: (List[List[int]]) -> List[int]
maxes = the_list[0]
for sublist in the_list[1:]:
for index, item in enumerate(sublist):
maxes[index] = max(maxes[index], item)
return maxes
def batch_images(self, images, size_divisible=32):
# type: (List[Tensor], int) -> Tensor
if torchvision._is_tracing():
# batch_images() does not export well to ONNX
# call _onnx_batch_images() instead
return self._onnx_batch_images(images, size_divisible)
max_size = self.max_by_axis([list(img.shape) for img in images])
stride = float(size_divisible)
max_size = list(max_size)
max_size[1] = int(math.ceil(float(max_size[1]) / stride) * stride)
max_size[2] = int(math.ceil(float(max_size[2]) / stride) * stride)
batch_shape = [len(images)] + max_size
batched_imgs = images[0].new_full(batch_shape, 0)
for img, pad_img in zip(images, batched_imgs):
pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
return batched_imgs
def postprocess(self,
result, # type: List[Dict[str, Tensor]]
image_shapes, # type: List[Tuple[int, int]]
original_image_sizes # type: List[Tuple[int, int]]
):
# type: (...) -> List[Dict[str, Tensor]]
if self.training:
return result
for i, (pred, im_s, o_im_s) in enumerate(zip(result, image_shapes, original_image_sizes)):
boxes = pred["boxes"]
boxes = resize_boxes(boxes, im_s, o_im_s)
result[i]["boxes"] = boxes
if "masks" in pred:
masks = pred["masks"]
masks = paste_masks_in_image(masks, boxes, o_im_s)
result[i]["masks"] = masks
if "keypoints" in pred:
keypoints = pred["keypoints"]
keypoints = resize_keypoints(keypoints, im_s, o_im_s)
result[i]["keypoints"] = keypoints
return result
def __repr__(self):
format_string = self.__class__.__name__ + '('
_indent = '\n '
format_string += "{0}Normalize(mean={1}, std={2})".format(_indent, self.image_mean, self.image_std)
format_string += "{0}Resize(min_size={1}, max_size={2}, mode='bilinear')".format(_indent, self.min_size,
self.max_size)
format_string += '\n)'
return format_string
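# Minimal usage sketch of the transform above (the sizes and statistics are the
# common ImageNet/detection defaults, used here only for illustration): normalize
# and resize a list of images and receive a padded, batched ImageList back.
def _transform_example():
    transform = GeneralizedRCNNTransform(min_size=800, max_size=1333,
                                         image_mean=[0.485, 0.456, 0.406],
                                         image_std=[0.229, 0.224, 0.225])
    transform.eval()  # in eval mode the largest min_size is always used
    images = [torch.rand(3, 480, 640), torch.rand(3, 300, 400)]
    image_list, _ = transform(images)
    # both images are rescaled so their shorter side is ~800, then padded to a
    # common size divisible by 32, e.g. torch.Size([2, 3, 800, 1088])
    return image_list.tensors.shape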
def resize_keypoints(keypoints, original_size, new_size):
# type: (Tensor, List[int], List[int]) -> Tensor
ratios = [
torch.tensor(s, dtype=torch.float32, device=keypoints.device) /
torch.tensor(s_orig, dtype=torch.float32, device=keypoints.device)
for s, s_orig in zip(new_size, original_size)
]
ratio_h, ratio_w = ratios
resized_data = keypoints.clone()
if torch._C._get_tracing_state():
resized_data_0 = resized_data[:, :, 0] * ratio_w
resized_data_1 = resized_data[:, :, 1] * ratio_h
resized_data = torch.stack((resized_data_0, resized_data_1, resized_data[:, :, 2]), dim=2)
else:
resized_data[..., 0] *= ratio_w
resized_data[..., 1] *= ratio_h
return resized_data
def resize_boxes(boxes, original_size, new_size):
# type: (Tensor, List[int], List[int]) -> Tensor
ratios = [
torch.tensor(s, dtype=torch.float32, device=boxes.device) /
torch.tensor(s_orig, dtype=torch.float32, device=boxes.device)
for s, s_orig in zip(new_size, original_size)
]
ratio_height, ratio_width = ratios
xmin, ymin, xmax, ymax = boxes.unbind(1)
xmin = xmin * ratio_width
xmax = xmax * ratio_width
ymin = ymin * ratio_height
ymax = ymax * ratio_height
return torch.stack((xmin, ymin, xmax, ymax), dim=1)
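# Minimal sketch of resize_boxes above: each coordinate is scaled by the ratio of
# new size to original size along its axis ((H, W) ordering, boxes given as
# (xmin, ymin, xmax, ymax)).
def _resize_boxes_example():
    boxes = torch.tensor([[10., 20., 50., 80.]])
    # height doubles (100 -> 200) and width triples (100 -> 300),
    # so the box becomes [[30., 40., 150., 160.]]
    return resize_boxes(boxes, (100, 100), (200, 300))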
|
py | 1a3768d4a36f5fad033195e21c0f38265e990d5a | import time
import os
import argparse
import sys
import datetime
sys.path.append('../../python/src')
from libnyumaya import AudioRecognition, FeatureExtractor
from auto_platform import AudiostreamSource, play_command,default_libpath
def detectKeywords(libpath):
audio_stream = AudiostreamSource()
extractor = FeatureExtractor(libpath)
detector = AudioRecognition(libpath)
extractor_gain = 1.0
#Add one or more keyword models
keywordIdFirefox = detector.addModel('../../models/Hotword/firefox_v1.4.5.premium',0.6)
keywordIdSheila = detector.addModel('../../models/Hotword/sheila_v1.4.5.premium',0.6)
keywordIdMarvin = detector.addModel('../../models/Hotword/marvin_v1.4.5.premium',0.6)
keywordIdAlexa = detector.addModel('../../models/Hotword/alexa_v1.4.5.premium',0.6)
bufsize = detector.getInputDataSize()
print("Audio Recognition Version: " + detector.getVersionString())
audio_stream.start()
try:
while(True):
frame = audio_stream.read(bufsize*2,bufsize*2)
if(not frame):
time.sleep(0.01)
continue
features = extractor.signalToMel(frame, extractor_gain)
prediction = detector.runDetection(features)
if(prediction != 0):
now = datetime.datetime.now().strftime("%d.%b %Y %H:%M:%S")
if(prediction == keywordIdFirefox):
print("Firefox detected:" + now)
elif(prediction == keywordIdSheila):
print("Sheila detected:" + now)
elif(prediction == keywordIdMarvin):
print("Marvin detected:" + now)
elif(prediction == keywordIdAlexa):
print("Alexa detected:" + now)
os.system(play_command + " ../resources/ding.wav")
except KeyboardInterrupt:
print("Terminating")
audio_stream.stop()
sys.exit(0)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--libpath', type=str,
default=default_libpath,
help='Path to Platform specific nyumaya_lib.')
FLAGS, unparsed = parser.parse_known_args()
detectKeywords(FLAGS.libpath)
|
py | 1a3768e9190421d9fa74783f3df302bab1cbda3b | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: define activation functions of neural network
from ...framework import ParamAttr
from ..initializer import Constant
from paddle.framework import get_default_dtype
from .. import functional as F
from paddle.nn import Layer
__all__ = []
class CELU(Layer):
r"""
CELU Activation.
.. math::
CELU(x) = max(0, x) + min(0, \alpha * (e^{x/\alpha}-1))
Parameters:
alpha (float, optional): The 'alpha' value of the CELU formulation. Default is 1.0.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Shape:
- input: Tensor with any shape.
- output: Tensor with the same shape as input.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([[-1. ,6.], [1., 15.6]])
m = paddle.nn.CELU(0.2)
out = m(x)
# [[-0.19865242, 6. ],
# [ 1. , 15.60000038]]
"""
def __init__(self, alpha=1.0, name=None):
super(CELU, self).__init__()
self._alpha = alpha
self._name = name
def forward(self, x):
return F.celu(x, self._alpha, self._name)
def extra_repr(self):
name_str = ', name={}'.format(self._name) if self._name else ''
return 'alpha={}{}'.format(self._alpha, name_str)
class ELU(Layer):
r"""
ELU Activation.
.. math::
ELU(x)=
\left\{
\begin{array}{lcl}
x,& &\text{if } \ x > 0 \\
alpha * (e^{x} - 1),& &\text{if } \ x <= 0
\end{array}
\right.
Parameters:
alpha (float, optional): The 'alpha' value of the ELU formulation. Default is 1.0.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Shape:
- input: Tensor with any shape.
- output: Tensor with the same shape as input.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([[-1. ,6.], [1., 15.6]])
m = paddle.nn.ELU(0.2)
out = m(x)
# [[-0.12642411 6. ]
# [ 1. 15.6 ]]
"""
def __init__(self, alpha=1.0, name=None):
super(ELU, self).__init__()
self._alpha = alpha
self._name = name
def forward(self, x):
return F.elu(x, self._alpha, self._name)
def extra_repr(self):
name_str = ', name={}'.format(self._name) if self._name else ''
return 'alpha={}{}'.format(self._alpha, name_str)
class GELU(Layer):
r"""
GELU Activation.
If approximate is True
.. math::
GELU(x) = 0.5 * x * (1 + tanh(\sqrt{\frac{2}{\pi}} * (x + 0.044715x^{3})))
else
.. math::
GELU(x) = 0.5 * x * (1 + erf(\frac{x}{\sqrt{2}}))
Parameters:
approximate (bool, optional): Whether to enable approximation. Default is False.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Shape:
- input: Tensor with any shape.
- output: Tensor with the same shape as input.
Examples:
.. code-block:: python
import paddle
import numpy as np
x = paddle.to_tensor(np.array([[-1, 0.5],[1, 1.5]]))
m = paddle.nn.GELU()
out = m(x) # [-0.158655 0.345731 0.841345 1.39979]
m = paddle.nn.GELU(True)
out = m(x) # [-0.158808 0.345714 0.841192 1.39957]
"""
def __init__(self, approximate=False, name=None):
super(GELU, self).__init__()
self._approximate = approximate
self._name = name
def forward(self, x):
return F.gelu(x, self._approximate, self._name)
def extra_repr(self):
name_str = ', name={}'.format(self._name) if self._name else ''
return 'approximate={}{}'.format(self._approximate, name_str)
class Hardshrink(Layer):
r"""
Hardshrink Activation
.. math::
hardshrink(x)=
\left\{
\begin{array}{rcl}
x, & & if \ x > threshold \\
x, & & if \ x < -threshold \\
0, & & if \ others
\end{array}
\right.
Parameters:
threshold (float, optional): The value of threshold for hardshrink. Default is 0.5
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Shape:
- input: Tensor with any shape.
- output: Tensor with the same shape as input.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([-1, 0.3, 2.5])
m = paddle.nn.Hardshrink()
out = m(x) # [-1., 0., 2.5]
"""
def __init__(self, threshold=0.5, name=None):
super(Hardshrink, self).__init__()
self._threshold = threshold
self._name = name
def forward(self, x):
return F.hardshrink(x, self._threshold, self._name)
def extra_repr(self):
name_str = ', name={}'.format(self._name) if self._name else ''
return 'threshold={}{}'.format(self._threshold, name_str)
class Hardswish(Layer):
r"""
Hardswish activation
Hardswish is proposed in MobileNetV3, and performs better in computational stability
and efficiency compared to swish function. For more details please refer
to: https://arxiv.org/pdf/1905.02244.pdf
.. math::
Hardswish(x)=
\left\{
\begin{array}{cll}
0 &, & \text{if } x \leq -3 \\
x &, & \text{if } x \geq 3 \\
\frac{x(x+3)}{6} &, & \text{otherwise}
\end{array}
\right.
Parameters:
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Shape:
- input: Tensor with any shape.
- output: Tensor with the same shape as input.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([-4., 5., 1.])
m = paddle.nn.Hardswish()
out = m(x) # [0., 5., 0.666667]
"""
def __init__(self, name=None):
super(Hardswish, self).__init__()
self._name = name
def forward(self, x):
return F.hardswish(x, self._name)
def extra_repr(self):
name_str = 'name={}'.format(self._name) if self._name else ''
return name_str
class Tanh(Layer):
r"""
Tanh Activation.
.. math::
Tanh(x) = \frac{e^{x} - e^{-x}}{e^{x} + e^{-x}}
Parameters:
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Shape:
- input: Tensor with any shape.
- output: Tensor with the same shape as input.
Examples:
.. code-block:: python
import paddle
import numpy as np
x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
m = paddle.nn.Tanh()
out = m(x)
print(out)
# [-0.37994896 -0.19737532 0.09966799 0.29131261]
"""
def __init__(self, name=None):
super(Tanh, self).__init__()
self._name = name
def forward(self, x):
return F.tanh(x, self._name)
def extra_repr(self):
name_str = 'name={}'.format(self._name) if self._name else ''
return name_str
class Hardtanh(Layer):
r"""
Hardtanh Activation
.. math::
Hardtanh(x)=
\left\{
\begin{array}{cll}
max,& & \text{if } x > max \\
min,& & \text{if } x < min \\
x,& & \text{otherwise}
\end{array}
\right.
Parameters:
min (float, optional): The value of min for Hardtanh. Default is -1.
max (float, optional): The value of max for Hardtanh. Default is 1.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Shape:
- input: Tensor with any shape.
- output: Tensor with the same shape as input.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([-1.5, 0.3, 2.5])
m = paddle.nn.Hardtanh()
out = m(x) # [-1., 0.3, 1.]
"""
def __init__(self, min=-1.0, max=1.0, name=None):
super(Hardtanh, self).__init__()
self._min = min
self._max = max
self._name = name
def forward(self, x):
return F.hardtanh(x, self._min, self._max, self._name)
def extra_repr(self):
name_str = ', name={}'.format(self._name) if self._name else ''
return 'min={}, max={}{}'.format(self._min, self._max, name_str)
class PReLU(Layer):
"""
PReLU Activation.
.. math::
PReLU(x) = max(0, x) + weight * min(0, x)
Parameters:
num_parameters (int, optional): Number of `weight` to learn. The supported values are:
1 - a single parameter `alpha` is used for all input channels;
Number of channels - a separate `alpha` is used for each input channel.
Default is 1.
init (float, optional): Init value of learnable `weight`. Default is 0.25.
weight_attr(ParamAttr, optional): The parameter attribute for the learnable `weight`.
Default is None. For more information, please refer to :ref:`api_paddle_ParamAttr`.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
data_format(str, optional): Data format that specifies the layout of input.
It may be "NC", "NCL", "NCHW", "NCDHW", "NLC", "NHWC" or "NDHWC". Default: "NCHW".
Shape:
- input: Tensor with any shape. Default dtype is float32.
- output: Tensor with the same shape as input.
Examples:
.. code-block:: python
import paddle
import numpy as np
paddle.set_default_dtype("float64")
data = np.array([[[[-2.0, 3.0, -4.0, 5.0],
[ 3.0, -4.0, 5.0, -6.0],
[-7.0, -8.0, 8.0, 9.0]],
[[ 1.0, -2.0, -3.0, 4.0],
[-5.0, 6.0, 7.0, -8.0],
[ 6.0, 7.0, 8.0, 9.0]]]], 'float64')
x = paddle.to_tensor(data)
m = paddle.nn.PReLU(1, 0.25)
out = m(x)
# [[[[-0.5 , 3. , -1. , 5. ],
# [ 3. , -1. , 5. , -1.5 ],
# [-1.75, -2. , 8. , 9. ]],
# [[ 1. , -0.5 , -0.75, 4. ],
# [-1.25, 6. , 7. , -2. ],
# [ 6. , 7. , 8. , 9. ]]]]
"""
def __init__(self,
num_parameters=1,
init=0.25,
weight_attr=None,
data_format="NCHW",
name=None):
super(PReLU, self).__init__()
self._num_parameters = num_parameters
self._init = init
self._weight_attr = weight_attr
self._name = name
self._data_format = data_format
self._weight = self.create_parameter(
attr=self._weight_attr,
shape=[self._num_parameters],
dtype=get_default_dtype(),
is_bias=False,
default_initializer=Constant(self._init))
def forward(self, x):
return F.prelu(x, self._weight, data_format=self._data_format)
def extra_repr(self):
name_str = ', name={}'.format(self._name) if self._name else ''
return 'num_parameters={}, data_format={}, init={}, dtype={}{}'.format(
self._num_parameters, self._data_format, self._init, self._dtype,
name_str)
class RReLU(Layer):
r"""
RReLU activation layer.
Applies the randomized leaky rectified linear unit function to improve generalization performance,
as described in the paper:
`Empirical Evaluation of Rectified Activations in Convolutional Network <https://arxiv.org/abs/1505.00853>`_
During training, randomly samples the negative slope for activation values as described below:
.. math::
RReLU(x)=
\left\{
\begin{array}{rcl}
x, & & if \ x >= 0 \\
a * x, & & otherwise \\
\end{array}
\right.
where :math:`x` is the input tensor,
:math:`a` is randomly sampled from uniform distribution in range (:math:`lower`, :math:`upper`),
In the test phase, the negative slope will take the average value of :math:`lower` and :math:`upper`:
.. math::
RReLU(x)=
\left\{
\begin{array}{rcl}
x, & & if \ x >= 0 \\
(lower + upper) * 0.5 * x, & & otherwise \\
\end{array}
\right.
where :math:`x` is the input tensor,
:math:`lower` and :math:`upper` are the bounds of uniform distribution.
Parameters:
lower (float, optional): The lower bound of uniform distribution. Default: 0.125.
upper (float, optional): The upper bound of uniform distribution. Default: 0.333.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Shape:
- input: Tensor with any shape. Default dtype is float32.
- output: Tensor with the same shape as input.
Examples:
.. code-block:: python
:name: RReLU-example
import paddle
input_tensor = paddle.to_tensor([[[[-2.0, 3.0, -4.0, 5.0],
[ 3.0, -4.0, 5.0, -6.0],
[-7.0, -8.0, 8.0, 9.0]],
[[ 1.0, -2.0, -3.0, 4.0],
[-5.0, 6.0, 7.0, -8.0],
[ 6.0, 7.0, 8.0, 9.0]]]], dtype='float32')
rrelu_layer = paddle.nn.RReLU(0.1, 0.3)
output = rrelu_layer(input_tensor)
#[[[[-0.20000899 3. -0.88108218 5. ]
# [ 3. -0.55175185 5. -1.07761011]
# [-1.06806871 -1.98962009 8. 9. ]]
# [[ 1. -0.52382672 -0.65515128 4. ]
# [-1.37663394 6. 7. -2.34657836]
# [ 6. 7. 8. 9. ]]]]
"""
def __init__(self, lower=1. / 8., upper=1. / 3., name=None):
super(RReLU, self).__init__()
self._lower = lower
self._upper = upper
self._name = name
def forward(self, x):
return F.rrelu(
x, lower=self._lower, upper=self._upper, training=self.training)
def extra_repr(self):
name_str = ', name={}'.format(self._name) if self._name else ''
return 'lower={}, upper={}, training={}, dtype={}{}'.format(
self._lower, self._upper, self.training, self._dtype, name_str)
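# Minimal sketch of the train/eval difference described above: in eval mode the
# negative slope is the fixed average (lower + upper) / 2, so the output below is
# deterministic, while in train mode the slope is re-sampled on every call.
def _rrelu_eval_example():
    import paddle
    m = RReLU(0.1, 0.3)
    m.eval()
    x = paddle.to_tensor([-10.0, 5.0])
    return m(x)  # [-2.0, 5.0], since the eval-mode slope is (0.1 + 0.3) / 2 = 0.2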
class ReLU(Layer):
"""
ReLU Activation.
.. math::
ReLU(x) = max(x, 0)
Parameters:
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Shape:
- input: Tensor with any shape.
- output: Tensor with the same shape as input.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([-2., 0., 1.])
m = paddle.nn.ReLU()
out = m(x) # [0., 0., 1.]
"""
def __init__(self, name=None):
super(ReLU, self).__init__()
self._name = name
def forward(self, x):
return F.relu(x, self._name)
def extra_repr(self):
name_str = 'name={}'.format(self._name) if self._name else ''
return name_str
class ReLU6(Layer):
"""
ReLU6 Activation
.. math::
ReLU6(x) = min(max(0,x), 6)
Parameters:
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Shape:
- input: Tensor with any shape.
- output: Tensor with the same shape as input.
Examples:
.. code-block:: python
import paddle
import numpy as np
x = paddle.to_tensor(np.array([-1, 0.3, 6.5]))
m = paddle.nn.ReLU6()
out = m(x) # [0, 0.3, 6]
"""
def __init__(self, name=None):
super(ReLU6, self).__init__()
self._name = name
def forward(self, x):
return F.relu6(x, self._name)
def extra_repr(self):
name_str = 'name={}'.format(self._name) if self._name else ''
return name_str
class SELU(Layer):
r"""
SELU Activation
.. math::
SELU(x)= scale *
\left\{
\begin{array}{lcl}
x,& &\text{if } \ x > 0 \\
alpha * e^{x} - alpha,& &\text{if } \ x <= 0
\end{array}
\right.
Parameters:
scale (float, optional): The value of scale(must be greater than 1.0) for SELU. Default is 1.0507009873554804934193349852946
alpha (float, optional): The value of alpha(must be no less than zero) for SELU. Default is 1.6732632423543772848170429916717
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Shape:
- input: Tensor with any shape.
- output: Tensor with the same shape as input.
Examples:
.. code-block:: python
import paddle
import numpy as np
x = paddle.to_tensor(np.array([[0.0, 1.0],[2.0, 3.0]]))
m = paddle.nn.SELU()
out = m(x) # [[0, 1.050701],[2.101402, 3.152103]]
"""
def __init__(self,
scale=1.0507009873554804934193349852946,
alpha=1.6732632423543772848170429916717,
name=None):
super(SELU, self).__init__()
self._scale = scale
self._alpha = alpha
self._name = name
def forward(self, x):
return F.selu(x, self._scale, self._alpha, self._name)
def extra_repr(self):
name_str = ', name={}'.format(self._name) if self._name else ''
return 'scale={:.16f}, alpha={:.16f}{}'.format(self._scale, self._alpha,
name_str)
class LeakyReLU(Layer):
r"""
Leaky ReLU Activation.
.. math::
LeakyReLU(x)=
\left\{
\begin{array}{rcl}
x, & & if \ x >= 0 \\
negative\_slope * x, & & otherwise \\
\end{array}
\right.
Parameters:
negative_slope (float, optional): Slope of the activation function at
:math:`x < 0` . Default is 0.01.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Shape:
- input: Tensor with any shape.
- output: Tensor with the same shape as input.
Examples:
.. code-block:: python
import paddle
import numpy as np
m = paddle.nn.LeakyReLU()
x = paddle.to_tensor(np.array([-2, 0, 1], 'float32'))
out = m(x) # [-0.02, 0., 1.]
"""
def __init__(self, negative_slope=0.01, name=None):
super(LeakyReLU, self).__init__()
self._negative_slope = negative_slope
self._name = name
def forward(self, x):
return F.leaky_relu(x, self._negative_slope, self._name)
def extra_repr(self):
name_str = ', name={}'.format(self._name) if self._name else ''
return 'negative_slope={}{}'.format(self._negative_slope, name_str)
class Sigmoid(Layer):
"""
This interface is used to construct a callable object of the ``Sigmoid`` class. This layer calculates the `sigmoid` of input x.
.. math::
Sigmoid(x) = \\frac{1}{1 + e^{-x}}
Parameters:
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Shape:
x: N-D tensor, available dtype is float16, float32, float64.
Returns:
A callable object of Sigmoid.
Examples:
.. code-block:: python
import paddle
m = paddle.nn.Sigmoid()
x = paddle.to_tensor([1.0, 2.0, 3.0, 4.0])
out = m(x) # [0.7310586, 0.880797, 0.95257413, 0.98201376]
"""
def __init__(self, name=None):
super(Sigmoid, self).__init__()
self.name = name
def forward(self, x):
return F.sigmoid(x, self.name)
def extra_repr(self):
name_str = 'name={}'.format(self.name) if self.name else ''
return name_str
class Hardsigmoid(Layer):
r"""
This interface is used to construct a callable object of the ``Hardsigmoid`` class.
This layer calculates the `hardsigmoid` of input x.
A 3-part piecewise linear approximation of sigmoid (https://arxiv.org/abs/1603.00391),
which is much faster than sigmoid.
.. math::
Hardsigmoid(x)=
\left\{
\begin{array}{rcl}
0, & & \text{if } \ x \leq -3 \\
1, & & \text{if } \ x \geq 3 \\
x/6 + 1/2, & & \text{otherwise}
\end{array}
\right.
Parameters:
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Shape:
x: N-D tensor, available dtype is float32, float64.
Returns:
A callable object of Hardsigmoid.
Examples:
.. code-block:: python
import paddle
m = paddle.nn.Hardsigmoid()
x = paddle.to_tensor([-4., 5., 1.])
out = m(x) # [0., 1, 0.666667]
"""
def __init__(self, name=None):
super(Hardsigmoid, self).__init__()
self.name = name
def forward(self, x):
return F.hardsigmoid(x, name=self.name)
def extra_repr(self):
name_str = 'name={}'.format(self.name) if self.name else ''
return name_str
class Softplus(Layer):
r"""
Softplus Activation
.. math::
Softplus(x) = \frac{1}{beta} * \log(1 + e^{beta * x}) \\
\text{For numerical stability, the implementation reverts to the linear function when: beta * x > threshold.}
Parameters:
beta (float, optional): The value of beta for Softplus. Default is 1
threshold (float, optional): The value of threshold for Softplus. Default is 20
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Shape:
- input: Tensor with any shape.
- output: Tensor with the same shape as input.
Examples:
.. code-block:: python
import paddle
import numpy as np
x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
m = paddle.nn.Softplus()
out = m(x) # [0.513015, 0.598139, 0.744397, 0.854355]
"""
def __init__(self, beta=1, threshold=20, name=None):
super(Softplus, self).__init__()
self._beta = beta
self._threshold = threshold
self._name = name
def forward(self, x):
return F.softplus(x, self._beta, self._threshold, self._name)
def extra_repr(self):
name_str = ', name={}'.format(self._name) if self._name else ''
return 'beta={}, threshold={}{}'.format(self._beta, self._threshold,
name_str)
class Softshrink(Layer):
r"""
Softshrink Activation
.. math::
Softshrink(x)=
\left\{
\begin{array}{rcl}
x - threshold,& & \text{if } x > threshold \\
x + threshold,& & \text{if } x < -threshold \\
0,& & \text{otherwise}
\end{array}
\right.
Parameters:
threshold (float, optional): The value of threshold (must be no less than zero) for softshrink. Default is 0.5
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Shape:
- input: Tensor with any shape.
- output: Tensor with the same shape as input.
Examples:
.. code-block:: python
import paddle
import numpy as np
x = paddle.to_tensor(np.array([-0.9, -0.2, 0.1, 0.8]))
m = paddle.nn.Softshrink()
out = m(x) # [-0.4, 0, 0, 0.3]
"""
def __init__(self, threshold=0.5, name=None):
super(Softshrink, self).__init__()
self._threshold = threshold
self._name = name
def forward(self, x):
return F.softshrink(x, self._threshold, self._name)
def extra_repr(self):
name_str = ', name={}'.format(self._name) if self._name else ''
return 'threshold={}{}'.format(self._threshold, name_str)
class Softsign(Layer):
r"""
Softsign Activation
.. math::
Softsign(x) = \frac{x}{1 + |x|}
Parameters:
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Shape:
- input: Tensor with any shape.
- output: Tensor with the same shape as input.
Examples:
.. code-block:: python
import paddle
import numpy as np
x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
m = paddle.nn.Softsign()
out = m(x) # [-0.285714, -0.166667, 0.0909091, 0.230769]
"""
def __init__(self, name=None):
super(Softsign, self).__init__()
self._name = name
def forward(self, x):
return F.softsign(x, self._name)
def extra_repr(self):
name_str = 'name={}'.format(self._name) if self._name else ''
return name_str
class Swish(Layer):
r"""
Swish Activation.
.. math::
Swish(x) = \frac{x}{1 + e^{-x}}
Parameters:
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Shape:
- input: Tensor with any shape.
- output: Tensor with the same shape as input.
Examples:
.. code-block:: python
import paddle
import numpy as np
x = paddle.to_tensor(np.array([-2., 0., 1.]))
m = paddle.nn.Swish()
out = m(x) # [-0.238406, 0., 0.731059]
"""
def __init__(self, name=None):
super(Swish, self).__init__()
self._name = name
def forward(self, x):
return F.swish(x, self._name)
def extra_repr(self):
name_str = 'name={}'.format(self._name) if self._name else ''
return name_str
class Mish(Layer):
r"""
Mish Activation.
.. math::
softplus(x) = \begin{cases}
x, \text{if } x > \text{threshold} \\
\ln(1 + e^{x}), \text{otherwise}
\end{cases}
Mish(x) = x * \tanh(softplus(x))
Parameters:
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Shape:
- input: Tensor with any shape.
- output: Tensor with the same shape as input.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([-5., 0., 5.])
m = paddle.nn.Mish()
out = m(x) # [-0.03357624, 0., 4.99955208]
"""
def __init__(self, name=None):
super(Mish, self).__init__()
self._name = name
def forward(self, x):
return F.mish(x, self._name)
def extra_repr(self):
name_str = 'name={}'.format(self._name) if self._name else ''
return name_str
class Tanhshrink(Layer):
"""
Tanhshrink Activation
.. math::
Tanhshrink(x) = x - tanh(x)
Parameters:
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Shape:
- input: Tensor with any shape.
- output: Tensor with the same shape as input.
Examples:
.. code-block:: python
import paddle
import numpy as np
x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
m = paddle.nn.Tanhshrink()
out = m(x) # [-0.020051, -0.00262468, 0.000332005, 0.00868739]
"""
def __init__(self, name=None):
super(Tanhshrink, self).__init__()
self._name = name
def forward(self, x):
return F.tanhshrink(x, self._name)
def extra_repr(self):
name_str = 'name={}'.format(self._name) if self._name else ''
return name_str
class ThresholdedReLU(Layer):
r"""
Thresholded ReLU Activation
.. math::
ThresholdedReLU(x) =
\left\{
\begin{array}{rl}
x,& \text{if } \ x > threshold \\
0,& \text{otherwise}
\end{array}
\right.
Parameters:
threshold (float, optional): The value of threshold for ThresholdedReLU. Default is 1.0
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Shape:
- input: Tensor with any shape.
- output: Tensor with the same shape as input.
Examples:
.. code-block:: python
import paddle
import numpy as np
x = paddle.to_tensor(np.array([2., 0., 1.]))
m = paddle.nn.ThresholdedReLU()
out = m(x) # [2., 0., 0.]
"""
def __init__(self, threshold=1.0, name=None):
super(ThresholdedReLU, self).__init__()
self._threshold = threshold
self._name = name
def forward(self, x):
return F.thresholded_relu(x, self._threshold, self._name)
def extra_repr(self):
name_str = ', name={}'.format(self._name) if self._name else ''
return 'threshold={}{}'.format(self._threshold, name_str)
class Silu(Layer):
"""
Silu Activation.
.. math::
Silu(x) = \frac{x}{1 + e^{-x}}
Parameters:
x (Tensor): The input Tensor with data type float32, or float64.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Shape:
- input: Tensor with any shape.
- output: Tensor with the same shape as input.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([1.0, 2.0, 3.0, 4.0])
m = paddle.nn.Silu()
out = m(x) # [ 0.731059, 1.761594, 2.857722, 3.928055 ]
"""
def __init__(self, name=None):
super(Silu, self).__init__()
self._name = name
def forward(self, x):
return F.silu(x, self._name)
def extra_repr(self):
name_str = 'name={}'.format(self._name) if self._name else ''
return name_str
class LogSigmoid(Layer):
r"""
LogSigmoid Activation.
.. math::
LogSigmoid(x) = log \frac{1}{1 + e^{-x}}
Parameters:
x (Tensor): The input Tensor with data type float32, or float64.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Shape:
- input: Tensor with any shape.
- output: Tensor with the same shape as input.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([1.0, 2.0, 3.0, 4.0])
m = paddle.nn.LogSigmoid()
out = m(x) # [-0.313262 -0.126928 -0.0485874 -0.0181499]
"""
def __init__(self, name=None):
super(LogSigmoid, self).__init__()
self._name = name
def forward(self, x):
return F.log_sigmoid(x, self._name)
def extra_repr(self):
name_str = 'name={}'.format(self._name) if self._name else ''
return name_str
class Softmax(Layer):
r"""
Softmax Activation.
This operator implements the softmax layer. The calculation process is as follows:
1. The dimension :attr:`axis` of ``x`` will be permuted to the last.
2. Then ``x`` will be logically flattened to a 2-D matrix. The matrix's second
dimension(row length) is the same as the dimension :attr:`axis` of ``x``,
and the first dimension(column length) is the product of all other dimensions
of ``x``. For each row of the matrix, the softmax operator squashes the
K-dimensional(K is the width of the matrix, which is also the size of ``x``'s
dimension :attr:`axis`) vector of arbitrary real values to a K-dimensional
vector of real values in the range [0, 1] that add up to 1.
3. After the softmax operation is completed, the inverse operations of steps 1 and 2
are performed to restore the two-dimensional matrix to the same dimension as the ``x`` .
It computes the exponential of the given dimension and the sum of exponential
values of all the other dimensions in the K-dimensional vector input.
Then the ratio of the exponential of the given dimension and the sum of
exponential values of all the other dimensions is the output of the softmax
operator.
For each row :math:`i` and each column :math:`j` in the matrix, we have:
.. math::
Softmax[i, j] = \frac{\exp(x[i, j])}{\sum_j(\exp(x[i, j]))}
Example:
.. code-block:: text
Case 1:
Input:
x.shape = [2, 3, 4]
x.data = [[[2.0, 3.0, 4.0, 5.0],
[3.0, 4.0, 5.0, 6.0],
[7.0, 8.0, 8.0, 9.0]],
[[1.0, 2.0, 3.0, 4.0],
[5.0, 6.0, 7.0, 8.0],
[6.0, 7.0, 8.0, 9.0]]]
Attrs:
axis = -1
Output:
out.shape = [2, 3, 4]
out.data = [[[0.0320586 , 0.08714432, 0.23688282, 0.64391426],
[0.0320586 , 0.08714432, 0.23688282, 0.64391426],
[0.07232949, 0.19661193, 0.19661193, 0.53444665]],
[[0.0320586 , 0.08714432, 0.23688282, 0.64391426],
[0.0320586 , 0.08714432, 0.23688282, 0.64391426],
[0.0320586 , 0.08714432, 0.23688282, 0.64391426]]]
Case 2:
Input:
x.shape = [2, 3, 4]
x.data = [[[2.0, 3.0, 4.0, 5.0],
[3.0, 4.0, 5.0, 6.0],
[7.0, 8.0, 8.0, 9.0]],
[[1.0, 2.0, 3.0, 4.0],
[5.0, 6.0, 7.0, 8.0],
[6.0, 7.0, 8.0, 9.0]]]
Attrs:
axis = 1
Output:
out.shape = [2, 3, 4]
out.data = [[[0.00657326, 0.00657326, 0.01714783, 0.01714783],
[0.01786798, 0.01786798, 0.04661262, 0.04661262],
[0.97555875, 0.97555875, 0.93623955, 0.93623955]],
[[0.00490169, 0.00490169, 0.00490169, 0.00490169],
[0.26762315, 0.26762315, 0.26762315, 0.26762315],
[0.72747516, 0.72747516, 0.72747516, 0.72747516]]]
Parameters:
axis (int, optional): The axis along which to perform log_softmax
calculations. It should be in range [-D, D), where D is the
dimensions of ``x`` . If ``axis`` < 0, it works the same way as
:math:`axis + D` . Default is -1.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Shape:
- input: Tensor with any shape.
- output: Tensor with the same shape as input.
Examples:
.. code-block:: python
import paddle
import numpy as np
x = np.array([[[2.0, 3.0, 4.0, 5.0],
[3.0, 4.0, 5.0, 6.0],
[7.0, 8.0, 8.0, 9.0]],
[[1.0, 2.0, 3.0, 4.0],
[5.0, 6.0, 7.0, 8.0],
[6.0, 7.0, 8.0, 9.0]]], 'float32')
x = paddle.to_tensor(x)
m = paddle.nn.Softmax()
out = m(x)
# [[[0.0320586 , 0.08714432, 0.23688282, 0.64391426],
# [0.0320586 , 0.08714432, 0.23688282, 0.64391426],
# [0.07232949, 0.19661193, 0.19661193, 0.53444665]],
# [[0.0320586 , 0.08714432, 0.23688282, 0.64391426],
# [0.0320586 , 0.08714432, 0.23688282, 0.64391426],
# [0.0320586 , 0.08714432, 0.23688282, 0.64391426]]]
"""
def __init__(self, axis=-1, name=None):
super(Softmax, self).__init__()
self._axis = axis
self._dtype = None
self._name = name
def forward(self, x):
return F.softmax(x, self._axis, self._dtype, self._name)
def extra_repr(self):
name_str = ', name={}'.format(self._name) if self._name else ''
return 'axis={}{}'.format(self._axis, name_str)
class LogSoftmax(Layer):
r"""
This operator implements the log_softmax layer. The calculation process is as follows:
.. math::
\begin{array} {rcl}
Out[i, j] &= &log(softmax(x)) \\
&= &log(\frac{\exp(X[i, j])}{\sum_j(\exp(X[i, j]))})
\end{array}
Parameters:
axis (int, optional): The axis along which to perform log_softmax
calculations. It should be in range [-D, D), where D is the
dimensions of the input Tensor . If ``axis`` < 0, it works the
same way as :math:`axis + D` . Default is -1.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Shape:
- input: Tensor with any shape.
- output: Tensor with the same shape as input.
Examples:
.. code-block:: python
import paddle
x = [[[-2.0, 3.0, -4.0, 5.0],
[3.0, -4.0, 5.0, -6.0],
[-7.0, -8.0, 8.0, 9.0]],
[[1.0, -2.0, -3.0, 4.0],
[-5.0, 6.0, 7.0, -8.0],
[6.0, 7.0, 8.0, 9.0]]]
m = paddle.nn.LogSoftmax()
x = paddle.to_tensor(x)
out = m(x)
# [[[ -7.1278396 -2.1278396 -9.127839 -0.12783948]
# [ -2.1270514 -9.127051 -0.12705144 -11.127051 ]
# [-16.313261 -17.313261 -1.3132617 -0.31326184]]
# [[ -3.0518122 -6.051812 -7.051812 -0.051812 ]
# [-12.313267 -1.3132664 -0.3132665 -15.313267 ]
# [ -3.4401896 -2.4401896 -1.4401896 -0.44018966]]]
"""
def __init__(self, axis=-1, name=None):
super(LogSoftmax, self).__init__()
self._axis = axis
self._name = name
def forward(self, x):
return F.log_softmax(x, self._axis)
def extra_repr(self):
name_str = ', name={}'.format(self._name) if self._name else ''
return 'axis={}{}'.format(self._axis, name_str)
class Maxout(Layer):
r"""
Maxout Activation.
Assumed the input shape is (N, Ci, H, W).
The output shape is (N, Co, H, W).
Then Co = Ci/groups and the operator formula is as follows:
.. math::
\begin{array}{l}
&out_{si+j} = \max_{k} x_{gsi + sk + j} \\
&g = groups \\
&s = \frac{input.size}{num\_channels} \\
&0 \le i < \frac{num\_channels}{groups} \\
&0 \le j < s \\
&0 \le k < groups
\end{array}
Parameters:
groups (int, optional): The groups number of maxout. `groups` specifies the
index of channel dimension where maxout will be performed. This must be
a factor of number of features. Default is 1.
axis (int, optional): The axis along which to perform maxout calculations.
It should be 1 when data format is NCHW, be -1 or 3 when data format
is NHWC. If ``axis`` < 0, it works the same way as :math:`axis + D` ,
where D is the dimensions of ``x`` . Default is 1.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Shape:
- input: :math:`(N, C_{in}, H_{in}, W_{in})`
- output: :math:`(N, C_{out}, H_{out}, W_{out})`
Examples:
.. code-block:: python
import paddle
x = paddle.rand([1, 2, 3, 4])
# [[[[0.5002636 0.22272532 0.17402348 0.2874594 ]
# [0.95313174 0.6228939 0.7129065 0.7087491 ]
# [0.02879342 0.88725346 0.61093384 0.38833922]]
# [[0.5231306 0.03807496 0.91661984 0.15602879]
# [0.666127 0.616567 0.30741522 0.24044901]
# [0.7142536 0.7351477 0.31588817 0.23782359]]]]
m = paddle.nn.Maxout(groups=2)
out = m(x)
# [[[[0.5231306 0.22272532 0.91661984 0.2874594 ]
# [0.95313174 0.6228939 0.7129065 0.7087491 ]
# [0.7142536 0.88725346 0.61093384 0.38833922]]]]
"""
def __init__(self, groups, axis=1, name=None):
super(Maxout, self).__init__()
self._groups = groups
self._axis = axis
self._name = name
def forward(self, x):
return F.maxout(x, self._groups, self._axis, self._name)
def extra_repr(self):
name_str = ', name={}'.format(self._name) if self._name else ''
return 'groups={}, axis={}{}'.format(self._groups, self._axis, name_str)
class Softmax2D(Layer):
r"""
Softmax2D Activation.
Given a Tensor with shape (B, C, H, W) or (C, H, W), it applies Softmax over the channel dimension at each spatial location (h_i, w_j).
The result therefore sums to one over the channels at every location.
Shape:
- Input: :math:`(B, C, H, W)` or :math:`(C, H, W)`
- Output: :math:`(B, C, H, W)` or :math:`(C, H, W)`(same as input)
Return:
A Tensor of the same shape and dtype as input with value in range [0, 1].
Examples:
.. code-block:: python
import paddle
x = paddle.rand([1, 2, 3, 4])
# [[[[0.42496058 0.1172187 0.14664008 0.8151267 ]
# [0.24430142 0.42052492 0.60372984 0.79307914]
# [0.4539401 0.90458065 0.10235776 0.62009853]]
# [[0.11731581 0.16053623 0.05667042 0.91876775]
# [0.9413854 0.30770817 0.6788164 0.9543593 ]
# [0.4145064 0.75909156 0.11598814 0.73599935]]]]
m = paddle.nn.Softmax2D()
out = m(x)
# [[[[0.5763103 0.48917228 0.5224772 0.4741129 ]
# [0.3324591 0.5281743 0.48123717 0.45976716]
# [0.5098571 0.5363083 0.49659243 0.4710572 ]]
# [[0.42368975 0.51082766 0.47752273 0.5258871 ]
# [0.66754097 0.47182566 0.5187628 0.5402329 ]
# [0.49014282 0.46369177 0.50340754 0.5289428 ]]]]
"""
def __init__(self, name=None):
super(Softmax2D, self).__init__()
self._dtype = None
self._name = name
def forward(self, x):
assert x.ndim == 3 or x.ndim == 4, "Softmax2D requires a 3D or 4D tensor as input. Received: {}D.".format(
x.ndim)
return F.softmax(x, axis=-3, dtype=self._dtype, name=self._name)
def extra_repr(self):
name_str = 'name={}'.format(self._name) if self._name else ''
return name_str
|
py | 1a3769599945accb94adad8986c049d505789da0 | import os
import torch
import torch.nn as nn
from torch_geometric.nn.conv import RGCNConv, GCNConv, GINConv
import torch_geometric.nn as gnn
import numpy as np
from torch_geometric.data import Data
import torch.nn.functional as F
class TemporalExtGCN(nn.Module):
def __init__(self, lfd_params, is_training=False, filename=None,
node_size=500, num_relations=1, output_size=4):
super().__init__()
self.lfd_params = lfd_params
# model filenames
self.filename = os.path.join(filename, ".".join(["model", "temporal_gcn", "pt"]))
# constants params
self.num_relations = num_relations # should be 7?
self.node_size = node_size
self.hidden_size = 512
self.output_size = output_size
# define model vars
# CONSIDER STACKED (will need ReLU, check on actual ITR data)
#self.gcn = GCNConv(self.node_size, self.hidden_size)
#self.gcn1 = GCNConv(self.node_size, self.hidden_size)
#self.gcn2 = GCNConv(self.hidden_size, self.hidden_size)
#self.gcn3 = GCNConv(self.hidden_size, self.hidden_size)
#self.gcn4 = GCNConv(self.hidden_size, self.hidden_size)
#print("self.node_size, self.hidden_size, self.output_size:",
# self.node_size, self.hidden_size, self.output_size)
self.gcn1 = RGCNConv(self.node_size, self.hidden_size, num_relations=self.num_relations)
self.gcn2 = RGCNConv(self.hidden_size, self.hidden_size, num_relations=self.num_relations)
#self.gcn3 = RGCNConv(self.hidden_size, self.hidden_size, num_relations=self.num_relations)#
#self.gcn4 = RGCNConv(self.hidden_size, self.hidden_size, num_relations=self.num_relations)#
#self.densegcn = gnn.DenseGCNConv(self.hidden_size, self.output_size)
#nn1 = nn.Sequential(nn.Linear(self.node_size, self.hidden_size), nn.ReLU(), nn.Linear(self.hidden_size, self.hidden_size))
#self.gcn = GINConv(nn1)
#self.drop = torch.nn.Dropout(p=0.25)
# print("temp_ext_gcn.py:", self.node_size, int(self.node_size/2) * self.output_size)
self.fc = nn.Linear(self.hidden_size, self.output_size)
# load model parameters
if not is_training:
assert self.filename is not None, \
"ERROR: temporal_ext_linear.py: filename must be defined when is_training is False"
self.load_model(self.filename)
else:
print("TemporalExtLinear is training")
def forward(self, x):
x, edge_idx, edge_attr, batch = x.x, x.edge_index, x.edge_attr, x.batch
#print("x9:", x.shape)
#print("edge_idx:", edge_idx.shape)
#print("edge_attr:", edge_attr.shape)
#print("batch:", batch.shape)
x = x.float().cuda()
edge_idx = edge_idx.long().cuda()
edge_attr = edge_attr.cuda()
batch = batch.cuda()
x = F.relu(self.gcn1(x, edge_idx, edge_attr))
x = F.relu(self.gcn2(x, edge_idx, edge_attr))
#x = F.relu(self.gcn3(x, edge_idx, edge_attr))#
#x = F.relu(self.gcn4(x, edge_idx, edge_attr))#
x = gnn.global_add_pool(x, batch)
x = self.fc(x)
return x
def save_model(self):
torch.save(self.state_dict(), self.filename)
print("TemporalExtLinear Linear model saved to: ", self.filename)
def load_model(self, filename):
assert os.path.exists(filename), "ERROR: temporal_ext_linear.py: Cannot locate saved model - "+filename
print("Loading TemporalExtLinear from: " + filename)
checkpoint = torch.load(filename)
self.load_state_dict(checkpoint, strict=True)
for param in self.parameters():
param.requires_grad = False
|
py | 1a376a6e0d948b8641f5d09835f1db85135c9742 | # coding: utf-8
#
# Copyright 2016 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for core.storage.activity.gae_models."""
from __future__ import annotations
from core.constants import constants
from core.platform import models
from core.tests import test_utils
MYPY = False
if MYPY: # pragma: no cover
from mypy_imports import activity_models
from mypy_imports import base_models
(base_models, activity_models) = models.Registry.import_models(
[models.NAMES.base_model, models.NAMES.activity])
class ActivityListModelTest(test_utils.GenericTestBase):
"""Tests the ActivityListModel class."""
def test_get_deletion_policy(self) -> None:
self.assertEqual(
activity_models.ActivityReferencesModel.get_deletion_policy(),
base_models.DELETION_POLICY.NOT_APPLICABLE)
def test_featured_activity_list_always_exists(self) -> None:
featured_model_instance = (
activity_models.ActivityReferencesModel.get_or_create('featured'))
self.assertIsNotNone(featured_model_instance)
self.assertEqual(featured_model_instance.id, 'featured')
self.assertEqual(featured_model_instance.activity_references, [])
def test_retrieving_non_existent_list(self) -> None:
with self.assertRaisesRegexp(Exception, 'Invalid ActivityListModel'): # type: ignore[no-untyped-call]
activity_models.ActivityReferencesModel.get_or_create(
'nonexistent_key')
def test_updating_featured_activity_list(self) -> None:
featured_model_instance = (
activity_models.ActivityReferencesModel.get_or_create('featured'))
self.assertEqual(featured_model_instance.activity_references, [])
featured_model_instance.activity_references = [{
'type': constants.ACTIVITY_TYPE_EXPLORATION,
'id': '0',
}]
featured_model_instance.update_timestamps()
featured_model_instance.put()
featured_model_instance = (
activity_models.ActivityReferencesModel.get_or_create('featured'))
self.assertEqual(featured_model_instance.id, 'featured')
self.assertEqual(
featured_model_instance.activity_references, [{
'type': constants.ACTIVITY_TYPE_EXPLORATION,
'id': '0',
}])
|
py | 1a376a9851093a34c1830f7a9fe615f45141b616 | # this builtin is needed so we can overwrite in test
import asyncio
import json
import logging
import os
import aiohttp
import questionary
from aiohttp import ClientTimeout
from prompt_toolkit.styles import Style
from typing import Any
from typing import Text, Optional, Dict, List
from rasa.cli import utils as cli_utils
from rasa.core import utils
from rasa.core.channels.channel import RestInput
from rasa.core.constants import DEFAULT_SERVER_URL
from rasa.core.interpreter import INTENT_MESSAGE_PREFIX
from rasa.utils.io import DEFAULT_ENCODING
logger = logging.getLogger(__name__)
STREAM_READING_TIMEOUT_ENV = "RASA_SHELL_STREAM_READING_TIMEOUT_IN_SECONDS"
DEFAULT_STREAM_READING_TIMEOUT_IN_SECONDS = 10
def print_buttons(
message: Dict[Text, Any],
is_latest_message: bool = False,
color=cli_utils.bcolors.OKBLUE,
) -> Optional[questionary.Question]:
if is_latest_message:
choices = cli_utils.button_choices_from_message_data(
message, allow_free_text_input=True
)
question = questionary.select(
message.get("text"),
choices,
style=Style([("qmark", "#6d91d3"), ("", "#6d91d3"), ("answer", "#b373d6")]),
)
return question
else:
cli_utils.print_color("Buttons:", color=color)
for idx, button in enumerate(message.get("buttons")):
cli_utils.print_color(cli_utils.button_to_string(button, idx), color=color)
def print_bot_output(
message: Dict[Text, Any],
is_latest_message: bool = False,
color=cli_utils.bcolors.OKBLUE,
) -> Optional[questionary.Question]:
if "buttons" in message:
question = print_buttons(message, is_latest_message, color)
if question:
return question
if "text" in message:
cli_utils.print_color(message.get("text"), color=color)
if "image" in message:
cli_utils.print_color("Image: " + message.get("image"), color=color)
if "attachment" in message:
cli_utils.print_color("Attachment: " + message.get("attachment"), color=color)
if "elements" in message:
cli_utils.print_color("Elements:", color=color)
for idx, element in enumerate(message.get("elements")):
cli_utils.print_color(
cli_utils.element_to_string(element, idx), color=color
)
if "quick_replies" in message:
cli_utils.print_color("Quick Replies:", color=color)
for idx, element in enumerate(message.get("quick_replies")):
cli_utils.print_color(cli_utils.button_to_string(element, idx), color=color)
if "custom" in message:
cli_utils.print_color("Custom json:", color=color)
cli_utils.print_color(json.dumps(message.get("custom"), indent=2), color=color)
def get_user_input(previous_response: Optional[Dict[str, Any]]) -> Optional[Text]:
button_response = None
if previous_response is not None:
button_response = print_bot_output(previous_response, is_latest_message=True)
if button_response is not None:
response = cli_utils.payload_from_button_question(button_response)
if response == cli_utils.FREE_TEXT_INPUT_PROMPT:
# Re-prompt user with a free text input
response = get_user_input({})
else:
response = questionary.text(
"",
qmark="Your input ->",
style=Style([("qmark", "#b373d6"), ("", "#b373d6")]),
).ask()
return response.strip() if response is not None else None
async def send_message_receive_block(
server_url, auth_token, sender_id, message
) -> List[Dict[Text, Any]]:
payload = {"sender": sender_id, "message": message}
url = f"{server_url}/webhooks/rest/webhook?token={auth_token}"
async with aiohttp.ClientSession() as session:
async with session.post(url, json=payload, raise_for_status=True) as resp:
return await resp.json()
async def send_message_receive_stream(
server_url: Text, auth_token: Text, sender_id: Text, message: Text
):
payload = {"sender": sender_id, "message": message}
url = f"{server_url}/webhooks/rest/webhook?stream=true&token={auth_token}"
# Define timeout to not keep reading in case the server crashed in between
timeout = _get_stream_reading_timeout()
async with aiohttp.ClientSession(timeout=timeout) as session:
async with session.post(url, json=payload, raise_for_status=True) as resp:
async for line in resp.content:
if line:
yield json.loads(line.decode(DEFAULT_ENCODING))
def _get_stream_reading_timeout() -> ClientTimeout:
timeout_in_seconds = int(
os.environ.get(
STREAM_READING_TIMEOUT_ENV, DEFAULT_STREAM_READING_TIMEOUT_IN_SECONDS
)
)
return ClientTimeout(timeout_in_seconds)
async def record_messages(
sender_id,
server_url=DEFAULT_SERVER_URL,
auth_token="",
max_message_limit=None,
use_response_stream=True,
) -> int:
"""Read messages from the command line and print bot responses."""
exit_text = INTENT_MESSAGE_PREFIX + "stop"
cli_utils.print_success(
"Bot loaded. Type a message and press enter "
"(use '{}' to exit): ".format(exit_text)
)
num_messages = 0
previous_response = None
await asyncio.sleep(0.5) # Wait for server to start
while not utils.is_limit_reached(num_messages, max_message_limit):
text = get_user_input(previous_response)
if text == exit_text or text is None:
break
if use_response_stream:
bot_responses = send_message_receive_stream(
server_url, auth_token, sender_id, text
)
previous_response = None
async for response in bot_responses:
if previous_response is not None:
print_bot_output(previous_response)
previous_response = response
else:
bot_responses = await send_message_receive_block(
server_url, auth_token, sender_id, text
)
previous_response = None
for response in bot_responses:
if previous_response is not None:
print_bot_output(previous_response)
previous_response = response
num_messages += 1
        await asyncio.sleep(0)  # Yield event loop to other coroutines
return num_messages
class CmdlineInput(RestInput):
@classmethod
def name(cls) -> Text:
return "cmdline"
def url_prefix(self) -> Text:
return RestInput.name()
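# A minimal usage sketch (assumes a Rasa server is already reachable at
# DEFAULT_SERVER_URL; the sender id "default" is an arbitrary placeholder):
#   import asyncio
#   asyncio.run(record_messages(sender_id="default"))
# The loop ends when the user types the exit text (INTENT_MESSAGE_PREFIX + "stop")
# or when max_message_limit messages have been exchanged.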
|
py | 1a376b076b5d10be726a2f22e2ffd6b8a656e163 | # https://rosalind.info/problems/sims/
def fmtfa(fasta: list):
prev = True
header = []
seq = []
for f in fasta:
if ">" in f:
header.append(f[1:])
prev = True
elif prev:
seq.append(f)
prev = False
else:
seq[-1] += f
return header, seq
# INPUT -------------------------------------------
file_in = "sample/dataset/sims.txt"
file_out = "sample/output/sims.txt"
# file_in = "case/dataset/sims.txt"
with open(file_in) as f:
data = f.read().splitlines()
with open(file_out) as f:
outcome = f.read().splitlines()
# MAIN -------------------------------------------
# OUTPUT -------------------------------------------
with open("case/output/sims.txt", "w") as f:
    # TODO: compute the answer in the MAIN section above and write it here
    f.write("")
# END
|
py | 1a376b66f5b005dac304a54fa48a98339549cad1 | import matplotlib.pyplot as plt
plt.figure(1, figsize=(3,3))
ax = plt.subplot(111)
ax.annotate("",
xy=(0.2, 0.2), xycoords='data',
xytext=(0.8, 0.8), textcoords='data',
arrowprops=dict(arrowstyle="->",
connectionstyle="arc3"),
)
plt.show()
|
py | 1a376b69113e5eb2dfb7478481502556697787b2 | # JupyterHub configuration
#
## If you update this file, do not forget to delete the `jupyterhub_data` volume before restarting the jupyterhub service:
##
## docker volume rm jupyterhub_jupyterhub_data
##
## or, if you changed the COMPOSE_PROJECT_NAME to <name>:
##
## docker volume rm <name>_jupyterhub_data
##
import os
import sys
## Generic
c.JupyterHub.admin_access = True
c.Spawner.default_url = '/lab'
## Authenticator
from oauthenticator.generic import GenericOAuthenticator
#c.Application.log_level = 'DEBUG'
c.JupyterHub.authenticator_class = GenericOAuthenticator
c.GenericOAuthenticator.client_id = os.environ['OAUTH2_CLIENT_ID']
c.GenericOAuthenticator.client_secret = os.environ['OAUTH2_CLIENT_SECRET']
c.GenericOAuthenticator.token_url = 'https://gymnasium-ditzingen.de/iserv/oauth/v2/token'
c.GenericOAuthenticator.userdata_url = os.environ['OAUTH2_USERDATA_URL']
c.GenericOAuthenticator.userdata_params = {'state': 'state'}
# the next can be a callable as well, e.g.: lambda t: t.get('complex').get('structure').get('username')
#c.GenericOAuthenticator.username_key = 'preferred_username'
c.GenericOAuthenticator.login_service = 'IServ'
c.GenericOAuthenticator.scope = ['openid', 'profile', 'email', 'groups']
c.GenericOAuthenticator.admin_groups = ['Admins', 'admins']
c.GenericOAuthenticator.oauth_callback_url = 'https://jupyter.gymnasium-ditzingen.de/hub/oauth_callback'
c.OAuthenticator.tls_verify = False
# from oauthenticator.oauth2 import OAuthLoginHandler
# from oauthenticator.generic import GenericOAuthenticator
# from tornado.auth import OAuth2Mixin
# # OAuth2 endpoints
# class MyOAuthMixin(OAuth2Mixin):
# _OAUTH_AUTHORIZE_URL = 'https://gymnasium-ditzingen.de/iserv/oauth/v2/auth' ## Better move this to .env!
# _OAUTH_ACCESS_TOKEN_URL = 'https://gymnasium-ditzingen.de/iserv/oauth/v2/token'
# class MyOAuthLoginHandler(OAuthLoginHandler, MyOAuthMixin):
# pass
# # Authenticator configuration
# class MyOAuthAuthenticator(GenericOAuthenticator):
# login_service = 'IServ'
# login_handler = MyOAuthLoginHandler
# userdata_url = 'https://gymnasium-ditzingen.de/iserv/public/oauth/userinfo'
# token_url = 'https://gymnasium-ditzingen.de/iserv/oauth/v2/token'
# oauth_callback_url = 'https://jupyter.gymnasium-ditzingen.de/hub/oauth_callback'
# client_id = os.environ['OAUTH2_CLIENT_ID'] # Your client ID and secret, as provided to you
# client_secret = os.environ['OAUTH2_CLIENT_SECRET'] # by the OAuth2 service.
# c.JupyterHub.authenticator_class = MyOAuthAuthenticator
## Docker spawner
c.JupyterHub.spawner_class = 'dockerspawner.DockerSpawner'
c.DockerSpawner.image = os.environ['DOCKER_JUPYTER_CONTAINER']
c.DockerSpawner.network_name = os.environ['DOCKER_NETWORK_NAME']
# See https://github.com/jupyterhub/dockerspawner/blob/master/examples/oauth/jupyterhub_config.py
c.JupyterHub.hub_ip = os.environ['HUB_IP']
# user data persistence
# see https://github.com/jupyterhub/dockerspawner#data-persistence-and-dockerspawner
notebook_dir = os.environ.get('DOCKER_NOTEBOOK_DIR') or '/home/jovyan' # THIS NEEDS TO CHANGE?
c.DockerSpawner.notebook_dir = notebook_dir
c.DockerSpawner.volumes = { 'jupyterhub-user-{username}': notebook_dir }
# Other stuff
c.Spawner.cpu_limit = 1
c.Spawner.mem_limit = '10G'
## Services
c.JupyterHub.load_roles = [
{
"name": "jupyterhub-idle-culler-role",
"scopes": [
"list:users",
"read:users:activity",
"delete:servers",
# "admin:users", # if using --cull-users
],
# assignment of role's permissions to:
"services": ["jupyterhub-idle-culler-service"],
}
]
c.JupyterHub.services = [
{
"name": "jupyterhub-idle-culler-service",
"command": [
sys.executable,
"-m", "jupyterhub_idle_culler",
"--timeout=3600",
],
"admin": True, # Has to be disabled version>2.0
}
]
|
py | 1a376b8012cd68fabd286dd70a427fe8968a8688 | #!/usr/bin/env python
# encoding: utf-8
r"""
Module containing all Pyclaw solution objects
"""
from __future__ import absolute_import
import six
class State(object):
r"""
A PyClaw State object contains the current state on a particular patch,
    including the unknowns q, the time t, and the auxiliary coefficients aux.
The variables num_eqn and num_aux determine the length of the first
dimension of the q and aux arrays.
:State Data:
The arrays :attr:`q`, and :attr:`aux` have variable
extents based on the patch dimensions and the values of
:attr:`num_eqn` and :attr:`num_aux`.
A State object is automatically created upon instantiation of a Solution object
from a Domain object:
>>> from clawpack import pyclaw
>>> x = pyclaw.Dimension('x',0.0,1.0,100)
>>> domain = pyclaw.Domain(x)
>>> num_eqn = 1
>>> solution = pyclaw.Solution(num_eqn,domain)
>>> print solution.state
PyClaw State object
Patch dimensions: [100]
Time t=0.0
Number of conserved quantities: 1
<BLANKLINE>
A State lives on a Patch, and can be instantiated directly
by first creating a Patch:
>>> x = pyclaw.Dimension('x',0.,1.,100)
>>> patch = pyclaw.Patch((x))
The arguments to the constructor are the patch, the number of equations,
and the number of auxiliary fields:
>>> state = pyclaw.State(patch,3,2)
>>> state.q.shape
(3, 100)
>>> state.aux.shape
(2, 100)
>>> state.t
0.0
Note that state.q and state.aux are initialized as empty arrays (not zeroed).
Additional parameters, such as scalar values that are used in the Riemann solver,
can be set using the dictionary state.problem_data.
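    For example, a solver-specific scalar can be stored under any key (the name
    'gravity' below is only an illustrative placeholder, not a required entry):
    >>> state.problem_data['gravity'] = 1.0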
"""
def __getattr__(self, key):
if key in ('num_dim', 'p_centers', 'p_edges', 'c_centers', 'c_edges',
'num_cells', 'lower', 'upper', 'delta', 'centers', 'edges',
'gauges'):
return self._get_grid_attribute(key)
else:
raise AttributeError("'State' object has no attribute '"+key+"'")
def _get_grid_attribute(self, name):
r"""
Return grid attribute
:Output:
- (id) - Value of attribute from ``grid``
"""
return getattr(self.grid,name)
# ========== Property Definitions ========================================
@property
def num_eqn(self):
r"""(int) - Number of unknowns (components of q)"""
if self.q is None:
raise Exception('state.num_eqn has not been set.')
else: return self.q.shape[0]
@property
def num_aux(self):
r"""(int) - Number of auxiliary fields"""
if self.aux is not None: return self.aux.shape[0]
else: return 0
@property
def grid(self):
return self.patch.grid
@property
def mp(self):
r"""(int) - Number of derived quantities"""
if self.p is not None: return self.p.shape[0]
else: return 0
@mp.setter
def mp(self,mp):
if self.p is not None:
            raise Exception('Cannot change state.mp after p is initialized.')
else:
self.p = self.new_array(mp)
@property
def mF(self):
r"""(int) - Number of output functionals"""
if self.F is not None: return self.F.shape[0]
else: return 0
@mF.setter
def mF(self,mF):
if self.F is not None:
            raise Exception('Cannot change state.mF after F is initialized.')
else:
self.F = self.new_array(mF)
# ========== Class Methods ===============================================
def __init__(self,geom,num_eqn,num_aux=0):
from clawpack.pyclaw import geometry
if isinstance(geom,geometry.Patch):
self.patch = geom
elif isinstance(geom,geometry.Domain):
self.patch = geom.patches[0]
else:
raise Exception("""A PyClaw State object must be initialized with
a PyClaw Patch object.""")
# ========== Attribute Definitions ===================================
r"""pyclaw.Patch.patch - The patch this state lives on"""
self.p = None
r"""(ndarray(mp,...)) - Cell averages of derived quantities."""
self.F = None
r"""(ndarray(mF,...)) - Cell averages of output functional densities."""
self.problem_data = {}
r"""(dict) - Dictionary of global values for this patch,
``default = {}``"""
self.t=0.
r"""(float) - Current time represented on this patch,
``default = 0.0``"""
self.index_capa = -1
self.keep_gauges = False
r"""(bool) - Keep gauge values in memory for every time step,
``default = False``"""
self.gauge_data = []
r"""(list) - List of numpy.ndarray objects. Each element of the list
stores the values of the corresponding gauge if ``keep_gauges`` is set
to ``True``"""
self.q = self.new_array(num_eqn)
self.aux = self.new_array(num_aux)
def __str__(self):
output = "PyClaw State object\n"
output += "Patch dimensions: %s\n" % str(self.patch.num_cells_global)
output += "Time t=%s\n" % (self.t)
output += "Number of conserved quantities: %s\n" % str(self.q.shape[0])
if self.aux is not None:
output += "Number of auxiliary fields: %s\n" % str(self.aux.shape[0])
if self.problem_data != {}:
output += "problem_data: "+self.problem_data.__str__()
return output
def is_valid(self):
r"""
Checks to see if this state is valid
The state is declared valid based on the following criteria:
- :attr:`q` is Fortran contiguous
- :attr:`aux` is Fortran contiguous
A debug logger message will be sent documenting exactly what was not
valid.
:Output:
- (bool) - True if valid, false otherwise.
"""
import logging
valid = True
logger = logging.getLogger('pyclaw.solution')
if not self.q.flags['F_CONTIGUOUS']:
logger.debug('q array is not Fortran contiguous.')
valid = False
if self.aux is not None:
if not self.aux.flags['F_CONTIGUOUS']:
                logger.debug('aux array is not Fortran contiguous.')
valid = False
return valid
def set_cparam(self,fortran_module):
"""
Set the variables in fortran_module.cparam to the corresponding values in
patch.problem_data. This is the mechanism for passing scalar variables to the
Fortran Riemann solvers; cparam must be defined as a common block in the
Riemann solver.
This function should be called from solver.setup(). This seems like a fragile
interdependency between solver and state; perhaps problem_data should belong
to solver instead of state.
This function also checks that the set of variables defined in cparam
all appear in problem_data.
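        A hypothetical usage sketch (``fortran_rp_module`` and the key name are only
        placeholder names for an imported Riemann-solver module with a ``cparam``
        common block and one of its scalar parameters):
            state.problem_data['efix'] = True
            state.set_cparam(fortran_rp_module)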
"""
if hasattr(fortran_module,'cparam'):
try:
paramlist = [parm for parm in fortran_module.cparam.__dir__()
if '__' not in parm]
except AttributeError: # Python 2
paramlist = dir(fortran_module.cparam)
if not set(paramlist) <= set(self.problem_data.keys()):
raise Exception("""Some required value(s) in the cparam common
block in the Riemann solver have not been
set in problem_data.""")
for global_var_name,global_var_value in six.iteritems(self.problem_data):
setattr(fortran_module.cparam,global_var_name,global_var_value)
def set_num_ghost(self,num_ghost):
"""
Virtual routine (does nothing). Overridden in the petclaw.state class.
"""
pass
def set_q_from_qbc(self,num_ghost,qbc):
"""
Set the value of q using the array qbc. Typically this is called
after qbc has been updated by the solver.
"""
num_dim = self.patch.num_dim
if num_dim == 1:
self.q = qbc[:,num_ghost:-num_ghost]
elif num_dim == 2:
self.q = qbc[:,num_ghost:-num_ghost,num_ghost:-num_ghost]
elif num_dim == 3:
self.q = qbc[:,num_ghost:-num_ghost,num_ghost:-num_ghost,num_ghost:-num_ghost]
else:
raise Exception("Assumption (1 <= num_dim <= 3) violated.")
def set_aux_from_auxbc(self,num_ghost,auxbc):
"""
Set the value of aux using the array auxbc.
"""
patch = self.patch
if patch.num_dim == 1:
self.aux = auxbc[:,num_ghost:-num_ghost]
elif patch.num_dim == 2:
self.aux = auxbc[:,num_ghost:-num_ghost,num_ghost:-num_ghost]
elif patch.num_dim == 3:
self.aux = auxbc[:,num_ghost:-num_ghost,num_ghost:-num_ghost,num_ghost:-num_ghost]
else:
raise Exception("Assumption (1 <= num_dim <= 3) violated.")
def get_qbc_from_q(self,num_ghost,qbc):
"""
Fills in the interior of qbc by copying q to it.
"""
num_dim = self.patch.num_dim
if num_dim == 1:
qbc[:,num_ghost:-num_ghost] = self.q
elif num_dim == 2:
qbc[:,num_ghost:-num_ghost,num_ghost:-num_ghost] = self.q
elif num_dim == 3:
qbc[:,num_ghost:-num_ghost,num_ghost:-num_ghost,num_ghost:-num_ghost] = self.q
else:
raise Exception("Assumption (1 <= num_dim <= 3) violated.")
return qbc
def get_auxbc_from_aux(self,num_ghost,auxbc):
"""
Fills in the interior of auxbc by copying aux to it.
"""
num_dim = self.patch.num_dim
if num_dim == 1:
auxbc[:,num_ghost:-num_ghost] = self.aux
elif num_dim == 2:
auxbc[:,num_ghost:-num_ghost,num_ghost:-num_ghost] = self.aux
elif num_dim == 3:
auxbc[:,num_ghost:-num_ghost,num_ghost:-num_ghost,num_ghost:-num_ghost] = self.aux
else:
raise Exception("Assumption (1 <= num_dim <= 3) violated.")
return auxbc
# ========== Copy functionality ==========================================
def __copy__(self):
return self.__class__(self)
def __deepcopy__(self,memo={}):
import copy
result = self.__class__(copy.deepcopy(self.patch),self.num_eqn,self.num_aux)
result.__init__(copy.deepcopy(self.patch),self.num_eqn,self.num_aux)
        for attr in ('t',):
setattr(result,attr,copy.deepcopy(getattr(self,attr)))
if self.q is not None:
result.q = copy.deepcopy(self.q)
if self.aux is not None:
result.aux = copy.deepcopy(self.aux)
result.problem_data = copy.deepcopy(self.problem_data)
return result
def sum_F(self,i):
import numpy as np
return np.sum(np.abs(self.F[i,...]))
def new_array(self,dof):
import numpy as np
if dof==0: return None
shape = [dof]
shape.extend(self.grid.num_cells)
return np.empty(shape,order='F')
def get_q_global(self):
r"""
Returns a copy of state.q.
"""
return self.q.copy()
def get_aux_global(self):
r"""
Returns a copy of state.aux.
"""
return self.aux.copy()
if __name__ == "__main__":
import doctest
doctest.testmod()
|
py | 1a376bc8e012dedb8a297abb896f2bc88013dd87 | # Generated by Django 3.2.4 on 2021-08-19 17:44
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('issue_tracker', '0027_alter_project_description'),
]
operations = [
migrations.AlterField(
model_name='project',
name='description',
field=models.TextField(),
),
]
|
py | 1a376be6f16ef8c5bae7d64dab094af2d8bc3ccf | from hydroDL import kPath, utils
from hydroDL.app import waterQuality
from hydroDL.master import basins
from hydroDL.data import usgs, gageII, gridMET, ntn
from hydroDL.master import slurm
from hydroDL.post import axplot, figplot
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import scipy
codeLst = sorted(usgs.varC)
ep = 500
reTest = False
wqData = waterQuality.DataModelWQ('sbWTQ')
siteNoLst = wqData.info.siteNo.unique()
nSite = len(siteNoLst)
# single
labelLst = ['ntnonly', 'q', 'ntnq']
cLst = 'bgr'
labLst2 = ['input NTN', 'input Q', 'input NTN+Q']
corrMat = np.full([nSite, len(codeLst), len(labelLst)], np.nan)
rmseMat = np.full([nSite, len(codeLst), len(labelLst)], np.nan)
for iLab, label in enumerate(labelLst):
for iCode, code in enumerate(codeLst):
trainSet = '{}-Y1'.format(code)
testSet = '{}-Y2'.format(code)
if label == 'qpred':
outName = '{}-{}-{}-{}'.format('sbWTQ', code, label, trainSet)
else:
outName = '{}-{}-{}-{}'.format('sbWT', code, label, trainSet)
master = basins.loadMaster(outName)
ic = wqData.varC.index(code)
# for iT, subset in enumerate([trainSet, testSet]):
subset = testSet
yP, ycP = basins.testModel(
outName, subset, wqData=wqData, ep=ep, reTest=reTest)
ind = wqData.subset[subset]
info = wqData.info.iloc[ind].reset_index()
p = yP[-1, :, master['varY'].index(code)]
o = wqData.c[-1, ind, ic]
for iS, siteNo in enumerate(siteNoLst):
indS = info[info['siteNo'] == siteNo].index.values
rmse, corr = utils.stat.calErr(p[indS], o[indS])
corrMat[iS, iCode, iLab] = corr
rmseMat[iS, iCode, iLab] = rmse
# plot box
labLst1 = [usgs.codePdf.loc[code]['shortName'] +
'\n'+code for code in codeLst]
dataBox = list()
for k in range(len(codeLst)):
code = codeLst[k]
temp = list()
for i in range(len(labelLst)):
temp.append(corrMat[:, k, i])
dataBox.append(temp)
fig = figplot.boxPlot(dataBox, label1=labLst1, widths=0.5, cLst=cLst,
label2=labLst2, figsize=(12, 4), yRange=[0, 1])
# fig = figplot.boxPlot(dataBox, label1=labLst1, widths=0.5,
# label2=labLst2, figsize=(12, 4), sharey=False)
fig.show()
# significance test
testLst = ['add Q', 'add NTN']
indLst = [[0, 2], [1, 2]]
codeStrLst = ['{} {}'.format(
code, usgs.codePdf.loc[code]['shortName']) for code in codeLst]
dfS = pd.DataFrame(index=codeStrLst, columns=testLst)
for (test, ind) in zip(testLst, indLst):
for k, code in enumerate(codeLst):
data = [corrMat[:, k, x] for x in ind]
[a, b], _ = utils.rmNan(data)
s, p = scipy.stats.ttest_ind(a, b, equal_var=False)
# s, p = scipy.stats.ttest_rel(a, b)
dfS.loc[codeStrLst[k]][test] = p
dfS['aver R'] = np.nanmean(corrMat[:, :, 2], axis=0)
pd.options.display.float_format = '{:,.2f}'.format
print(dfS)
|
py | 1a376e8c6f4e7e9bab8b24fb59d56558c927d4e2 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('account', '0003_subscription'),
]
operations = [
migrations.AlterModelOptions(
name='subscription',
options={'ordering': ('-created',)},
),
]
|
bzl | 1a376e95bc9714f06413fc518ef10812654734fa | # Copyright (C) 2016 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# War packaging.
jar_filetype = FileType([".jar"])
LIBS = [
"//java/com/google/gerrit/common:version",
"//java/com/google/gerrit/httpd/init",
"//lib:postgresql",
"//lib/bouncycastle:bcpkix",
"//lib/bouncycastle:bcprov",
"//lib/bouncycastle:bcpg",
"//lib/log:impl-log4j",
"//resources:log4j-config",
]
PGMLIBS = [
"//java/com/google/gerrit/pgm",
]
def _add_context(in_file, output):
input_path = in_file.path
return [
"unzip -qd %s %s" % (output, input_path),
]
def _add_file(in_file, output):
output_path = output
input_path = in_file.path
short_path = in_file.short_path
n = in_file.basename
if short_path.startswith("gerrit-"):
n = short_path.split("/")[0] + "-" + n
elif short_path.startswith("java/"):
n = short_path[5:].replace("/", "_")
output_path += n
return [
"test -L %s || ln -s $(pwd)/%s %s" % (output_path, input_path, output_path),
]
def _make_war(input_dir, output):
return "(%s)" % " && ".join([
"root=$(pwd)",
"TZ=UTC",
"export TZ",
"cd %s" % input_dir,
"find . -exec touch -t 198001010000 '{}' ';' 2> /dev/null",
"zip -X -9qr ${root}/%s ." % (output.path),
])
def _war_impl(ctx):
war = ctx.outputs.war
build_output = war.path + ".build_output"
inputs = []
# Create war layout
cmd = [
"set -e;rm -rf " + build_output,
"mkdir -p " + build_output,
"mkdir -p %s/WEB-INF/lib" % build_output,
"mkdir -p %s/WEB-INF/pgm-lib" % build_output,
]
# Add lib
transitive_lib_deps = depset()
for l in ctx.attr.libs:
if hasattr(l, "java"):
transitive_lib_deps += l.java.transitive_runtime_deps
elif hasattr(l, "files"):
transitive_lib_deps += l.files
for dep in transitive_lib_deps:
cmd += _add_file(dep, build_output + "/WEB-INF/lib/")
inputs.append(dep)
# Add pgm lib
transitive_pgmlib_deps = depset()
for l in ctx.attr.pgmlibs:
transitive_pgmlib_deps += l.java.transitive_runtime_deps
for dep in transitive_pgmlib_deps:
if dep not in inputs:
cmd += _add_file(dep, build_output + "/WEB-INF/pgm-lib/")
inputs.append(dep)
# Add context
transitive_context_deps = depset()
if ctx.attr.context:
for jar in ctx.attr.context:
if hasattr(jar, "java"):
transitive_context_deps += jar.java.transitive_runtime_deps
elif hasattr(jar, "files"):
transitive_context_deps += jar.files
for dep in transitive_context_deps:
cmd += _add_context(dep, build_output)
inputs.append(dep)
# Add zip war
cmd.append(_make_war(build_output, war))
ctx.actions.run_shell(
inputs = inputs,
outputs = [war],
mnemonic = "WAR",
command = "\n".join(cmd),
use_default_shell_env = True,
)
# context: go to the root directory
# libs: go to the WEB-INF/lib directory
# pgmlibs: go to the WEB-INF/pgm-lib directory
_pkg_war = rule(
attrs = {
"context": attr.label_list(allow_files = True),
"libs": attr.label_list(allow_files = jar_filetype),
"pgmlibs": attr.label_list(allow_files = False),
},
outputs = {"war": "%{name}.war"},
implementation = _war_impl,
)
def pkg_war(name, ui = "ui_optdbg", context = [], doc = False, **kwargs):
doc_ctx = []
doc_lib = []
ui_deps = []
if ui == "polygerrit" or ui == "ui_optdbg" or ui == "ui_optdbg_r":
ui_deps.append("//polygerrit-ui/app:polygerrit_ui")
if ui and ui != "polygerrit":
ui_deps.append("//gerrit-gwtui:%s" % ui)
if doc:
doc_ctx.append("//Documentation:html")
doc_lib.append("//Documentation:index")
_pkg_war(
name = name,
libs = LIBS + doc_lib,
pgmlibs = PGMLIBS,
context = doc_ctx + context + ui_deps + [
"//java:gerrit-main-class_deploy.jar",
"//webapp:assets",
],
**kwargs
)
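# A minimal usage sketch (hypothetical BUILD-file snippet; the load path and target
# name are assumptions, not taken from this repository):
#   load("//tools/bzl:pkg_war.bzl", "pkg_war")
#   pkg_war(
#       name = "gerrit",
#       ui = "ui_optdbg",
#       doc = True,
#   )
# This would produce gerrit.war with LIBS under WEB-INF/lib, PGMLIBS under
# WEB-INF/pgm-lib, and the UI/documentation context unpacked at the war root.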
|
py | 1a376eb2b6855bc03f47f1aeef8ff583957cd686 | import os
import data.users as data
from datetime import timedelta
from fastapi import APIRouter, Depends, HTTPException, Path, status
from fastapi.security import OAuth2PasswordRequestForm
from fastapi.responses import RedirectResponse
from models import Token, User, UserInDB, UpdateUser, RegisterUser, Message
from dependencies import get_current_user, current_user_is_active, get_email
from utils.password import authenticate, create_access_token, get_hash
from mail.send import user as send_email
from constants import (
USERNAME_KEY,
EMAIL_KEY,
PASSWORD_KEY,
ACTIVE_KEY,
ACCESS_TOKEN_EXPIRE_MINUTES,
)
import config
router = APIRouter(tags=["users"], responses={404: {"description": "Not found"}})
@router.post("/register", response_model=User)
async def register_user(user: RegisterUser):
if data.get_user(user.username):
raise HTTPException(
status_code=status.HTTP_409_CONFLICT,
detail="User with same username already exists",
)
if data.get_user_by_email(user.email):
raise HTTPException(
status_code=status.HTTP_409_CONFLICT,
detail="This email already has an account",
)
data.add_user(user.username, user.email, get_hash(user.password))
return_user = data.get_user(user.username)
if not return_user:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail="Item not found, failed to register",
)
access_token_expires = timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES)
confirm_token = create_access_token(
data={"sub": user[EMAIL_KEY]}, expires_delta=access_token_expires
)
backend = os.getenv("BACKEND_URL") or config.CONFIG.backend
await send_email(
        user.email,
        {
            "username": user.username,
"confirm_url": f"{backend}/confirm/{confirm_token}",
},
)
return return_user
@router.post("/auth", response_model=Token)
async def login_for_access_token(form_data: OAuth2PasswordRequestForm = Depends()):
user = authenticate(form_data.username, form_data.password)
if not user:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Incorrect username or password",
headers={"WWW-Authenticate": "Bearer"},
)
access_token_expires = timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES)
access_token = create_access_token(
data={"sub": user[USERNAME_KEY]}, expires_delta=access_token_expires
)
return {"access_token": access_token, "token_type": "bearer", "expires_in": 3600}
@router.get("/confirm/{token}")
async def confirm_email_token(
token: str = Path(..., description="Token to confirm email")
):
frontend = os.getenv("FRONTEND_URL") or config.CONFIG.frontend
email = await get_email(token)
if email == "expired":
return RedirectResponse(url=f"{frontend}/?status=expired")
user = data.get_user_by_email(email)
if user:
data.activate_user(user["id"], {ACTIVE_KEY: True})
return RedirectResponse(url=f"{frontend}#status=confirmed")
else:
return RedirectResponse(url=f"{frontend}#status=unconfirmed")
@router.get("/resend", response_model=Message)
async def regenerate_confirm_email(user: User = Depends(get_current_user)):
access_token_expires = timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES)
confirm_token = create_access_token(
data={"sub": user[EMAIL_KEY]}, expires_delta=access_token_expires
)
backend = os.getenv("BACKEND_URL") or config.CONFIG.backend
await send_email(
user[EMAIL_KEY],
{
"username": user[USERNAME_KEY],
"confirm_url": f"{backend}/confirm/{confirm_token}",
},
)
return {"msg": "resent"}
@router.put("/users/me", response_model=User)
async def update_user_me(
user: UpdateUser, current_user: User = Depends(current_user_is_active)
):
if data.get_user(user.username):
raise HTTPException(
status_code=status.HTTP_409_CONFLICT,
detail="User with same username already exists",
)
db_user = data.get_user(current_user[USERNAME_KEY])
data.update_user(db_user["id"], user)
return data.get_user(user.username or current_user[USERNAME_KEY])
@router.get("/users/me", response_model=User)
async def read_users_me(current_user: User = Depends(get_current_user)):
return current_user
@router.delete("/users/me", response_model=Message)
async def unregister_users_me(current_user: UserInDB = Depends(get_current_user)):
data.remove_user(
current_user[USERNAME_KEY], current_user[EMAIL_KEY], current_user[PASSWORD_KEY]
)
if data.get_user(current_user[USERNAME_KEY]):
raise HTTPException(
status_code=status.HTTP_417_EXPECTATION_FAILED, detail="Failed to delete"
)
return {"msg": "deleted"} |
py | 1a376f885b06a7d628e9d2293d4ba05403c21b1e | '''
QUESTION 5
A palindrome is a sequence of characters that reads the same from right to left
or vice versa. For example: OSSO and OVO are palindromes. In more complex texts
the spaces and punctuation are ignored. The phrase SUBI NO ONIBUS is an example
of a palindromic phrase in which the spaces are ignored. Write a program that
reads a sequence of characters, displays it, and says whether or not it is a
palindrome.
'''
import pilha as pilha
from unidecode import unidecode
def palindromoPilha(array):
auxA = []
auxB = []
if pilha.isEmpty(array):
print("String vazia")
else:
print("\n*************** teste de palíndromo ***************")
print('Verificar: ' + array)
        # Remove accents
array = unidecode(array)
        # Convert to lowercase to avoid comparison errors
array = array.lower()
        # Check for spaces and remove them
if " " in array:
array = array.replace(" ", "")
loop = ((pilha.size(array)) - 1)
for i in range(loop, -1, -1):
auxA = pilha.push(array[i], auxA)
auxB = pilha.push(array[loop - i], auxB)
if auxA == auxB:
print("Teste Verdadeiro")
return True
else:
print("Teste Falso")
return False
# Função Palíndromo utilizando estrutura de pilha
print("\n********** Função de Palíndromo **********")
palin = ['ralo do dólar', 'até o poeta', 'tomarei café após a sopa']
for i in range(len(palin)):
palindromoPilha(palin[i]) |
py | 1a37707e1d9981048fd0c15037a8912725949c33 | from abc import ABC, abstractmethod
from typing import List, Tuple, Union
import torch
from torch.nn import functional as F
class Bandit(ABC):
"""Abstract Base class for bandits"""
@abstractmethod
def step(self, action: int) -> Tuple[torch.Tensor, int]:
"""Generate reward for given action and select next context.
Args:
action (int): Selected action.
Returns:
Tuple[torch.Tensor, int]: Tuple of the next context and the
reward generated for given action
"""
@abstractmethod
def reset(self) -> torch.Tensor:
"""Reset bandit.
Returns:
torch.Tensor: Current context selected by bandit.
"""
class BanditAgent(ABC):
"""Abstract Base class for bandit solving agents"""
@abstractmethod
def select_action(self, context: torch.Tensor) -> int:
"""Select an action based on given context
Args:
context (torch.Tensor): The context vector to select action for
Returns:
int: The action to take
"""
class MultiArmedBandit(Bandit):
"""
Base Class for a Contextual Multi-armed Bandit
:param bandits: Number of bandits
:param arms: Number of arms in each bandit
:param context_type: Give context as either tensor or int
:type bandits: int
:type arms: int
:type context_type: str
"""
def __init__(self, bandits: int = 1, arms: int = 1, context_type: str = "tensor"):
self._nbandits = bandits
self._narms = arms
self.n_actions = arms
self.context_dim = bandits
if not (context_type == "int" or context_type == "tensor"):
raise ValueError(
f"context_type should be either tensor or int, found {context_type}"
)
self.context_type = context_type
self._reset_metrics()
self._reset_bandit()
@property
def reward_hist(self) -> List[float]:
"""
Get the history of rewards received at each step
:returns: List of rewards
:rtype: list
"""
return self._reward_hist
@property
def regret_hist(self) -> List[float]:
"""
Get the history of regrets incurred at each step
        :returns: List of regrets
:rtype: list
"""
return self._regret_hist
@property
def cum_regret_hist(self) -> Union[List[int], List[float]]:
return self._cum_regret_hist
@property
def cum_reward_hist(self) -> Union[List[int], List[float]]:
return self._cum_reward_hist
@property
def cum_regret(self) -> Union[int, float]:
return self._cum_regret
@property
def cum_reward(self) -> Union[int, float]:
return self._cum_reward
@property
def arms(self) -> int:
"""
Get the number of arms in each bandit
:returns: Number of arms in each bandit
:rtype: int
"""
return self._narms
@property
def bandits(self) -> int:
"""
Get the number of bandits
:returns: Number of bandits
:rtype: int
"""
return self._nbandits
def _reset_metrics(self) -> None:
"""
Resets the various metrics to empty
"""
self._regret_hist = []
self._reward_hist = []
self._cum_regret_hist = []
self._cum_reward_hist = []
self._cum_regret = 0
self._cum_reward = 0
def _reset_bandit(self) -> None:
"""
Resets the current bandit and context
"""
self.curr_bandit = torch.randint(self.bandits, (1,))
self.curr_context = F.one_hot(
self.curr_bandit, num_classes=self.context_dim
).to(torch.float)
def reset(self) -> torch.Tensor:
"""
        Resets metrics and randomly selects a new current bandit
        :returns: The current context (as a tensor) or bandit index as observation
        :rtype: torch.Tensor or int
"""
self._reset_metrics()
self._reset_bandit()
if self.context_type == "tensor":
return self.curr_context.view(-1)
elif self.context_type == "int":
return self.curr_bandit.item()
def step(self, action: int) -> Tuple[Union[int, torch.Tensor], Union[int, float]]:
"""
Takes an action in the bandit and returns the sampled reward
        The reward itself comes from ``_compute_reward``, which must be implemented by the specific bandit.
:param action: The action to take
:type action: int
:returns: Reward sampled for the action taken
        :rtype: int or float
"""
reward, max_reward = self._compute_reward(action)
regret = max_reward - reward
self._cum_regret += regret
self.cum_regret_hist.append(self._cum_regret)
self.regret_hist.append(regret)
self._cum_reward += reward
self.cum_reward_hist.append(self._cum_reward)
self.reward_hist.append(reward)
self._reset_bandit()
if self.context_type == "tensor":
return self.curr_context.view(-1), reward
elif self.context_type == "int":
return self.curr_bandit, reward
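# Illustrative only: a minimal concrete subclass sketch showing how _compute_reward
# (called by step above) might be supplied. The Bernoulli reward scheme and the
# random probability table are assumptions for this example, not part of this module.
class _ExampleBernoulliMAB(MultiArmedBandit):
    def __init__(self, bandits: int = 2, arms: int = 3, context_type: str = "tensor"):
        super().__init__(bandits, arms, context_type)
        # One success probability per (bandit, arm) pair.
        self.reward_probs = torch.rand(bandits, arms)
    def _compute_reward(self, action: int) -> Tuple[int, int]:
        # Sample a 0/1 reward for the chosen arm of the current bandit, and also
        # report a reward sampled from the best arm so that step() can compute regret.
        probs = self.reward_probs[self.curr_bandit].view(-1)
        reward = int(torch.bernoulli(probs[action]).item())
        max_reward = int(torch.bernoulli(probs.max()).item())
        return reward, max_reward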
|
py | 1a37713de12eb5c8df41af7f5aecf9edafeafae7 | # Copyright 2014-2016 Presslabs SRL
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from tests.integrations.base import BaseTest
class TestReadIndexView(BaseTest):
def test_listdirs(self):
assert os.listdir(self.mount_path) == ['current', 'history']
|
py | 1a37715cdf3ecfc5ea260e6c180f00e0a5507cad | """Test suite for autoflake8."""
import logging
import os
import pathlib
import re
import subprocess
from contextlib import _GeneratorContextManager
from typing import Callable
from typing import Iterable
from typing import List
import pytest
from autoflake8.fix import break_up_import
from autoflake8.fix import check
from autoflake8.fix import detect_source_encoding
from autoflake8.fix import filter_code
from autoflake8.fix import filter_from_import
from autoflake8.fix import filter_star_import
from autoflake8.fix import filter_unused_variable
from autoflake8.fix import filter_useless_pass
from autoflake8.fix import find_files
from autoflake8.fix import fix_code
from autoflake8.fix import get_diff_text
from autoflake8.fix import get_indentation
from autoflake8.fix import is_exclude_file
from autoflake8.fix import is_literal_or_name
from autoflake8.fix import is_multiline_import
from autoflake8.fix import is_multiline_statement
from autoflake8.fix import is_python_file
from autoflake8.fix import match_file
from autoflake8.fix import unused_import_line_numbers
from autoflake8.fix import useless_pass_line_numbers
@pytest.mark.parametrize(
("source", "expected"),
[
pytest.param(
b"# coding: iso-8859-1",
"iso-8859-1",
id="single line",
),
pytest.param(
b"#!/usr/bin/env python\n# coding: iso-8859-1\n",
"iso-8859-1",
id="two lines",
),
pytest.param(
b"",
"utf-8",
id="empty file",
),
pytest.param(
b"#!/usr/bin/env python\n\n# coding: iso-8859-1\n",
"utf-8",
id="coding defined in the third line (invalid)",
),
pytest.param(
b"#!/usr/bin/env python\n# encoding: utf-16\n",
"utf-16",
id="encoding",
),
pytest.param(
b"# ~*~ coding: utf-16 ~*~\n\n\n\n\n",
"utf-16",
id="editor style",
),
pytest.param(
b"#!/usr/bin/python\n# vim: set fileencoding=utf-16 :\n",
"utf-16",
id="vim style",
),
pytest.param(
b"#!/usr/bin/python\n\n\n# vim: set fileencoding=utf-16 :\n",
"utf-8",
id="vim style, fourth line (invalid)",
),
],
)
def test_detect_source_encoding(source: bytes, expected: str) -> None:
assert detect_source_encoding(source) == expected
def test_unused_import_line_numbers() -> None:
assert (
list(
unused_import_line_numbers(check(b"import os\n")),
)
== [1]
)
def test_unused_import_line_numbers_with_from() -> None:
assert (
list(
unused_import_line_numbers(
check(b"from os import path\n"),
),
)
== [1]
)
def test_unused_import_line_numbers_with_dot() -> None:
assert (
list(
unused_import_line_numbers(
check(b"import os.path\n"),
),
)
== [1]
)
@pytest.mark.parametrize(
("source", "expected"),
[
(b"", b""),
(b" abc", b" "),
(b" abc \n\t", b" "),
(b"\tabc \n\t", b"\t"),
(b" \t abc \n\t", b" \t "),
(b" ", b""),
],
)
def test_get_indentation(source: bytes, expected: bytes) -> None:
assert get_indentation(source) == expected
def test_filter_star_import() -> None:
assert (
filter_star_import(b"from math import *", [b"cos"]) == b"from math import cos"
)
assert (
filter_star_import(b"from math import *", [b"sin", b"cos"])
== b"from math import cos, sin"
)
def test_filter_unused_variable() -> None:
assert filter_unused_variable(b"x = foo()") == b"foo()"
assert filter_unused_variable(b" x = foo()") == b" foo()"
def test_filter_unused_variable_with_literal_or_name() -> None:
assert filter_unused_variable(b"x = 1") == b"pass"
assert filter_unused_variable(b"x = y") == b"pass"
assert filter_unused_variable(b"x = {}") == b"pass"
def test_filter_unused_variable_with_basic_data_structures() -> None:
assert filter_unused_variable(b"x = dict()") == b"pass"
assert filter_unused_variable(b"x = list()") == b"pass"
assert filter_unused_variable(b"x = set()") == b"pass"
def test_filter_unused_variable_should_ignore_multiline() -> None:
assert filter_unused_variable(b"x = foo()\\") == b"x = foo()\\"
def test_filter_unused_variable_should_multiple_assignments() -> None:
assert filter_unused_variable(b"x = y = foo()") == b"x = y = foo()"
def test_filter_unused_variable_with_exception() -> None:
assert (
filter_unused_variable(b"except Exception as exception:")
== b"except Exception:"
)
assert (
filter_unused_variable(
b"except (ImportError, ValueError) as foo:",
)
== b"except (ImportError, ValueError):"
)
def test_filter_code() -> None:
result = b"".join(
filter_code(
b"""\
import os
import re
os.foo()
""",
),
)
expected = b"""\
import os
pass
os.foo()
"""
assert result == expected
def test_filter_code_with_indented_import() -> None:
result = b"".join(
filter_code(
b"""\
import os
if True:
import re
os.foo()
""",
),
)
expected = b"""\
import os
if True:
pass
os.foo()
"""
assert result == expected
def test_filter_code_with_from() -> None:
result = b"".join(
filter_code(
b"""\
from os import path
x = 1
""",
),
)
expected = b"""\
pass
x = 1
"""
assert result == expected
def test_filter_code_with_not_from() -> None:
result = b"".join(
filter_code(
b"""\
import frommer
x = 1
""",
),
)
expected = b"""\
pass
x = 1
"""
assert result == expected
def test_filter_code_with_used_from() -> None:
result = b"".join(
filter_code(
b"""\
import frommer
print(frommer)
""",
),
)
expected = b"""\
import frommer
print(frommer)
"""
assert result == expected
def test_filter_code_with_ambiguous_from() -> None:
result = b"".join(
filter_code(
b"""\
from frommer import abc, frommer, xyz
""",
),
)
expected = b"""\
pass
"""
assert result == expected
def test_filter_code_should_avoid_inline_except() -> None:
line = b"""\
try: from zap import foo
except: from zap import bar
"""
assert (
b"".join(
filter_code(line),
)
== line
)
def test_filter_code_should_avoid_escaped_newlines() -> None:
line = b"""\
try:\\
from zap import foo
except:\\
from zap import bar
"""
assert b"".join(filter_code(line)) == line
def test_filter_code_with_remove_all_unused_imports() -> None:
result = b"".join(
filter_code(
b"""\
import foo
import zap
x = 1
""",
),
)
expected = b"""\
pass
pass
x = 1
"""
assert result == expected
def test_filter_code_should_ignore_imports_with_inline_comment() -> None:
result = b"".join(
filter_code(
b"""\
from os import path # foo
from os import path
from fake_foo import z # foo, foo, zap
x = 1
""",
),
)
expected = b"""\
from os import path # foo
pass
from fake_foo import z # foo, foo, zap
x = 1
"""
assert result == expected
def test_filter_code_should_respect_noqa() -> None:
result = b"".join(
filter_code(
b"""\
from os import path
import re # noqa
from subprocess import Popen # NOQA
import sys # noqa: F401
x = 1
""",
),
)
expected = b"""\
pass
import re # noqa
from subprocess import Popen # NOQA
import sys # noqa: F401
x = 1
"""
assert result == expected
def test_filter_code_expand_star_imports__one_function() -> None:
result = b"".join(
filter_code(
b"""\
from math import *
sin(1)
""",
expand_star_imports=True,
),
)
expected = b"""\
from math import sin
sin(1)
"""
assert result == expected
def test_filter_code_expand_star_imports__two_functions() -> None:
result = b"".join(
filter_code(
b"""\
from math import *
sin(1)
cos(1)
""",
expand_star_imports=True,
),
)
expected = b"""\
from math import cos, sin
sin(1)
cos(1)
"""
assert result == expected
def test_filter_code_ignore_multiple_star_import() -> None:
result = b"".join(
filter_code(
b"""\
from math import *
from re import *
sin(1)
cos(1)
""",
expand_star_imports=True,
),
)
expected = b"""\
from math import *
from re import *
sin(1)
cos(1)
"""
assert result == expected
def test_filter_code_with_special_re_symbols_in_key() -> None:
result = b"".join(
filter_code(
b"""\
a = {
'????': 3,
'????': 2,
}
print(a)
""",
remove_duplicate_keys=True,
),
)
expected = b"""\
a = {
'????': 2,
}
print(a)
"""
assert result == expected
@pytest.mark.parametrize(
("line", "previous_line", "expected"),
[
pytest.param(
rb"""\
import os, \
math, subprocess
""",
b"",
True,
id="backslash",
),
pytest.param(
b"""\
import os, math, subprocess
""",
b"",
False,
id="multiple imports in a single line",
),
pytest.param(
b"""\
import os, math, subprocess
""",
b"if: \\\n",
True,
id="multiple imports in a single line, but with previous_line",
),
pytest.param(
b"from os import (path, sep)",
b"",
True,
id="parens",
),
],
)
def test_is_multiline_import(line: bytes, previous_line: bytes, expected: bool) -> None:
assert is_multiline_import(line, previous_line=previous_line) is expected
@pytest.mark.parametrize(
("line", "previous_line", "expected"),
[
pytest.param(b"x = foo()", b"", False, id="simple assignment"),
pytest.param(b"x = 1;", b"", True, id="assignment with semicolon"),
pytest.param(b"import os; \\", b"", True, id="continuation (backslash)"),
pytest.param(b"foo(", b"", True, id="unclosed parens"),
pytest.param(b"1", b"x = \\", True, id="simple value, with previous_line"),
],
)
def test_multiline_statement(line: bytes, previous_line: bytes, expected: bool) -> None:
assert is_multiline_statement(line, previous_line=previous_line) is expected
@pytest.mark.parametrize(
("line", "expected"),
[
pytest.param(
b"import abc, subprocess, math\n",
b"import abc\nimport math\nimport subprocess\n",
id="basic case",
),
pytest.param(
b" import abc, subprocess, math\n",
b" import abc\n import math\n import subprocess\n",
id="with indentation",
),
pytest.param(
b"import abc, subprocess, math",
b"import abc, subprocess, math",
id="do nothing on line ending",
),
],
)
def test_break_up_import(line: bytes, expected: bytes) -> None:
assert break_up_import(line) == expected
def test_filter_from_import_no_remove() -> None:
result = filter_from_import(
b" from foo import abc, subprocess, math\n",
unused_module=(),
)
expected = b"""\
from foo import abc, math, subprocess\n"""
assert result == expected
def test_filter_from_import_remove_module() -> None:
result = filter_from_import(
b" from foo import abc, subprocess, math\n",
unused_module=(b"foo.abc",),
)
expected = b"""\
from foo import math, subprocess\n"""
assert result == expected
def test_filter_from_import() -> None:
result = filter_from_import(
b" from foo import abc, subprocess, math\n",
unused_module=(b"foo.abc", b"foo.subprocess", b"foo.math"),
)
expected = b" pass\n"
assert result == expected
def test_filter_code_multiline_imports() -> None:
result = b"".join(
filter_code(
rb"""\
import os
import re
import os, \
math, subprocess
os.foo()
""",
),
)
expected = rb"""\
import os
pass
import os
os.foo()
"""
assert result == expected
def test_filter_code_multiline_from_imports() -> None:
result = b"".join(
filter_code(
rb"""\
import os
import re
from os.path import (
exists,
join,
)
join('a', 'b')
from os.path import \
abspath, basename, \
commonpath
os.foo()
from os.path import \
isfile \
, isdir
isdir('42')
""",
),
)
expected = rb"""\
import os
pass
from os.path import (
join,
)
join('a', 'b')
pass
os.foo()
from os.path import \
isdir
isdir('42')
"""
assert result == expected
def test_filter_code_should_ignore_semicolons() -> None:
result = b"".join(
filter_code(
rb"""\
import os
import re
import os; import math, subprocess
os.foo()
""",
),
)
expected = rb"""\
import os
pass
import os; import math, subprocess
os.foo()
"""
assert result == expected
def test_filter_code_should_ignore_docstring() -> None:
line = b"""
def foo():
'''
>>> import math
'''
"""
assert b"".join(filter_code(line)) == line
def test_fix_code() -> None:
result = fix_code(
b"""\
import os
import re
import abc, math, subprocess
from sys import exit, version
os.foo()
math.pi
x = version
""",
)
expected = b"""\
import os
import math
from sys import version
os.foo()
math.pi
x = version
"""
assert result == expected
def test_fix_code_with_from_and_as__mixed() -> None:
result = fix_code(
b"""\
from collections import defaultdict, namedtuple as xyz
xyz
""",
)
expected = b"""\
from collections import namedtuple as xyz
xyz
"""
assert result == expected
def test_fix_code_with_from_and_as__multiple() -> None:
result = fix_code(
b"""\
from collections import defaultdict as abc, namedtuple as xyz
xyz
""",
)
expected = b"""\
from collections import namedtuple as xyz
xyz
"""
assert result == expected
def test_fix_code_with_from_and_as__unused_as() -> None:
result = fix_code(
b"""\
from collections import defaultdict as abc, namedtuple
namedtuple
""",
)
expected = b"""\
from collections import namedtuple
namedtuple
"""
assert result == expected
def test_fix_code_with_from_and_as__all_unused() -> None:
result = fix_code(
b"""\
from collections import defaultdict as abc, namedtuple as xyz
""",
)
assert result == b""
def test_fix_code_with_from_and_as__custom_modules() -> None:
code = b"""\
from x import a as b, c as d
"""
assert fix_code(code) == b""
def test_fix_code_with_from_and_depth_module() -> None:
expected = b"""\
from distutils.version import StrictVersion
StrictVersion('1.0.0')
"""
result = fix_code(
b"""\
from distutils.version import LooseVersion, StrictVersion
StrictVersion('1.0.0')
""",
)
assert result == expected
def test_fix_code_with_from_and_depth_module__aliasing() -> None:
result = fix_code(
b"""\
from distutils.version import LooseVersion, StrictVersion as version
version('1.0.0')
""",
)
expected = b"""\
from distutils.version import StrictVersion as version
version('1.0.0')
"""
assert result == expected
def test_fix_code_with_indented_from() -> None:
result = fix_code(
b"""\
def z():
from ctypes import c_short, c_uint, c_int, c_long, pointer, POINTER, byref
POINTER, byref
""",
)
expected = b"""\
def z():
from ctypes import POINTER, byref
POINTER, byref
"""
assert result == expected
def test_fix_code_with_indented_from__all_unused() -> None:
result = fix_code(
b"""\
def z():
from ctypes import c_short, c_uint, c_int, c_long, pointer, POINTER, byref
""",
)
expected = b"""\
def z():
pass
"""
assert result == expected
def test_fix_code_with_empty_string() -> None:
assert fix_code(b"") == b""
def test_fix_code_with_from_and_as_and_escaped_newline() -> None:
b"""Make sure stuff after escaped newline is not lost."""
result = fix_code(
b"""\
from collections import defaultdict, namedtuple \\
as xyz
xyz
""",
)
# We currently leave lines with escaped newlines as is. But in the
# future this we may parse them and remove unused import accordingly.
# For now, we'll work around it here.
result = re.sub(rb" *\\\n *as ", b" as ", result)
expected = b"""\
from collections import namedtuple as xyz
xyz
"""
assert fix_code(result) == expected
def test_fix_code_with_unused_variables() -> None:
result = fix_code(
b"""\
def main():
x = 10
y = 11
print(y)
""",
remove_unused_variables=True,
)
expected = b"""\
def main():
y = 11
print(y)
"""
assert result == expected
def test_fix_code_with_unused_variables_should_skip_nonlocal() -> None:
"""pyflakes does not handle nonlocal correctly."""
code = b"""\
def bar():
x = 1
def foo():
nonlocal x
x = 2
"""
assert fix_code(code, remove_unused_variables=True) == code
def test_fix_code_with_comma_on_right() -> None:
result = fix_code(
b"""\
def main():
x = (1, 2, 3)
""",
remove_unused_variables=True,
)
expected = b"""\
def main():
pass
"""
assert result == expected
def test_fix_code_with_unused_variables_should_skip_multiple() -> None:
code = b"""\
def main():
(x, y, z) = (1, 2, 3)
print(z)
"""
assert fix_code(code, remove_unused_variables=True) == code
def test_fix_code_should_handle_pyflakes_recursion_error_gracefully() -> None:
code = "x = [{}]".format("+".join("abc" for _ in range(2000))).encode()
assert fix_code(code) == code
def test_fix_code_with_duplicate_key() -> None:
result = fix_code(
b"""\
a = {
(0,1): 1,
(0, 1): 'two',
(0,1): 3,
}
print(a)
""",
remove_duplicate_keys=True,
)
expected = b"""\
a = {
(0,1): 3,
}
print(a)
"""
assert result == expected
def test_fix_code_with_duplicate_key_longer() -> None:
expected = b"""\
{
'a': 0,
'c': 2,
'd': 3,
'e': 4,
'f': 5,
'b': 6,
}
"""
result = fix_code(
b"""\
{
'a': 0,
'b': 1,
'c': 2,
'd': 3,
'e': 4,
'f': 5,
'b': 6,
}
""",
remove_duplicate_keys=True,
)
assert result == expected
def test_fix_code_with_duplicate_key_with_many_braces() -> None:
result = fix_code(
b"""\
a = None
{None: {None: None},
}
{
None: a.a,
None: a.b,
}
""",
remove_duplicate_keys=True,
)
expected = b"""\
a = None
{None: {None: None},
}
{
None: a.b,
}
"""
assert result == expected
def test_fix_code_should_ignore_complex_case_of_duplicate_key() -> None:
code = b"""\
a = {(0,1): 1, (0, 1): 'two',
(0,1): 3,
}
print(a)
"""
assert fix_code(code, remove_duplicate_keys=True) == code
def test_fix_code_should_ignore_complex_case_of_duplicate_key_comma() -> None:
code = b"""\
{
1: {0,
},
1: {2,
},
}
"""
assert fix_code(code, remove_duplicate_keys=True) == code
def test_fix_code_should_ignore_complex_case_of_duplicate_key_partially() -> None:
"""We only handle simple cases."""
code = b"""\
a = {(0,1): 1, (0, 1): 'two',
(0,1): 3,
(2,3): 4,
(2,3): 4,
(2,3): 5,
}
print(a)
"""
expected = b"""\
a = {(0,1): 1, (0, 1): 'two',
(0,1): 3,
(2,3): 5,
}
print(a)
"""
assert fix_code(code, remove_duplicate_keys=True) == expected
def test_fix_code_should_ignore_more_cases_of_duplicate_key() -> None:
"""We only handle simple cases."""
code = b"""\
a = {
(0,1):
1,
(0, 1): 'two',
(0,1): 3,
}
print(a)
"""
assert fix_code(code, remove_duplicate_keys=True) == code
def test_fix_code_should_ignore_duplicate_key_with_comments() -> None:
"""We only handle simple cases."""
code = b"""\
a = {
(0,1) # : f
:
1,
(0, 1): 'two',
(0,1): 3,
}
print(a)
"""
assert fix_code(code, remove_duplicate_keys=True) == code
code = b"""\
{
1: {0,
},
1: #{2,
#},
0
}
"""
assert fix_code(code, remove_duplicate_keys=True) == code
def test_fix_code_should_ignore_duplicate_key_with_multiline_key() -> None:
"""We only handle simple cases."""
code = b"""\
a = {
(0,1
): 1,
(0, 1): 'two',
(0,1): 3,
}
print(a)
"""
assert fix_code(code, remove_duplicate_keys=True) == code
def test_fix_code_should_ignore_duplicate_key_with_no_comma() -> None:
"""We don't want to delete the line and leave a lone comma."""
code = b"""\
a = {
(0,1) : 1
,
(0, 1): 'two',
(0,1): 3,
}
print(a)
"""
assert fix_code(code, remove_duplicate_keys=True) == code
def test_useless_pass_line_numbers() -> None:
assert list(useless_pass_line_numbers(b"pass\n")) == [1]
assert list(useless_pass_line_numbers(b"if True:\n pass\n")) == []
def test_useless_pass_line_numbers_with_escaped_newline() -> None:
assert list(useless_pass_line_numbers(b"if True:\\\n pass\n")) == []
def test_useless_pass_line_numbers_with_more_complex() -> None:
result = list(
useless_pass_line_numbers(
b"""\
if True:
pass
else:
True
x = 1
pass
""",
),
)
assert result == [6]
def test_filter_useless_pass() -> None:
result = b"".join(
filter_useless_pass(
b"""\
if True:
pass
else:
True
x = 1
pass
""",
),
)
expected = b"""\
if True:
pass
else:
True
x = 1
"""
assert result == expected
def test_filter_useless_pass_with_syntax_error() -> None:
source = b"""\
if True:
if True:
if True:
if True:
if True:
pass
else:
True
pass
pass
x = 1
"""
assert b"".join(filter_useless_pass(source)) == source
def test_filter_useless_pass_more_complex() -> None:
result = b"".join(
filter_useless_pass(
b"""\
if True:
pass
else:
def foo():
pass
# abc
def bar():
# abc
pass
def blah():
123
pass
pass # Nope.
pass
True
x = 1
pass
""",
),
)
expected = b"""\
if True:
pass
else:
def foo():
pass
# abc
def bar():
# abc
pass
def blah():
123
pass # Nope.
True
x = 1
"""
assert result == expected
def test_filter_useless_pass_with_try() -> None:
result = b"".join(
filter_useless_pass(
b"""\
import os
os.foo()
try:
pass
pass
except ImportError:
pass
""",
),
)
expected = b"""\
import os
os.foo()
try:
pass
except ImportError:
pass
"""
assert result == expected
def test_filter_useless_pass_leading_pass() -> None:
result = b"".join(
filter_useless_pass(
b"""\
if True:
pass
pass
pass
pass
else:
pass
True
x = 1
pass
""",
),
)
expected = b"""\
if True:
pass
else:
True
x = 1
"""
assert result == expected
def test_filter_useless_pass_leading_pass_with_number() -> None:
result = b"".join(
filter_useless_pass(
b"""\
def func11():
pass
0, 11 / 2
return 1
""",
),
)
expected = b"""\
def func11():
0, 11 / 2
return 1
"""
assert result == expected
def test_filter_useless_pass_leading_pass_with_string() -> None:
result = b"".join(
filter_useless_pass(
b"""\
def func11():
pass
'hello'
return 1
""",
),
)
expected = b"""\
def func11():
'hello'
return 1
"""
assert result == expected
def test_check() -> None:
assert check(b"import os")
def test_check_with_bad_syntax() -> None:
assert check(b"foo(") == []
def test_check_with_unicode() -> None:
assert check('print("∑")'.encode()) == []
assert check("import os # ∑".encode())
def test_get_diff_text() -> None:
result = "\n".join(
get_diff_text(["foo\n"], ["bar\n"], "").split("\n")[3:],
)
expected = """\
-foo
+bar
"""
assert result == expected
def test_get_diff_text_without_newline() -> None:
result = "\n".join(get_diff_text(["foo"], ["foo\n"], "").split("\n")[3:])
expected = """\
-foo
\\ No newline at end of file
+foo
"""
assert result == expected
def test_is_literal_or_name() -> None:
assert is_literal_or_name(b"123") is True
assert is_literal_or_name(b"[1, 2, 3]") is True
assert is_literal_or_name(b"xyz") is True
assert is_literal_or_name(b"xyz.prop") is False
assert is_literal_or_name(b" ") is False
def test_is_python_file(
temporary_file: Callable[..., "_GeneratorContextManager[str]"],
root_dir: pathlib.Path,
) -> None:
assert is_python_file(str(root_dir / "autoflake8" / "cli.py")) is True
with temporary_file("#!/usr/bin/env python", suffix="") as filename:
assert is_python_file(filename) is True
with temporary_file("#!/usr/bin/python", suffix="") as filename:
assert is_python_file(filename) is True
with temporary_file("#!/usr/bin/python3", suffix="") as filename:
assert is_python_file(filename) is True
with temporary_file("#!/usr/bin/pythonic", suffix="") as filename:
assert is_python_file(filename) is False
with temporary_file("###!/usr/bin/python", suffix="") as filename:
assert is_python_file(filename) is False
assert is_python_file(os.devnull) is False
assert is_python_file("/bin/bash") is False
@pytest.mark.parametrize(
("filename", "exclude", "expected"),
[
("1.py", ["test*", "1*"], True),
("2.py", ["test*", "1*"], False),
("test/test.py", ["test/**.py"], True),
("test/auto_test.py", ["test/*_test.py"], True),
("test/auto_auto.py", ["test/*_test.py"], False),
],
)
def test_is_exclude_file(filename: str, exclude: Iterable[str], expected: bool) -> None:
assert is_exclude_file(filename, exclude) is expected
def test_match_file(
temporary_file: Callable[..., "_GeneratorContextManager[str]"],
logger: logging.Logger,
) -> None:
with temporary_file("", suffix=".py", prefix=".") as filename:
assert match_file(filename, exclude=[], logger=logger) is False
assert match_file(os.devnull, exclude=[], logger=logger) is False
with temporary_file("", suffix=".py", prefix="") as filename:
assert match_file(filename, exclude=[], logger=logger) is True
def test_find_files(tmp_path: pathlib.Path, logger: logging.Logger) -> None:
target = tmp_path / "dir"
target.mkdir(parents=True)
(target / "a.py").write_text("")
exclude = target / "ex"
exclude.mkdir()
(exclude / "b.py").write_text("")
sub = exclude / "sub"
sub.mkdir()
(sub / "c.py").write_text("")
files = list(
find_files([str(tmp_path / "dir")], True, [str(exclude)], logger=logger),
)
file_names = [os.path.basename(f) for f in files]
assert "a.py" in file_names
assert "b.py" not in file_names
assert "c.py" not in file_names
def test_exclude(
autoflake8_command: List[str],
temporary_directory: Callable[..., "_GeneratorContextManager[str]"],
) -> None:
with temporary_directory(directory=".") as temp_directory:
with open(os.path.join(temp_directory, "a.py"), "w") as output:
output.write("import re\n")
os.mkdir(os.path.join(temp_directory, "d"))
with open(os.path.join(temp_directory, "d", "b.py"), "w") as output:
output.write("import os\n")
p = subprocess.Popen(
autoflake8_command + [temp_directory, "--recursive", "--exclude=a*"],
stdout=subprocess.PIPE,
)
stdout, _ = p.communicate()
result = stdout.decode("utf-8")
assert "import re" not in result
assert "import os" in result
|
py | 1a3771807f3cbb5df1c0cd32a2f3b81c7a4f4de8 | # -*- coding: utf-8 -*-
"""
The :mod:`sklearn.naive_bayes` module implements Naive Bayes algorithms. These
are supervised learning methods based on applying Bayes' theorem with strong
(naive) feature independence assumptions.
"""
# Author: Vincent Michel <[email protected]>
# Minor fixes by Fabian Pedregosa
# Amit Aides <[email protected]>
# Yehuda Finkelstein <[email protected]>
# Lars Buitinck
# Jan Hendrik Metzen <[email protected]>
# (parts based on earlier work by Mathieu Blondel)
#
# License: BSD 3 clause
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from .base import BaseEstimator, ClassifierMixin
from .preprocessing import binarize
from .preprocessing import LabelBinarizer
from .preprocessing import label_binarize
from .utils import check_X_y, check_array, check_consistent_length
from .utils.extmath import safe_sparse_dot
from .utils.fixes import logsumexp
from .utils.multiclass import _check_partial_fit_first_call
from .utils.validation import check_is_fitted
__all__ = ['BernoulliNB', 'GaussianNB', 'MultinomialNB', 'ComplementNB']
class BaseNB(BaseEstimator, ClassifierMixin, metaclass=ABCMeta):
"""Abstract base class for naive Bayes estimators"""
@abstractmethod
def _joint_log_likelihood(self, X):
"""Compute the unnormalized posterior log probability of X
I.e. ``log P(c) + log P(x|c)`` for all rows x of X, as an array-like of
shape [n_classes, n_samples].
Input is passed to _joint_log_likelihood as-is by predict,
predict_proba and predict_log_proba.
"""
def predict(self, X):
"""
Perform classification on an array of test vectors X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
Predicted target values for X
"""
jll = self._joint_log_likelihood(X)
return self.classes_[np.argmax(jll, axis=1)]
def predict_log_proba(self, X):
"""
Return log-probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array-like, shape = [n_samples, n_classes]
Returns the log-probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute `classes_`.
"""
jll = self._joint_log_likelihood(X)
# normalize by P(x) = P(f_1, ..., f_n)
log_prob_x = logsumexp(jll, axis=1)
return jll - np.atleast_2d(log_prob_x).T
def predict_proba(self, X):
"""
Return probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array-like, shape = [n_samples, n_classes]
Returns the probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute `classes_`.
"""
return np.exp(self.predict_log_proba(X))
class GaussianNB(BaseNB):
"""
Gaussian Naive Bayes (GaussianNB)
Can perform online updates to model parameters via `partial_fit` method.
For details on the algorithm used to update feature means and variance online,
see Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:
http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
Read more in the :ref:`User Guide <gaussian_naive_bayes>`.
Parameters
----------
priors : array-like, shape (n_classes,)
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
var_smoothing : float, optional (default=1e-9)
Portion of the largest variance of all features that is added to
variances for calculation stability.
Attributes
----------
class_prior_ : array, shape (n_classes,)
probability of each class.
class_count_ : array, shape (n_classes,)
number of training samples observed in each class.
theta_ : array, shape (n_classes, n_features)
mean of each feature per class
sigma_ : array, shape (n_classes, n_features)
variance of each feature per class
epsilon_ : float
absolute additive value to variances
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> Y = np.array([1, 1, 1, 2, 2, 2])
>>> from sklearn.naive_bayes import GaussianNB
>>> clf = GaussianNB()
>>> clf.fit(X, Y)
GaussianNB(priors=None, var_smoothing=1e-09)
>>> print(clf.predict([[-0.8, -1]]))
[1]
>>> clf_pf = GaussianNB()
>>> clf_pf.partial_fit(X, Y, np.unique(Y))
GaussianNB(priors=None, var_smoothing=1e-09)
>>> print(clf_pf.predict([[-0.8, -1]]))
[1]
"""
def __init__(self, priors=None, var_smoothing=1e-9):
self.priors = priors
self.var_smoothing = var_smoothing
def fit(self, X, y, sample_weight=None):
"""Fit Gaussian Naive Bayes according to X, y
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
sample_weight : array-like, shape (n_samples,), optional (default=None)
Weights applied to individual samples (1. for unweighted).
.. versionadded:: 0.17
Gaussian Naive Bayes supports fitting with *sample_weight*.
Returns
-------
self : object
"""
X, y = check_X_y(X, y)
return self._partial_fit(X, y, np.unique(y), _refit=True,
sample_weight=sample_weight)
@staticmethod
def _update_mean_variance(n_past, mu, var, X, sample_weight=None):
"""Compute online update of Gaussian mean and variance.
Given starting sample count, mean, and variance, a new set of
points X, and optionally sample weights, return the updated mean and
variance. (NB - each dimension (column) in X is treated as independent
-- you get variance, not covariance).
Can take scalar mean and variance, or vector mean and variance to
simultaneously update a number of independent Gaussians.
See Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:
http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
Parameters
----------
n_past : int
Number of samples represented in old mean and variance. If sample
weights were given, this should contain the sum of sample
weights represented in old mean and variance.
mu : array-like, shape (number of Gaussians,)
Means for Gaussians in original set.
var : array-like, shape (number of Gaussians,)
Variances for Gaussians in original set.
X : array-like, shape (n_samples, n_features)
New data points whose statistics are folded into the running estimates.
sample_weight : array-like, shape (n_samples,), optional (default=None)
Weights applied to individual samples (1. for unweighted).
Returns
-------
total_mu : array-like, shape (number of Gaussians,)
Updated mean for each Gaussian over the combined set.
total_var : array-like, shape (number of Gaussians,)
Updated variance for each Gaussian over the combined set.
"""
if X.shape[0] == 0:
return mu, var
# Compute (potentially weighted) mean and variance of new datapoints
if sample_weight is not None:
n_new = float(sample_weight.sum())
new_mu = np.average(X, axis=0, weights=sample_weight / n_new)
new_var = np.average((X - new_mu) ** 2, axis=0,
weights=sample_weight / n_new)
else:
n_new = X.shape[0]
new_var = np.var(X, axis=0)
new_mu = np.mean(X, axis=0)
if n_past == 0:
return new_mu, new_var
n_total = float(n_past + n_new)
# Combine mean of old and new data, taking into consideration
# (weighted) number of observations
total_mu = (n_new * new_mu + n_past * mu) / n_total
# Combine variance of old and new data, taking into consideration
# (weighted) number of observations. This is achieved by combining
# the sum-of-squared-differences (ssd)
old_ssd = n_past * var
new_ssd = n_new * new_var
total_ssd = (old_ssd + new_ssd +
(n_past / float(n_new * n_total)) *
(n_new * mu - n_new * new_mu) ** 2)
total_var = total_ssd / n_total
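# Worked check (unweighted, values chosen purely for illustration): merging
# n_past=2 points with mu=1.0, var=0.25 and n_new=2 points with mean 3.0,
# variance 0.25 gives total_mu = 2.0 and
# total_ssd = 0.5 + 0.5 + (2 / 8) * (2*1 - 2*3)**2 = 5.0,
# so total_var = 5.0 / 4 = 1.25 -- the same variance obtained by pooling all
# four points directly.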
return total_mu, total_var
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
This method has some performance and numerical stability overhead,
hence it is better to call partial_fit on chunks of data that are
as large as possible (as long as fitting in the memory budget) to
hide the overhead.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
classes : array-like, shape (n_classes,), optional (default=None)
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
sample_weight : array-like, shape (n_samples,), optional (default=None)
Weights applied to individual samples (1. for unweighted).
.. versionadded:: 0.17
Returns
-------
self : object
"""
return self._partial_fit(X, y, classes, _refit=False,
sample_weight=sample_weight)
def _partial_fit(self, X, y, classes=None, _refit=False,
sample_weight=None):
"""Actual implementation of Gaussian NB fitting.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
classes : array-like, shape (n_classes,), optional (default=None)
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
_refit : bool, optional (default=False)
If true, act as though this were the first time we called
_partial_fit (ie, throw away any past fitting and start over).
sample_weight : array-like, shape (n_samples,), optional (default=None)
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
"""
X, y = check_X_y(X, y)
if sample_weight is not None:
sample_weight = check_array(sample_weight, ensure_2d=False)
check_consistent_length(y, sample_weight)
# If the ratio of data variance between dimensions is too small, it
# will cause numerical errors. To address this, we artificially
# boost the variance by epsilon, a small fraction of the standard
# deviation of the largest dimension.
self.epsilon_ = self.var_smoothing * np.var(X, axis=0).max()
if _refit:
self.classes_ = None
if _check_partial_fit_first_call(self, classes):
# This is the first call to partial_fit:
# initialize various cumulative counters
n_features = X.shape[1]
n_classes = len(self.classes_)
self.theta_ = np.zeros((n_classes, n_features))
self.sigma_ = np.zeros((n_classes, n_features))
self.class_count_ = np.zeros(n_classes, dtype=np.float64)
# Initialise the class prior
# Take into account the priors
if self.priors is not None:
priors = np.asarray(self.priors)
# Check that the provided priors match the number of classes
if len(priors) != n_classes:
raise ValueError('Number of priors must match number of'
' classes.')
# Check that the sum is 1
if not np.isclose(priors.sum(), 1.0):
raise ValueError('The sum of the priors should be 1.')
# Check that the priors are non-negative
if (priors < 0).any():
raise ValueError('Priors must be non-negative.')
self.class_prior_ = priors
else:
# Initialize the priors to zeros for each class
self.class_prior_ = np.zeros(len(self.classes_),
dtype=np.float64)
else:
if X.shape[1] != self.theta_.shape[1]:
msg = "Number of features %d does not match previous data %d."
raise ValueError(msg % (X.shape[1], self.theta_.shape[1]))
# Put epsilon back in each time
self.sigma_[:, :] -= self.epsilon_
classes = self.classes_
unique_y = np.unique(y)
unique_y_in_classes = np.in1d(unique_y, classes)
if not np.all(unique_y_in_classes):
raise ValueError("The target label(s) %s in y do not exist in the "
"initial classes %s" %
(unique_y[~unique_y_in_classes], classes))
for y_i in unique_y:
i = classes.searchsorted(y_i)
X_i = X[y == y_i, :]
if sample_weight is not None:
sw_i = sample_weight[y == y_i]
N_i = sw_i.sum()
else:
sw_i = None
N_i = X_i.shape[0]
new_theta, new_sigma = self._update_mean_variance(
self.class_count_[i], self.theta_[i, :], self.sigma_[i, :],
X_i, sw_i)
self.theta_[i, :] = new_theta
self.sigma_[i, :] = new_sigma
self.class_count_[i] += N_i
self.sigma_[:, :] += self.epsilon_
# Update only if no priors were provided
if self.priors is None:
# Empirical prior, with sample_weight taken into account
self.class_prior_ = self.class_count_ / self.class_count_.sum()
return self
def _joint_log_likelihood(self, X):
check_is_fitted(self, "classes_")
X = check_array(X)
joint_log_likelihood = []
for i in range(np.size(self.classes_)):
jointi = np.log(self.class_prior_[i])
n_ij = - 0.5 * np.sum(np.log(2. * np.pi * self.sigma_[i, :]))
n_ij -= 0.5 * np.sum(((X - self.theta_[i, :]) ** 2) /
(self.sigma_[i, :]), 1)
joint_log_likelihood.append(jointi + n_ij)
joint_log_likelihood = np.array(joint_log_likelihood).T
return joint_log_likelihood
_ALPHA_MIN = 1e-10
class BaseDiscreteNB(BaseNB):
"""Abstract base class for naive Bayes on discrete/categorical data
Any estimator based on this class should provide:
__init__
_joint_log_likelihood(X) as per BaseNB
"""
def _update_class_log_prior(self, class_prior=None):
n_classes = len(self.classes_)
if class_prior is not None:
if len(class_prior) != n_classes:
raise ValueError("Number of priors must match number of"
" classes.")
self.class_log_prior_ = np.log(class_prior)
elif self.fit_prior:
# empirical prior, with sample_weight taken into account
self.class_log_prior_ = (np.log(self.class_count_) -
np.log(self.class_count_.sum()))
else:
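# uniform class prior: every class gets log(1 / n_classes)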
self.class_log_prior_ = np.full(n_classes, -np.log(n_classes))
def _check_alpha(self):
if np.min(self.alpha) < 0:
raise ValueError('Smoothing parameter alpha = %.1e. '
'alpha should be > 0.' % np.min(self.alpha))
if isinstance(self.alpha, np.ndarray):
if not self.alpha.shape[0] == self.feature_count_.shape[1]:
raise ValueError("alpha should be a scalar or a numpy array "
"with shape [n_features]")
if np.min(self.alpha) < _ALPHA_MIN:
warnings.warn('alpha too small will result in numeric errors, '
'setting alpha = %.1e' % _ALPHA_MIN)
return np.maximum(self.alpha, _ALPHA_MIN)
return self.alpha
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
This method has some performance overhead hence it is better to call
partial_fit on chunks of data that are as large as possible
(as long as fitting in the memory budget) to hide the overhead.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
classes : array-like, shape = [n_classes] (default=None)
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
sample_weight : array-like, shape = [n_samples] (default=None)
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
_, n_features = X.shape
if _check_partial_fit_first_call(self, classes):
# This is the first call to partial_fit:
# initialize various cumulative counters
n_effective_classes = len(classes) if len(classes) > 1 else 2
self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
self.feature_count_ = np.zeros((n_effective_classes, n_features),
dtype=np.float64)
elif n_features != self.coef_.shape[1]:
msg = "Number of features %d does not match previous data %d."
raise ValueError(msg % (n_features, self.coef_.shape[-1]))
Y = label_binarize(y, classes=self.classes_)
if Y.shape[1] == 1:
Y = np.concatenate((1 - Y, Y), axis=1)
if X.shape[0] != Y.shape[0]:
msg = "X.shape[0]=%d and y.shape[0]=%d are incompatible."
raise ValueError(msg % (X.shape[0], y.shape[0]))
# label_binarize() returns arrays with dtype=np.int64.
# We convert it to np.float64 to support sample_weight consistently
Y = Y.astype(np.float64)
if sample_weight is not None:
sample_weight = np.atleast_2d(sample_weight)
Y *= check_array(sample_weight).T
class_prior = self.class_prior
# Count raw events from data before updating the class log prior
# and feature log probas
self._count(X, Y)
# XXX: OPTIM: we could introduce a public finalization method to
# be called by the user explicitly just once after several consecutive
# calls to partial_fit and prior to any call to predict[_[log_]proba]
# to avoid computing the smooth log probas at each call to partial fit
alpha = self._check_alpha()
self._update_feature_log_prob(alpha)
self._update_class_log_prior(class_prior=class_prior)
return self
def fit(self, X, y, sample_weight=None):
"""Fit Naive Bayes classifier according to X, y
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
sample_weight : array-like, shape = [n_samples], (default=None)
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
"""
X, y = check_X_y(X, y, 'csr')
_, n_features = X.shape
labelbin = LabelBinarizer()
Y = labelbin.fit_transform(y)
self.classes_ = labelbin.classes_
if Y.shape[1] == 1:
Y = np.concatenate((1 - Y, Y), axis=1)
# LabelBinarizer().fit_transform() returns arrays with dtype=np.int64.
# We convert it to np.float64 to support sample_weight consistently;
# this means we also don't have to cast X to floating point
Y = Y.astype(np.float64)
if sample_weight is not None:
sample_weight = np.atleast_2d(sample_weight)
Y *= check_array(sample_weight).T
class_prior = self.class_prior
# Count raw events from data before updating the class log prior
# and feature log probas
n_effective_classes = Y.shape[1]
self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
self.feature_count_ = np.zeros((n_effective_classes, n_features),
dtype=np.float64)
self._count(X, Y)
alpha = self._check_alpha()
self._update_feature_log_prob(alpha)
self._update_class_log_prior(class_prior=class_prior)
return self
# XXX The following is a stopgap measure; we need to set the dimensions
# of class_log_prior_ and feature_log_prob_ correctly.
def _get_coef(self):
return (self.feature_log_prob_[1:]
if len(self.classes_) == 2 else self.feature_log_prob_)
def _get_intercept(self):
return (self.class_log_prior_[1:]
if len(self.classes_) == 2 else self.class_log_prior_)
coef_ = property(_get_coef)
intercept_ = property(_get_intercept)
class MultinomialNB(BaseDiscreteNB):
"""
Naive Bayes classifier for multinomial models
The multinomial Naive Bayes classifier is suitable for classification with
discrete features (e.g., word counts for text classification). The
multinomial distribution normally requires integer feature counts. However,
in practice, fractional counts such as tf-idf may also work.
Read more in the :ref:`User Guide <multinomial_naive_bayes>`.
Parameters
----------
alpha : float, optional (default=1.0)
Additive (Laplace/Lidstone) smoothing parameter
(0 for no smoothing).
fit_prior : boolean, optional (default=True)
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
class_prior : array-like, size (n_classes,), optional (default=None)
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
Attributes
----------
class_log_prior_ : array, shape (n_classes, )
Smoothed empirical log probability for each class.
intercept_ : array, shape (n_classes, )
Mirrors ``class_log_prior_`` for interpreting MultinomialNB
as a linear model.
feature_log_prob_ : array, shape (n_classes, n_features)
Empirical log probability of features
given a class, ``P(x_i|y)``.
coef_ : array, shape (n_classes, n_features)
Mirrors ``feature_log_prob_`` for interpreting MultinomialNB
as a linear model.
class_count_ : array, shape (n_classes,)
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
feature_count_ : array, shape (n_classes, n_features)
Number of samples encountered for each (class, feature)
during fitting. This value is weighted by the sample weight when
provided.
Examples
--------
>>> import numpy as np
>>> X = np.random.randint(5, size=(6, 100))
>>> y = np.array([1, 2, 3, 4, 5, 6])
>>> from sklearn.naive_bayes import MultinomialNB
>>> clf = MultinomialNB()
>>> clf.fit(X, y)
MultinomialNB(alpha=1.0, class_prior=None, fit_prior=True)
>>> print(clf.predict(X[2:3]))
[3]
Notes
-----
For the rationale behind the names `coef_` and `intercept_`, i.e.
naive Bayes as a linear classifier, see J. Rennie et al. (2003),
Tackling the poor assumptions of naive Bayes text classifiers, ICML.
References
----------
C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
Information Retrieval. Cambridge University Press, pp. 234-265.
https://nlp.stanford.edu/IR-book/html/htmledition/naive-bayes-text-classification-1.html
"""
def __init__(self, alpha=1.0, fit_prior=True, class_prior=None):
self.alpha = alpha
self.fit_prior = fit_prior
self.class_prior = class_prior
def _count(self, X, Y):
"""Count and smooth feature occurrences."""
if np.any((X.data if issparse(X) else X) < 0):
raise ValueError("Input X must be non-negative")
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
def _update_feature_log_prob(self, alpha):
"""Apply smoothing to raw counts and recompute log probabilities"""
smoothed_fc = self.feature_count_ + alpha
smoothed_cc = smoothed_fc.sum(axis=1)
self.feature_log_prob_ = (np.log(smoothed_fc) -
np.log(smoothed_cc.reshape(-1, 1)))
def _joint_log_likelihood(self, X):
"""Calculate the posterior log probability of the samples X"""
check_is_fitted(self, "classes_")
X = check_array(X, accept_sparse='csr')
return (safe_sparse_dot(X, self.feature_log_prob_.T) +
self.class_log_prior_)
class ComplementNB(BaseDiscreteNB):
"""The Complement Naive Bayes classifier described in Rennie et al. (2003).
The Complement Naive Bayes classifier was designed to correct the "severe
assumptions" made by the standard Multinomial Naive Bayes classifier. It is
particularly suited for imbalanced data sets.
Read more in the :ref:`User Guide <complement_naive_bayes>`.
Parameters
----------
alpha : float, optional (default=1.0)
Additive (Laplace/Lidstone) smoothing parameter (0 for no smoothing).
fit_prior : boolean, optional (default=True)
Only used in edge case with a single class in the training set.
class_prior : array-like, size (n_classes,), optional (default=None)
Prior probabilities of the classes. Not used.
norm : boolean, optional (default=False)
Whether or not a second normalization of the weights is performed. The
default behavior mirrors the implementations found in Mahout and Weka,
which do not follow the full algorithm described in Table 9 of the
paper.
Attributes
----------
class_log_prior_ : array, shape (n_classes, )
Smoothed empirical log probability for each class. Only used in edge
case with a single class in the training set.
feature_log_prob_ : array, shape (n_classes, n_features)
Empirical weights for class complements.
class_count_ : array, shape (n_classes,)
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
feature_count_ : array, shape (n_classes, n_features)
Number of samples encountered for each (class, feature) during fitting.
This value is weighted by the sample weight when provided.
feature_all_ : array, shape (n_features,)
Number of samples encountered for each feature during fitting. This
value is weighted by the sample weight when provided.
Examples
--------
>>> import numpy as np
>>> X = np.random.randint(5, size=(6, 100))
>>> y = np.array([1, 2, 3, 4, 5, 6])
>>> from sklearn.naive_bayes import ComplementNB
>>> clf = ComplementNB()
>>> clf.fit(X, y)
ComplementNB(alpha=1.0, class_prior=None, fit_prior=True, norm=False)
>>> print(clf.predict(X[2:3]))
[3]
References
----------
Rennie, J. D., Shih, L., Teevan, J., & Karger, D. R. (2003).
Tackling the poor assumptions of naive bayes text classifiers. In ICML
(Vol. 3, pp. 616-623).
https://people.csail.mit.edu/jrennie/papers/icml03-nb.pdf
"""
def __init__(self, alpha=1.0, fit_prior=True, class_prior=None,
norm=False):
self.alpha = alpha
self.fit_prior = fit_prior
self.class_prior = class_prior
self.norm = norm
def _count(self, X, Y):
"""Count feature occurrences."""
if np.any((X.data if issparse(X) else X) < 0):
raise ValueError("Input X must be non-negative")
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
self.feature_all_ = self.feature_count_.sum(axis=0)
def _update_feature_log_prob(self, alpha):
"""Apply smoothing to raw counts and compute the weights."""
comp_count = self.feature_all_ + alpha - self.feature_count_
logged = np.log(comp_count / comp_count.sum(axis=1, keepdims=True))
# BaseNB.predict uses argmax, but ComplementNB operates with argmin.
feature_log_prob = -logged
if self.norm:
summed = logged.sum(axis=1, keepdims=True)
feature_log_prob = -feature_log_prob / summed
self.feature_log_prob_ = feature_log_prob
def _joint_log_likelihood(self, X):
"""Calculate the class scores for the samples in X."""
check_is_fitted(self, "classes_")
X = check_array(X, accept_sparse="csr")
jll = safe_sparse_dot(X, self.feature_log_prob_.T)
if len(self.classes_) == 1:
jll += self.class_log_prior_
return jll
class BernoulliNB(BaseDiscreteNB):
"""Naive Bayes classifier for multivariate Bernoulli models.
Like MultinomialNB, this classifier is suitable for discrete data. The
difference is that while MultinomialNB works with occurrence counts,
BernoulliNB is designed for binary/boolean features.
Read more in the :ref:`User Guide <bernoulli_naive_bayes>`.
Parameters
----------
alpha : float, optional (default=1.0)
Additive (Laplace/Lidstone) smoothing parameter
(0 for no smoothing).
binarize : float or None, optional (default=0.0)
Threshold for binarizing (mapping to booleans) of sample features.
If None, input is presumed to already consist of binary vectors.
fit_prior : boolean, optional (default=True)
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
class_prior : array-like, size=[n_classes,], optional (default=None)
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
Attributes
----------
class_log_prior_ : array, shape = [n_classes]
Log probability of each class (smoothed).
feature_log_prob_ : array, shape = [n_classes, n_features]
Empirical log probability of features given a class, P(x_i|y).
class_count_ : array, shape = [n_classes]
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
feature_count_ : array, shape = [n_classes, n_features]
Number of samples encountered for each (class, feature)
during fitting. This value is weighted by the sample weight when
provided.
Examples
--------
>>> import numpy as np
>>> X = np.random.randint(2, size=(6, 100))
>>> Y = np.array([1, 2, 3, 4, 4, 5])
>>> from sklearn.naive_bayes import BernoulliNB
>>> clf = BernoulliNB()
>>> clf.fit(X, Y)
BernoulliNB(alpha=1.0, binarize=0.0, class_prior=None, fit_prior=True)
>>> print(clf.predict(X[2:3]))
[3]
References
----------
C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
Information Retrieval. Cambridge University Press, pp. 234-265.
https://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
A. McCallum and K. Nigam (1998). A comparison of event models for naive
Bayes text classification. Proc. AAAI/ICML-98 Workshop on Learning for
Text Categorization, pp. 41-48.
V. Metsis, I. Androutsopoulos and G. Paliouras (2006). Spam filtering with
naive Bayes -- Which naive Bayes? 3rd Conf. on Email and Anti-Spam (CEAS).
"""
def __init__(self, alpha=1.0, binarize=.0, fit_prior=True,
class_prior=None):
self.alpha = alpha
self.binarize = binarize
self.fit_prior = fit_prior
self.class_prior = class_prior
def _count(self, X, Y):
"""Count and smooth feature occurrences."""
if self.binarize is not None:
X = binarize(X, threshold=self.binarize)
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
def _update_feature_log_prob(self, alpha):
"""Apply smoothing to raw counts and recompute log probabilities"""
smoothed_fc = self.feature_count_ + alpha
smoothed_cc = self.class_count_ + alpha * 2
self.feature_log_prob_ = (np.log(smoothed_fc) -
np.log(smoothed_cc.reshape(-1, 1)))
def _joint_log_likelihood(self, X):
"""Calculate the posterior log probability of the samples X"""
check_is_fitted(self, "classes_")
X = check_array(X, accept_sparse='csr')
if self.binarize is not None:
X = binarize(X, threshold=self.binarize)
n_classes, n_features = self.feature_log_prob_.shape
n_samples, n_features_X = X.shape
if n_features_X != n_features:
raise ValueError("Expected input with %d features, got %d instead"
% (n_features, n_features_X))
neg_prob = np.log(1 - np.exp(self.feature_log_prob_))
# Compute neg_prob · (1 - X).T as ∑neg_prob - X · neg_prob
jll = safe_sparse_dot(X, (self.feature_log_prob_ - neg_prob).T)
jll += self.class_log_prior_ + neg_prob.sum(axis=1)
return jll
|
py | 1a3771edc21d951ce36b7eb557dace61924d8b7f | from abstract.instruccion import *
from tools.tabla_tipos import *
class inherits(instruccion):
def __init__(self, ID, line, column, num_nodo):
super().__init__(line,column)
self.ID = ID
# INHERITS node
self.nodo = nodo_AST('INHERITS', num_nodo)
self.nodo.hijos.append(nodo_AST('INHERITS', num_nodo+1))
self.nodo.hijos.append(nodo_AST('(', num_nodo + 2))
self.nodo.hijos.append(nodo_AST(ID, num_nodo + 3))
self.nodo.hijos.append(nodo_AST(')', num_nodo + 4))
# Grammar
self.grammar_ = '<TR><TD> INHERITS ::= INHERITS (' + ID + ') </TD><TD> new inherits(' + ID + '); </TD></TR>'
def ejecutar(self):
pass |
py | 1a3772826ff4c71f930cefff8acb7f1eb1e25538 | from __future__ import annotations
import typing
from types import TracebackType
import httpx
from ._endpoints_mappings import MappingsEndpoint
from ._endpoints_near_misses import NearMissesEndpoint
from ._endpoints_recordings import RecordingsEndpoint
from ._endpoints_requests import RequestsEndpoint
from ._endpoints_scenarios import ScenariosEndpoint
from ._endpoints_system import SystemEndpoint
from ._exceptions import WiremockConnectionException
from ._exceptions import WiremockForbiddenException
from ._exceptions import WiremockMalformedRequest
from ._exceptions import WiremockNotFoundException
from ._exceptions import WiremockServerException
from ._exceptions import WiremockTimeoutException
from ._response import WiremockResponse
from ._schemas import WiremockSchema
from ._types import TimeoutTypes
from ._types import VerifyTypes
class WiremockClient:
"""
A (synchronous) python client for the wiremock admin API.
The WiremockClient instance is a facade of various wiremock endpoints; to access the endpoints
refer to:
https://wiremock.org/docs/api/
:param host: The host of the running wiremock instance
:param port: The port wiremock is listening on
:param timeout: Configuration for connect, read, write & pool timeouts.
Timeout can be a tuple of up to length 4, a single float (applied equally to all
timeouts), or an httpx.Timeout instance.
:param client_verify: controls SSL verification; False by default, meaning SSL certificates are not checked.
"""
def __init__(
self,
https: bool = False,
host: str = "localhost",
port: int = 8080,
timeout: TimeoutTypes = 30.00,
client_verify: VerifyTypes = False,
) -> None:
protocol = "http" if not https else "https"
self.host = f"{protocol}://{host}:{port}/__admin/"
self.client = httpx.Client(base_url=self.host, timeout=timeout, verify=client_verify)
self.dispatcher = Dispatcher(self.client, self.host)
self.stubs = MappingsEndpoint(self.dispatcher)
self.requests = RequestsEndpoint(self.dispatcher)
self.near_misses = NearMissesEndpoint(self.dispatcher)
self.recordings = RecordingsEndpoint(self.dispatcher)
self.scenarios = ScenariosEndpoint(self.dispatcher)
self.settings = SystemEndpoint(self.dispatcher)
def __enter__(self) -> WiremockClient:
return self
def __exit__(
self,
exc_type: typing.Optional[typing.Type[BaseException]] = None,
exc_val: typing.Optional[BaseException] = None,
exc_tb: typing.Optional[TracebackType] = None,
):
self.client.close()
def __del__(self) -> None:
self.client.close()
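# A minimal usage sketch, kept as a comment (assumption: a wiremock server with
# its admin API reachable on localhost:8080; the concrete endpoint methods live
# in the imported *Endpoint classes and are not shown in this module):
#
#     with WiremockClient(host="localhost", port=8080, timeout=10.0) as client:
#         client.stubs       # MappingsEndpoint facade
#         client.requests    # RequestsEndpoint facade
#         client.scenarios   # ScenariosEndpoint facade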
class Dispatcher:
def __init__(self, client: httpx.Client, host: str) -> None:
self.client = client
self.host = host
def __call__( # type: ignore[return]
self,
*,
method: str,
url: str,
payload: typing.Optional[typing.Any] = None,
params: typing.Optional[typing.Dict[str, typing.Any]] = None,
schema: typing.Optional[typing.Type[WiremockSchema]] = None,
schema_kw: typing.Optional[typing.Dict[typing.Any, typing.Any]] = None,
) -> WiremockResponse:
"""Dispatches HTTP requests. We could implement this via __call__ but it should be private."""
if schema is not None:
payload = schema(**schema_kw or {}).dump(payload)
try:
httpx_response = self.client.request(method=method, url=url, json=payload)
print(httpx_response.request.content, httpx_response.request.url)
status = httpx_response.status_code
if status in (200, 201):
# Successfully fetching/creating a resource.
return WiremockResponse(httpx_response)
elif status == 401:
raise WiremockForbiddenException(httpx_response.text, status)
elif status == 404:
raise WiremockNotFoundException(
f"No wiremock instance running, {httpx_response.request.url} not found.", status
)
elif status == 422:
raise WiremockMalformedRequest(httpx_response.text, status)
elif status == 500:
raise WiremockServerException(httpx_response.extensions["reason_phrase"], status)
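# Any other status code falls through without a return, so the method
# implicitly yields None; the "type: ignore[return]" above covers this.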
except httpx.TimeoutException as exc:
raise WiremockTimeoutException(str(exc)) from None
except httpx.ConnectError:
raise WiremockConnectionException(self.host) from None
|
py | 1a377532a3707b2310d9f15fa9ab7746a07c1770 |
#THIS IS CURRENTLY BEING USED FOR ANY WORLD DISPLAYS / ADDING IN WRONG SYNTAX RESPONSES ALSO SOON.
worldNotice = {
"helper": {
"barrier": "-----------------------------------------------------------------------",
'public_notice_symbol': '@Notice:',
"examine": "You Examine Your {0}",
"temp_name": "Welcome To Eeveaem, Please Enjoy Your Journey.",
"enter_message": "{0} Has Joined Eeveaem.",
"quit_message": "{0} Has Departed Eeveaem.",
"welcome_part1": "Welcome To The Chain! ",
"welcome_part2": " [HELP/HEL] displays list of commands. Have fun!",
"helpstart": """welcome to help!\n\r
please feel free at any point to use the '/' command for special help on any given topic. example being /command\n\r
would then display a detailed help packet for that command\n\r
if you find any commands that are not listed feel free\n\r
to Contact the devs! commands can be found with '/command'. as always ~Enjoy!~ ~Skrptek & Crew~""",
"helpcommand": """ Hello! Welcome To '/command' ! Here You Can Find A Detailed List Of Commands!\n\r
~Format Is [COMMAND/COMMAND_SHORT] - [ACTION_PROVIDED]~\n\r
*[HELP/HEL] - [DISPLAYS FIRST HELP MESSAGE]\n\r
*[/] - [DISPLAYS SPECIFIC HELP ON A TOPIC USAGE: /*SAY OR /COMMAND]\n\r
*[SAY/SAYS/HABLO/S] - [DISPLAYS A MESSAGE FROM CHARACTER TO THE ENVIRONMENT]\n\r
*[*SAY/*SAYS] - [CHANGES THE WAY THE USER SPEAKS]\n\r
*[INVENTORY/INVENTOR/INVENTO/INVENT/INVEN/INVE/INV/GEAR/EQUIPMENT] - [CHECKS CURRENT INVENTORY]\n\r
*[BALANCE.<Token / Koin>] - [USED TO CHECK USER CURRENCY LIST]\n\r
*[EXIT/QUIT/TERMINATE] - [EXITS THE USER AND CLOSES CURRENT SESSION]\n\r""",
"say_change": """ hello there welcome to the *say voice changer! you may\n\r
use any of the options below! please enjoy!\n\r
PLEASE REMEMBER WHATEVER YOU CHOOSE BELOW WILL BE YOUR CHOICE+(S) '*say BARK' Skrypt Barks :+: MESSAGE\n\r
PLEASE UNDERSTAND THE DIFFERENCE BETWEEN PLURAL USAGE. EXAMPLE BELOW:\n\r
______________________________________________________________________\n\r
(Non-plural Usage)\n\r
*say chirp\n\r
say hello\n\r
>User Chirps :+: Hello\n\r
______________________\n\r
(Plural Usage)\n\r
*say chirps\n\r
say hello\n\r
>User Chirps And Says :+: Hello\n\r
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\r
*say [SAY, SING/S, CHIRP/S, BARK/S, GROWL/S, HUM/S, GRUNT/S, BURP/S, GIGGLE/S, SNICKER/S, CLICK/S, SIGN/S, BUBBLE/S, YOWL/S, SNAP/S]\n\r
""",
}
} |
py | 1a37756fa5e442b2f5c342840bdc8971a0024035 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-17 15:23
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Ingredient',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('qty_needed', models.DecimalField(decimal_places=5, max_digits=15, validators=[django.core.validators.MinValueValidator(0)])),
],
options={
'verbose_name': 'Ingrediente',
'verbose_name_plural': 'Ingredienti',
},
),
migrations.CreateModel(
name='Menu',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateField()),
('note', models.TextField(blank=True, null=True)),
],
options={
'verbose_name': 'Menu',
'verbose_name_plural': 'Menu',
},
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('kind', models.SmallIntegerField(choices=[(1, 'Piatto'), (2, 'Preparato'), (3, 'Materia prima')])),
('is_complete_meal', models.NullBooleanField()),
],
options={
'verbose_name': 'Prodotto',
'verbose_name_plural': 'Prodotti',
},
),
migrations.CreateModel(
name='Shelf',
fields=[
('id', models.CharField(max_length=5, primary_key=True, serialize=False)),
],
options={
'verbose_name': 'Scaffale',
'verbose_name_plural': 'Scaffali',
},
),
migrations.CreateModel(
name='UoM',
fields=[
('name', models.CharField(max_length=20, primary_key=True, serialize=False)),
('readable_name', models.CharField(blank=True, max_length=50, null=True)),
],
options={
'verbose_name': 'Unità di misura',
'verbose_name_plural': 'Unità di misura',
},
),
migrations.CreateModel(
name='Warehouse',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('quantity', models.DecimalField(decimal_places=5, max_digits=15, validators=[django.core.validators.MinValueValidator(0)])),
('date', models.DateTimeField(default=django.utils.timezone.now)),
],
options={
'verbose_name': 'Magazzino',
'verbose_name_plural': 'Magazzino',
},
),
migrations.CreateModel(
name='Recipe',
fields=[
('product', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to='core.Product')),
('note', models.TextField(blank=True, null=True)),
],
options={
'verbose_name': 'Ricetta',
'verbose_name_plural': 'Ricette',
},
),
migrations.AddField(
model_name='warehouse',
name='product',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='core.Product'),
),
migrations.AddField(
model_name='warehouse',
name='shelf',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='core.Shelf'),
),
migrations.AddField(
model_name='product',
name='uom',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='core.UoM'),
),
migrations.AddField(
model_name='menu',
name='courses',
field=models.ManyToManyField(to='core.Product'),
),
migrations.AddField(
model_name='ingredient',
name='product',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Product'),
),
migrations.AddField(
model_name='ingredient',
name='recipe',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Recipe'),
),
migrations.AlterUniqueTogether(
name='ingredient',
unique_together=set([('product', 'recipe')]),
),
]
|
py | 1a3775f29b45f187cf1f341d97fa34c97fbe0fbb | from abc import ABC
import numpy as np
from numpy.lib.function_base import copy
from scipy.signal import convolve2d
from typing import List
import re
import sys
class Filter(ABC):
def apply(self, data: np.ndarray) -> np.ndarray:
raise NotImplementedError()
class ConvFilter(Filter):
def __init__(self, weights: np.ndarray, bias: float = 0) -> None:
super().__init__()
assert len(weights.shape) == 2
assert weights.shape[0] == weights.shape[1]
self.weights = weights
self.bias = bias
def apply(self, data: np.ndarray) -> np.ndarray:
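# convolve2d performs true convolution (it flips the kernel); passing the
# pre-flipped weights cancels that flip, so this is effectively a
# cross-correlation with the original weights, plus a scalar bias.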
return convolve2d(data, self.weights[::-1,::-1], mode='same') + self.bias
class Activation(ABC):
def apply(self, data: np.ndarray) -> np.ndarray:
raise NotImplementedError()
class ReLUActivation(Activation):
def __init__(self, threshold: float = 0) -> None:
super().__init__()
self.threshold = threshold
def apply(self, data: np.ndarray) -> np.ndarray:
data = data.copy()
data[data < self.threshold] = self.threshold
return data
class ConvUsingProgram:
def __init__(self, filters: List[Filter], activation: Activation) -> None:
self.filters = filters
print('filters:')
print('\n'.join([str(filter.weights) for filter in filters]))
self.activation = activation
def run(self, data: np.ndarray) -> np.ndarray:
states = set()
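# Remember every grid produced so far (as raw bytes); the loop below stops and
# returns the first grid that repeats, i.e. once the iteration reaches a fixed
# point or re-enters a cycle.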
print('run:')
while True:
print(data)
#input('step?')
result = self.step(data)
res_key = result.tobytes()
if res_key in states:
return result
states.add(res_key)
data = result
def step(self, data: np.ndarray) -> np.ndarray:
filter_result = np.array([filter.apply(data) for filter in self.filters])
#print(filter_result)
activation_result = self.activation.apply(filter_result)
#print(activation_result)
return np.sum(activation_result, axis=0)
def parse_program(code: str) -> ConvUsingProgram:
code_lines = _parse_lines(code)
filters = []
i = 0
while i < len(code_lines):
size = len(code_lines[i])
if size == 2:
size = 1
assert size % 2 != 0
weights = np.nan_to_num(np.array(code_lines[i:i+size]), copy=False)
bias = code_lines[i+size][0]
filters.append(ConvFilter(weights, bias=bias))
i += size + 1
activation = ReLUActivation(threshold=0)
return ConvUsingProgram(filters, activation)
def parse_data(data: str) -> np.ndarray:
data_lines = _parse_lines(data)
return np.nan_to_num(np.array(data_lines), copy=False)
def _parse_lines(text: str) -> List[List[float]]:
return [
code_line
for code_line in [
[float(number) for number in re.findall(r'([+-]?(?:\d+\.?\d*|\.\d+|inf))', line)]
for line in text.splitlines()
]
if len(code_line) > 0
]
if __name__ == '__main__':
with open(sys.argv[1]) as f:
program = parse_program(f.read())
with open(sys.argv[2]) as f:
data = parse_data(f.read())
result = program.run(data)
print('result:')
print(result) |
py | 1a37763d5e06e840744425146a7237db7bcf6f45 | #!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
ZMQ example using python3's asyncio
Faithcoin should be started with the command line arguments:
arvcoind -testnet -daemon \
-zmqpubrawtx=tcp://127.0.0.1:28332 \
-zmqpubrawblock=tcp://127.0.0.1:28332 \
-zmqpubhashtx=tcp://127.0.0.1:28332 \
-zmqpubhashblock=tcp://127.0.0.1:28332
We use the asyncio library here. `self.handle()` installs itself as a
future at the end of the function. Since it never returns with the event
loop having an empty stack of futures, this creates an infinite loop. An
alternative is to wrap the contents of `handle` inside `while True`.
The `@asyncio.coroutine` decorator and the `yield from` syntax found here
were introduced in Python 3.4 and have been deprecated in favor of the `async`
and `await` keywords respectively.
A blocking example using python 2.7 can be obtained from the git history:
https://github.com/bitcoin/bitcoin/blob/37a7fe9e440b83e2364d5498931253937abe9294/contrib/zmq/zmq_sub.py
"""
import binascii
import asyncio
import zmq
import zmq.asyncio
import signal
import struct
import sys
if (sys.version_info.major, sys.version_info.minor) < (3, 4):
print("This example only works with Python 3.4 and greater")
sys.exit(1)
port = 28332
class ZMQHandler():
def __init__(self):
self.loop = asyncio.get_event_loop()
self.zmqContext = zmq.asyncio.Context()
self.zmqSubSocket = self.zmqContext.socket(zmq.SUB)
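# RCVHWM = 0 removes the receive high-water mark, so incoming messages are
# never dropped on the client side.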
self.zmqSubSocket.setsockopt(zmq.RCVHWM, 0)
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "hashblock")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "hashtx")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawblock")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawtx")
self.zmqSubSocket.connect("tcp://127.0.0.1:%i" % port)
@asyncio.coroutine
def handle(self) :
msg = yield from self.zmqSubSocket.recv_multipart()
topic = msg[0]
body = msg[1]
sequence = "Unknown"
if len(msg[-1]) == 4:
msgSequence = struct.unpack('<I', msg[-1])[-1]
sequence = str(msgSequence)
if topic == b"hashblock":
print('- HASH BLOCK ('+sequence+') -')
print(binascii.hexlify(body))
elif topic == b"hashtx":
print('- HASH TX ('+sequence+') -')
print(binascii.hexlify(body))
elif topic == b"rawblock":
print('- RAW BLOCK HEADER ('+sequence+') -')
print(binascii.hexlify(body[:80]))
elif topic == b"rawtx":
print('- RAW TX ('+sequence+') -')
print(binascii.hexlify(body))
# schedule ourselves to receive the next message
asyncio.ensure_future(self.handle())
def start(self):
self.loop.add_signal_handler(signal.SIGINT, self.stop)
self.loop.create_task(self.handle())
self.loop.run_forever()
def stop(self):
self.loop.stop()
self.zmqContext.destroy()
daemon = ZMQHandler()
daemon.start()
|
py | 1a37766a38a946e7fbb66dfae84dafbdc0454b35 | import ms2pip.peptides
class TestModifications:
def test_add_from_ms2pip_modstrings(self):
mods = ms2pip.peptides.Modifications()
mods.add_from_ms2pip_modstrings([
"Oxidation,15.994915,opt,M",
"Acetyl,42.010565,opt,N-term",
"Methyl,14.01565,opt,L",
])
assert mods.modifications['ptm']["Oxidation"]["amino_acid"] == "M"
assert mods.modifications['ptm']["Acetyl"]["mass_shift"] == 42.010565
assert mods.modifications['ptm']["Methyl"]["mass_shift"] == 14.01565
def test_get_mass_shifts(self):
mods = ms2pip.peptides.Modifications()
mods.add_from_ms2pip_modstrings([
"Oxidation,15.994915,opt,M"
])
assert mods.mass_shifts["Oxidation"] == 15.994915
# Test cache clear after adding new modifications
mods.add_from_ms2pip_modstrings([
"Acetyl,42.010565,opt,N-term",
])
assert mods.mass_shifts["Acetyl"] == 42.010565
|