{
"source": "jeffcpullen/scap-security-guide",
"score": 2
}
|
#### File: tests/ssg_test_suite/rule.py
```python
from __future__ import print_function
import logging
import os
import os.path
import re
import subprocess
import collections
import json
from ssg_test_suite import oscap
from ssg_test_suite import xml_operations
from ssg_test_suite import test_env
from ssg_test_suite import common
from ssg_test_suite.log import LogHelper
import data
logging.getLogger(__name__).addHandler(logging.NullHandler())
Scenario = collections.namedtuple(
"Scenario", ["script", "context", "script_params"])
def _parse_parameters(script):
"""Parse parameters from script header"""
params = {'profiles': [],
'templates': [],
'platform': ['multi_platform_all'],
'remediation': ['all']}
with open(script, 'r') as script_file:
script_content = script_file.read()
for parameter in params:
found = re.search(r'^# {0} = ([ ,_\.\-\w]*)$'.format(parameter),
script_content,
re.MULTILINE)
if found is None:
continue
splitted = found.group(1).split(',')
params[parameter] = [value.strip() for value in splitted]
return params
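# For reference, a header that _parse_parameters() would match looks like the
# commented block below (the profile and platform names are hypothetical,
# shown only to illustrate the "# key = value, value" format; each matched
# line overrides the corresponding default in `params`):
#
#   # profiles = xccdf_org.ssgproject.content_profile_ospp
#   # platform = multi_platform_rhel
#   # remediation = none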
def get_viable_profiles(selected_profiles, datastream, benchmark):
"""Read datastream, and return set intersection of profiles of given
benchmark and those provided in `selected_profiles` parameter.
"""
valid_profiles = []
all_profiles = xml_operations.get_all_profiles_in_benchmark(
datastream, benchmark, logging)
for ds_profile_element in all_profiles:
ds_profile = ds_profile_element.attrib['id']
if 'ALL' in selected_profiles:
valid_profiles += [ds_profile]
continue
for sel_profile in selected_profiles:
if ds_profile.endswith(sel_profile):
valid_profiles += [ds_profile]
if not valid_profiles:
logging.error('No profile ends with "{0}"'
.format(", ".join(selected_profiles)))
return valid_profiles
def _run_with_stdout_logging(command, args, log_file):
log_file.write("{0} {1}\n".format(command, " ".join(args)))
subprocess.check_call(
(command,) + args, stdout=log_file, stderr=subprocess.STDOUT)
def _send_scripts(domain_ip):
remote_dir = './ssgts'
archive_file = data.create_tarball('.')
remote_archive_file = os.path.join(remote_dir, archive_file)
machine = "root@{0}".format(domain_ip)
logging.debug("Uploading scripts.")
log_file_name = os.path.join(LogHelper.LOG_DIR, "data.upload.log")
with open(log_file_name, 'a') as log_file:
args = common.SSH_ADDITIONAL_OPTS + (machine, "mkdir", "-p", remote_dir)
try:
_run_with_stdout_logging("ssh", args, log_file)
except Exception:
msg = "Cannot create directory {0}.".format(remote_dir)
logging.error(msg)
raise RuntimeError(msg)
args = (common.SSH_ADDITIONAL_OPTS
+ (archive_file, "{0}:{1}".format(machine, remote_dir)))
try:
_run_with_stdout_logging("scp", args, log_file)
except Exception:
msg = ("Cannot copy archive {0} to the target machine's directory {1}."
.format(archive_file, remote_dir))
logging.error(msg)
raise RuntimeError(msg)
args = (common.SSH_ADDITIONAL_OPTS
+ (machine, "tar xf {0} -C {1}".format(remote_archive_file, remote_dir)))
try:
_run_with_stdout_logging("ssh", args, log_file)
except Exception:
msg = "Cannot extract data tarball {0}.".format(remote_archive_file)
logging.error(msg)
raise RuntimeError(msg)
return remote_dir
def _apply_script(rule_dir, domain_ip, script):
"""Run particular test script on VM and log it's output."""
machine = "root@{0}".format(domain_ip)
logging.debug("Applying script {0}".format(script))
rule_name = os.path.basename(rule_dir)
log_file_name = os.path.join(
LogHelper.LOG_DIR, rule_name + ".prescripts.log")
with open(log_file_name, 'a') as log_file:
log_file.write('##### {0} / {1} #####\n'.format(rule_name, script))
command = "cd {0}; bash -x {1}".format(rule_dir, script)
args = common.SSH_ADDITIONAL_OPTS + (machine, command)
try:
_run_with_stdout_logging("ssh", args, log_file)
except subprocess.CalledProcessError as exc:
logging.error("Rule testing script {script} failed with exit code {rc}"
.format(script=script, rc=exc.returncode))
return False
return True
def _get_script_context(script):
"""Return context of the script."""
result = re.search(r'.*\.([^.]*)\.[^.]*$', script)
if result is None:
return None
return result.group(1)
def _matches_target(rule_dir, targets):
if 'ALL' in targets:
# we want to have them all
return True
else:
for target in targets:
if target in rule_dir:
return True
return False
def _get_scenarios(rule_dir, scripts, scenarios_regex, benchmark_cpes):
""" Returns only valid scenario files, rest is ignored (is not meant
to be executed directly.
"""
if scenarios_regex is not None:
scenarios_pattern = re.compile(scenarios_regex)
scenarios = []
for script in scripts:
if scenarios_regex is not None:
if scenarios_pattern.match(script) is None:
logging.debug("Skipping script %s - it did not match --scenarios regex" % script)
continue
script_context = _get_script_context(script)
if script_context is not None:
script_params = _parse_parameters(os.path.join(rule_dir, script))
if common.matches_platform(script_params["platform"], benchmark_cpes):
scenarios += [Scenario(script, script_context, script_params)]
else:
logging.info("Script %s is not applicable on given platform" % script)
return scenarios
class RuleChecker(oscap.Checker):
"""
Rule checks generally work like this -
for every profile that supports the rule:
- Alter the system.
- Run the scan and check that the result meets expectations.
If the test scenario passed as requested, return True;
if it failed or passed unexpectedly, return False.
The following sequence applies if the initial scan
failed as expected:
- If there are no remediations, return True.
- Run the remediation; return False if it failed.
- Return the result of the final scan of the remediated system.
"""
def __init__(self, test_env):
super(RuleChecker, self).__init__(test_env)
self._matching_rule_found = False
self.results = list()
self._current_result = None
def _run_test(self, profile, test_data):
scenario = test_data["scenario"]
rule_id = test_data["rule_id"]
LogHelper.preload_log(
logging.INFO, "Script {0} using profile {1} OK".format(scenario.script, profile),
log_target='pass')
LogHelper.preload_log(
logging.ERROR,
"Script {0} using profile {1} found issue:".format(scenario.script, profile),
log_target='fail')
runner_cls = oscap.REMEDIATION_RULE_RUNNERS[self.remediate_using]
runner = runner_cls(
self.test_env, profile, self.datastream, self.benchmark_id,
rule_id, scenario.script, self.dont_clean, self.manual_debug)
if not self._initial_scan_went_ok(runner, rule_id, scenario.context):
return False
supported_and_available_remediations = self._get_available_remediations(scenario)
if (scenario.context not in ['fail', 'error']
or not supported_and_available_remediations):
return True
if not self._remediation_went_ok(runner, rule_id):
return False
return self._final_scan_went_ok(runner, rule_id)
def _initial_scan_went_ok(self, runner, rule_id, context):
success = runner.run_stage_with_context("initial", context)
self._current_result.record_stage_result("initial_scan", success)
if not success:
msg = ("The initial scan failed for rule '{}'."
.format(rule_id))
logging.error(msg)
return success
def _get_available_remediations(self, scenario):
is_supported = set(['all'])
is_supported.add(
oscap.REMEDIATION_RUNNER_TO_REMEDIATION_MEANS[self.remediate_using])
supported_and_available_remediations = set(
scenario.script_params['remediation']).intersection(is_supported)
return supported_and_available_remediations
def _remediation_went_ok(self, runner, rule_id):
success = runner.run_stage_with_context('remediation', 'fixed')
self._current_result.record_stage_result("remediation", success)
if not success:
msg = ("The remediation failed for rule '{}'."
.format(rule_id))
logging.error(msg)
return success
def _final_scan_went_ok(self, runner, rule_id):
success = runner.run_stage_with_context('final', 'pass')
self._current_result.record_stage_result("final_scan", success)
if not success:
msg = ("The check after remediation failed for rule '{}'."
.format(rule_id))
logging.error(msg)
return success
def _test_target(self, target):
try:
remote_dir = _send_scripts(self.test_env.domain_ip)
except RuntimeError as exc:
msg = "Unable to upload test scripts: {more_info}".format(more_info=str(exc))
raise RuntimeError(msg)
self._matching_rule_found = False
with test_env.SavedState.create_from_environment(self.test_env, "tests_uploaded") as state:
for rule in data.iterate_over_rules():
if not _matches_target(rule.directory, target):
continue
self._matching_rule_found = True
self._check_rule(rule, remote_dir, state)
if not self._matching_rule_found:
logging.error("No matching rule ID found for '{0}'".format(target))
def _check_rule(self, rule, remote_dir, state):
remote_rule_dir = os.path.join(remote_dir, rule.directory)
local_rule_dir = os.path.join(data.DATA_DIR, rule.directory)
logging.info(rule.id)
logging.debug("Testing rule directory {0}".format(rule.directory))
args_list = [(s, remote_rule_dir, rule.id)
for s in _get_scenarios(local_rule_dir, rule.files, self.scenarios_regex, self.benchmark_cpes)]
state.map_on_top(self._check_and_record_rule_scenario, args_list)
def _check_and_record_rule_scenario(self, scenario, remote_rule_dir, rule_id):
self._current_result = common.RuleResult()
self._current_result.conditions = common.Scenario_conditions(
self.test_env.name, self.test_env.scanning_mode,
self.remediate_using, self.datastream)
self._current_result.scenario = common.Scenario_run(rule_id, scenario.script)
self._current_result.when = self.test_timestamp_str
self._check_rule_scenario(scenario, remote_rule_dir, rule_id)
self.results.append(self._current_result.save_to_dict())
def _check_rule_scenario(self, scenario, remote_rule_dir, rule_id):
if not _apply_script(
remote_rule_dir, self.test_env.domain_ip, scenario.script):
logging.error("Environment failed to prepare, skipping test")
self._current_result.record_stage_result("preparation", False)
return
self._current_result.record_stage_result("preparation", True)
logging.debug('Using test script {0} with context {1}'
.format(scenario.script, scenario.context))
profiles = get_viable_profiles(
scenario.script_params['profiles'], self.datastream, self.benchmark_id)
test_data = dict(scenario=scenario, rule_id=rule_id)
self.run_test_for_all_profiles(profiles, test_data)
self.executed_tests += 1
def finalize(self):
super(RuleChecker, self).finalize()
with open(os.path.join(LogHelper.LOG_DIR, "results.json"), "w") as f:
json.dump(self.results, f)
def perform_rule_check(options):
checker = RuleChecker(options.test_env)
checker.datastream = options.datastream
checker.benchmark_id = options.benchmark_id
checker.remediate_using = options.remediate_using
checker.dont_clean = options.dont_clean
checker.manual_debug = options.manual_debug
checker.benchmark_cpes = options.benchmark_cpes
checker.scenarios_regex = options.scenarios_regex
checker.test_target(options.target)
```
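Scenario scripts encode their expected scan result in the file name: `_get_script_context` above takes the second-to-last dot-separated component as the context. A small illustration using the same regular expression (the file names below are made up for the example):
```python
import re

def get_script_context(script):
    """Mirror of _get_script_context above: the context is the
    second-to-last dot-separated component of the file name."""
    result = re.search(r'.*\.([^.]*)\.[^.]*$', script)
    return None if result is None else result.group(1)

print(get_script_context("rule_applied.pass.sh"))   # -> "pass"
print(get_script_context("rule_violated.fail.sh"))  # -> "fail"
print(get_script_context("README"))                 # -> None (not a scenario)
```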
#### File: scap-security-guide/tests/test_suite.py
```python
from __future__ import print_function
import argparse
import logging
import os
import os.path
import time
import sys
ssg_dir = os.path.join(os.path.dirname(__file__), "..")
sys.path.append(ssg_dir)
from ssg_test_suite.log import LogHelper
import ssg_test_suite.oscap
import ssg_test_suite.test_env
import ssg_test_suite.profile
import ssg_test_suite.rule
from ssg_test_suite import xml_operations
def parse_args():
parser = argparse.ArgumentParser()
common_parser = argparse.ArgumentParser(add_help=False)
common_parser.set_defaults(test_env=None)
backends = common_parser.add_mutually_exclusive_group(required=True)
backends.add_argument(
"--docker", dest="docker", metavar="BASE_IMAGE",
help="Use Docker test environment with this base image.")
backends.add_argument(
"--container", dest="container", metavar="BASE_IMAGE",
help="Use container test environment with this base image.")
backends.add_argument(
"--libvirt", dest="libvirt", metavar="HYPERVISOR DOMAIN", nargs=2,
help="libvirt hypervisor and domain name. "
"Example of a hypervisor domain name tuple: qemu:///system ssg-test-suite")
common_parser.add_argument("--datastream",
dest="datastream",
metavar="DATASTREAM",
required=True,
help=("Path to the Source DataStream on this "
"machine which is going to be tested"))
benchmarks = common_parser.add_mutually_exclusive_group()
benchmarks.add_argument("--xccdf-id",
dest="xccdf_id",
metavar="REF-ID",
default=None,
help="Reference ID related to benchmark to "
"be used.")
benchmarks.add_argument("--xccdf-id-number",
dest="xccdf_id_number",
metavar="REF-ID-SELECT",
type=int,
default=0,
help="Selection number of reference ID related "
"to benchmark to be used.")
common_parser.add_argument("--loglevel",
dest="loglevel",
metavar="LOGLEVEL",
default="INFO",
help="Default level of console output")
common_parser.add_argument("--logdir",
dest="logdir",
metavar="LOGDIR",
default=None,
help="Directory to which all output is saved")
common_parser.add_argument(
"--mode",
dest="scanning_mode",
default="online",
choices=("online", "offline"),
help="What type of check to use - either "
"Online check done by running oscap inside the concerned system, or "
"offline check that examines the filesystem from the host "
"(either may require extended privileges).")
common_parser.add_argument(
"--remediate-using",
dest="remediate_using",
default="oscap",
choices=ssg_test_suite.oscap.REMEDIATION_RULE_RUNNERS.keys(),
help="What type of remediations to use - openscap online one, "
"or remediation done by using remediation roles "
"that are saved to disk beforehand.")
subparsers = parser.add_subparsers(dest="subparser_name",
help="Subcommands: profile, rule")
subparsers.required = True
parser_profile = subparsers.add_parser("profile",
help=("Testing profile-based "
"remediation applied on already "
"installed machine"),
parents=[common_parser])
parser_profile.set_defaults(func=ssg_test_suite.profile.perform_profile_check)
parser_profile.add_argument("target",
nargs="+",
metavar="DSPROFILE",
help=("Profiles to be tested, 'ALL' means every "
"profile of particular benchmark will be "
"evaluated."))
parser_rule = subparsers.add_parser("rule",
help=("Testing remediations of particular "
"rule for various situations - "
"currently not supported "
"by openscap!"),
parents=[common_parser])
parser_rule.set_defaults(func=ssg_test_suite.rule.perform_rule_check)
parser_rule.add_argument("target",
nargs="+",
metavar="RULE",
help=("Rule to be tested, 'ALL' means every "
"rule-testing scenario will be evaluated. Each "
"target is handled as a substring - so you can "
"ask for subset of all rules this way. (If you "
"type ipv6 as a target, all rules containing "
"ipv6 within id will be performed."))
parser_rule.add_argument("--debug",
dest="manual_debug",
action="store_true",
help=("If an error is encountered, all execution "
"on the VM / container will pause to allow "
"debugging."))
parser_rule.add_argument("--dontclean",
dest="dont_clean",
action="store_true",
help="Do not remove html reports of successful runs")
parser_rule.add_argument("--scenarios",
dest="scenarios_regex",
default=None,
help="Regular expression matching test scenarios to run")
return parser.parse_args()
def get_logging_dir(options):
body = 'custom'
if 'ALL' in options.target:
body = 'ALL'
generic_logdir_stem = "{0}-{1}".format(options.subparser_name, body)
if options.logdir is None:
date_string = time.strftime('%Y-%m-%d-%H%M', time.localtime())
logging_dir = os.path.join(
os.getcwd(), 'logs', '{0}-{1}'.format(
generic_logdir_stem, date_string))
logging_dir = LogHelper.find_name(logging_dir)
else:
logging_dir = LogHelper.find_name(options.logdir)
return logging_dir
def _print_available_benchmarks(xccdf_ids, n_xccdf_ids):
logging.info("The DataStream contains {0} Benchmarks".format(n_xccdf_ids))
for i in range(0, n_xccdf_ids):
logging.info("{0} - {1}".format(i, xccdf_ids[i]))
def auto_select_xccdf_id(datastream, bench_number):
xccdf_ids = xml_operations.get_all_xccdf_ids_in_datastream(datastream)
n_xccdf_ids = len(xccdf_ids)
if n_xccdf_ids == 0:
msg = ("The provided DataStream doesn't contain any Benchmark")
raise RuntimeError(msg)
if bench_number < 0 or bench_number >= n_xccdf_ids:
_print_available_benchmarks(xccdf_ids, n_xccdf_ids)
logging.info("Selected Benchmark is {0}".format(bench_number))
msg = ("Please select a valid Benchmark number")
raise RuntimeError(msg)
if n_xccdf_ids > 1:
_print_available_benchmarks(xccdf_ids, n_xccdf_ids)
logging.info("Selected Benchmark is {0}".format(bench_number))
logging.info("To select a different Benchmark, "
"use --xccdf-id-number option.")
return xccdf_ids[bench_number]
def normalize_passed_arguments(options):
if 'ALL' in options.target:
options.target = ['ALL']
if options.xccdf_id is None:
options.xccdf_id = auto_select_xccdf_id(options.datastream,
options.xccdf_id_number)
try:
bench_id = xml_operations.infer_benchmark_id_from_component_ref_id(
options.datastream, options.xccdf_id)
options.benchmark_id = bench_id
except RuntimeError as exc:
msg = "Error inferring benchmark ID from component refId: {}".format(str(exc))
raise RuntimeError(msg)
if options.docker:
options.test_env = ssg_test_suite.test_env.DockerTestEnv(
options.scanning_mode, options.docker)
logging.info(
"The base image option has been specified, "
"choosing Docker-based test environment.")
elif options.container:
options.test_env = ssg_test_suite.test_env.PodmanTestEnv(
options.scanning_mode, options.container)
logging.info(
"The base image option has been specified, "
"choosing Podman-based test environment.")
else:
hypervisor, domain_name = options.libvirt
options.test_env = ssg_test_suite.test_env.VMTestEnv(
options.scanning_mode, hypervisor, domain_name)
logging.info(
"The base image option has not been specified, "
"choosing libvirt-based test environment.")
try:
benchmark_cpes = xml_operations.benchmark_get_applicable_platforms(
options.datastream, options.benchmark_id
)
options.benchmark_cpes = benchmark_cpes
except RuntimeError as exc:
msg = "Error inferring platform from benchmark: {}".format(str(exc))
raise RuntimeError(msg)
def main():
options = parse_args()
log = logging.getLogger()
# this is the general logger level - it needs to be
# DEBUG, otherwise it silently drops everything below it
log.setLevel(logging.DEBUG)
LogHelper.add_console_logger(log, options.loglevel)
try:
normalize_passed_arguments(options)
except RuntimeError as exc:
msg = "Error occurred during options normalization: {}".format(str(exc))
logging.error(msg)
sys.exit(1)
# logging dir needs to be created based on other options
# thus we have to postprocess it
logging_dir = get_logging_dir(options)
LogHelper.add_logging_dir(log, logging_dir)
options.func(options)
if __name__ == "__main__":
main()
```
|
{
"source": "jeffcrouse/s3",
"score": 3
}
|
#### File: s3/s3/frame.py
```python
import os
import pandas as pd
import botocore
import boto3
from .funcs import get_both, disk_2_s3, open_file, read
def read_csv(s3_path, *args, **kwargs):
'''
Read a csv file from s3 into memory in a pandas dataframe
'''
return pd.read_csv(s3_path, *args, **kwargs)
def read_json(s3_path, encoding="utf-8", bytes=True, *args, **kwargs):
'''
Read a json file from s3 into memory in a pandas dataframe.
'''
return pd.read_json(s3_path, *args, **kwargs)
def to_csv(df, s3_path, acl='private', *args, **kwargs):
'''
Write the dataframe to a transient local file, then upload it to S3.
'''
temp_file_ = s3_path.split('/')[-1]
# note should just ask for kwargs.
df.to_csv(temp_file_, *args, **kwargs)
disk_2_s3(temp_file_, s3_path, acl=acl)
os.remove(temp_file_)
return "File uploaded to '%s'" % s3_path
def to_json(df, s3_path, acl='private', *args, **kwargs):
'''
Write the dataframe to a transient local file, then upload it to S3.
'''
temp_file_ = s3_path.split('/')[-1]
df.to_json(temp_file_, *args, **kwargs)
disk_2_s3(temp_file_, s3_path, acl=acl)
os.remove(temp_file_)
return "File uploaded to '%s'" % s3_path
```
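A minimal usage sketch of the helpers above. It assumes the package layout shown in the file header (`s3.frame`), that `disk_2_s3(local_path, s3_path, acl=...)` uploads a local file to the given S3 path (as the `to_csv`/`to_json` calls suggest), and that credentials and the bucket are already configured; the bucket name is hypothetical, and reading `s3://` paths directly with pandas requires s3fs:
```python
import pandas as pd
from s3 import frame  # assumes the package layout shown in the file header

df = pd.DataFrame({"city": ["Louisville", "Lexington"], "temp_f": [41, 32]})

# Write the frame to a transient local file and push it to S3.
frame.to_csv(df, "s3://example-bucket/weather/latest.csv", acl="private", index=False)

# Read it back; pandas resolves the s3:// path itself (s3fs must be installed).
df_back = frame.read_csv("s3://example-bucket/weather/latest.csv")
print(df_back.head())
```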
|
{
"source": "jeffcrouse/youtube-pi",
"score": 3
}
|
#### File: jeffcrouse/youtube-pi/youtube-pi.py
```python
import sys
import subprocess
import urllib
import requests
import json
import random
import pprint
import time
import os
import os.path
channelId = "UCceEPxhcejQiYza8bvuAA8g" # Eco-Virtual
#channelId = "UCpclRlEJ2oh6JDEJy68UjKA" # an0nymooose
#channelId = "UCzNZlJzmjomyl00ewlxHqOA" # Cool3DWorld
#channelId = "UCYfArGrC66A-vdS45DS7Qrg" # Ace's Adventures
directory = "videos"
if not os.path.exists(directory):
os.makedirs(directory)
pp = pprint.PrettyPrinter(indent=4)
videos = []
YOUTUBE_DATA_API_KEY = os.environ['YOUTUBE_DATA_API_KEY']
def populate():
query = {"channelId": channelId, "maxResults": 50, "part": "snippet,id", "key": YOUTUBE_DATA_API_KEY, "order": "date"};
feed_url = "https://www.googleapis.com/youtube/v3/search?" + urllib.urlencode(query)
r = requests.get(feed_url)
doc = json.loads(r.text)
num_items = len(doc["items"])
for item in doc["items"]:
if "videoId" in item["id"]:
video = {"id": item["id"]["videoId"], "title": item["snippet"]["title"]}
pp.pprint( video )
videos.append( video )
return
def get_video():
video = random.choice(videos)
video_url = "http://www.youtube.com/watch?v=" + video["id"]
path = "%s/%s.mp4" % (directory, video["id"])
if os.path.isfile(path):
return path
print "Downloading {0} to {1}".format(video_url, path)
cmd = ['youtube-dl', '-f', '18', '--output', 'videos/%(id)s.%(ext)s', video_url]
yt = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(res,err) = yt.communicate()
if err:
print(err)
return None
if not os.path.isfile(path):
print "Error while downloading... {0} not found".format(path)
return None
return path
def play_video():
path = get_video()
if path is not None:
print("Playing {0}".format(path))
cmd = ['omxplayer', '-b', path]
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(res,err) = p.communicate()
else:
print("Got a bad video. Sleeping.")
time.sleep( 1 )
def main(argv):
populate()
while True:
play_video()
if __name__ == "__main__":
main(sys.argv[1:])
```
|
{
"source": "jeffcsauer/esda",
"score": 2
}
|
#### File: esda/esda/getisord.py
```python
__author__ = "<NAME> <<EMAIL>>, <NAME> <<EMAIL>> "
__all__ = ["G", "G_Local"]
import warnings
from libpysal.common import np, stats
from libpysal.weights.spatial_lag import lag_spatial as slag
from libpysal.weights.util import fill_diagonal
from .tabular import _univariate_handler
from .crand import njit as _njit, crand as _crand_plus, _prepare_univariate
PERMUTATIONS = 999
class G(object):
"""
Global G Autocorrelation Statistic
Parameters
----------
y : array (n,1)
Attribute values
w : W
DistanceBand W spatial weights based on distance band
permutations : int
the number of random permutations for calculating pseudo p_values
Attributes
----------
y : array
original variable
w : W
DistanceBand W spatial weights based on distance band
permutation : int
the number of permutations
G : float
the value of statistic
EG : float
the expected value of statistic
VG : float
the variance of G under normality assumption
z_norm : float
standard normal test statistic
p_norm : float
p-value under normality assumption (one-sided)
sim : array
(if permutations > 0)
vector of G values for permutated samples
p_sim : float
p-value based on permutations (one-sided)
null: spatial randomness
alternative: the observed G is extreme; it is either extremely high or extremely low
EG_sim : float
average value of G from permutations
VG_sim : float
variance of G from permutations
seG_sim : float
standard deviation of G under permutations.
z_sim : float
standardized G based on permutations
p_z_sim : float
p-value based on standard normal approximation from
permutations (one-sided)
Notes
-----
Moments are based on normality assumption.
For technical details see :cite:`Getis_2010` and :cite:`Ord_2010`.
Examples
--------
>>> import libpysal
>>> import numpy
>>> numpy.random.seed(10)
Preparing a point data set
>>> points = [(10, 10), (20, 10), (40, 10), (15, 20), (30, 20), (30, 30)]
Creating a weights object from points
>>> w = libpysal.weights.DistanceBand(points,threshold=15)
>>> w.transform = "B"
Preparing a variable
>>> y = numpy.array([2, 3, 3.2, 5, 8, 7])
Applying Getis and Ord G test
>>> from esda.getisord import G
>>> g = G(y,w)
Examining the results
>>> round(g.G, 3)
0.557
>>> round(g.p_norm, 3)
0.173
"""
def __init__(self, y, w, permutations=PERMUTATIONS):
y = np.asarray(y).flatten()
self.n = len(y)
self.y = y
w.transform = "B"
self.w = w
self.permutations = permutations
self.__moments()
self.y2 = y * y
y = y.reshape(
len(y), 1
) # Ensure that y is an n by 1 vector, otherwise y*y.T == y*y
self.den_sum = (y * y.T).sum() - (y * y).sum()
self.G = self.__calc(self.y)
self.z_norm = (self.G - self.EG) / np.sqrt(self.VG)
self.p_norm = 1.0 - stats.norm.cdf(np.abs(self.z_norm))
if permutations:
sim = [
self.__calc(np.random.permutation(self.y)) for i in range(permutations)
]
self.sim = sim = np.array(sim)
above = sim >= self.G
larger = sum(above)
if (self.permutations - larger) < larger:
larger = self.permutations - larger
self.p_sim = (larger + 1.0) / (permutations + 1.0)
self.EG_sim = sum(sim) / permutations
self.seG_sim = sim.std()
self.VG_sim = self.seG_sim ** 2
self.z_sim = (self.G - self.EG_sim) / self.seG_sim
self.p_z_sim = 1.0 - stats.norm.cdf(np.abs(self.z_sim))
def __moments(self):
y = self.y
n = self.n
w = self.w
n2 = n * n
s0 = w.s0
self.EG = s0 / (n * (n - 1))
s02 = s0 * s0
s1 = w.s1
s2 = w.s2
b0 = (n2 - 3 * n + 3) * s1 - n * s2 + 3 * s02
b1 = (-1.0) * ((n2 - n) * s1 - 2 * n * s2 + 6 * s02)
b2 = (-1.0) * (2 * n * s1 - (n + 3) * s2 + 6 * s02)
b3 = 4 * (n - 1) * s1 - 2 * (n + 1) * s2 + 8 * s02
b4 = s1 - s2 + s02
self.b0 = b0
self.b1 = b1
self.b2 = b2
self.b3 = b3
self.b4 = b4
y2 = y * y
y3 = y * y2
y4 = y2 * y2
EG2 = b0 * (sum(y2) ** 2) + b1 * sum(y4) + b2 * (sum(y) ** 2) * sum(y2)
EG2 += b3 * sum(y) * sum(y3) + b4 * (sum(y) ** 4)
EG2NUM = EG2
EG2DEN = ((sum(y) ** 2 - sum(y2)) ** 2) * n * (n - 1) * (n - 2) * (n - 3)
self.EG2 = EG2NUM / EG2DEN
self.VG = self.EG2 - self.EG ** 2
def __calc(self, y):
yl = slag(self.w, y)
self.num = y * yl
return self.num.sum() / self.den_sum
@property
def _statistic(self):
""" Standardized accessor for esda statistics"""
return self.G
@classmethod
def by_col(
cls, df, cols, w=None, inplace=False, pvalue="sim", outvals=None, **stat_kws
):
"""
Function to compute a G statistic on a dataframe
Parameters
----------
df : pandas.DataFrame
a pandas dataframe with a geometry column
cols : string or list of string
name or list of names of columns to use to compute the statistic
w : pysal weights object
a weights object aligned with the dataframe. If not provided, this
is searched for in the dataframe's metadata
inplace : bool
a boolean denoting whether to operate on the dataframe inplace or to
return a series containing the results of the computation. If
operating inplace, the derived columns will be named 'column_g'
pvalue : string
a string denoting which pvalue should be returned. Refer to
the G statistic's documentation for available p-values
outvals : list of strings
list of arbitrary attributes to return as columns from the
G statistic
**stat_kws : keyword arguments
options to pass to the underlying statistic. For this, see the
documentation for the G statistic.
Returns
--------
If inplace, None, and operation is conducted on dataframe in memory. Otherwise,
returns a copy of the dataframe with the relevant columns attached.
"""
return _univariate_handler(
df,
cols,
w=w,
inplace=inplace,
pvalue=pvalue,
outvals=outvals,
stat=cls,
swapname=cls.__name__.lower(),
**stat_kws,
)
class G_Local(object):
"""
Generalized Local G Autocorrelation
Parameters
----------
y : array
variable
w : W
DistanceBand, weights instance that is based on threshold distance
and is assumed to be aligned with y
transform : {'R', 'B'}
the type of w, either 'B' (binary) or 'R' (row-standardized)
permutations : int
the number of random permutations for calculating
pseudo p values
star : boolean or float
whether or not to include the focal observation in the sums (default: False).
If a row-transformed weight is provided, then this is the default
value to use within the spatial lag. Generally, weights should be
provided in binary form, and standardization/self-weighting will be
handled by the function itself.
Attributes
----------
y : array
original variable
w : DistanceBand W
original weights object
permutations : int
the number of permutations
Gs : array
of floats, the value of the original G statistic in Getis & Ord (1992)
EGs : float
expected value of Gs under normality assumption
the value is a scalar, since the expectation is identical
across all observations
VGs : array
of floats, variance values of Gs under normality assumption
Zs : array
of floats, standardized Gs
p_norm : array
of floats, p-value under normality assumption (one-sided)
for two-sided tests, this value should be multiplied by 2
sim : array
of arrays of floats (if permutations>0), vector of I values
for permutated samples
p_sim : array
of floats, p-value based on permutations (one-sided)
null - spatial randomness
alternative - the observed G is extreme; it is either extremely high or extremely low
EG_sim : array
of floats, average value of G from permutations
VG_sim : array
of floats, variance of G from permutations
seG_sim : array
of floats, standard deviation of G under permutations.
z_sim : array
of floats, standardized G based on permutations
p_z_sim : array
of floats, p-value based on standard normal approximation from
permutations (one-sided)
Notes
-----
To compute the moments of Gs under the normality assumption,
PySAL assumes w is either binary or row-standardized.
For a binary weights object, the weight value for self is 1.
For a row-standardized weights object, the weight value for self is
1/(the number of its neighbors + 1).
For technical details see :cite:`Getis_2010` and :cite:`Ord_2010`.
Examples
--------
>>> import libpysal
>>> import numpy
>>> numpy.random.seed(10)
Preparing a point data set
>>> points = [(10, 10), (20, 10), (40, 10), (15, 20), (30, 20), (30, 30)]
Creating a weights object from points
>>> w = libpysal.weights.DistanceBand(points,threshold=15)
Preparing a variable
>>> y = numpy.array([2, 3, 3.2, 5, 8, 7])
Applying Getis and Ord local G test using a binary weights object
>>> from esda.getisord import G_Local
>>> lg = G_Local(y,w,transform='B')
Examining the results
>>> lg.Zs
array([-1.0136729 , -0.04361589, 1.31558703, -0.31412676, 1.15373986,
1.77833941])
>>> round(lg.p_sim[0], 3)
0.101
p-value based on standard normal approximation from permutations
>>> round(lg.p_z_sim[0], 3)
0.154
>>> numpy.random.seed(10)
Applying Getis and Ord local G* test using a binary weights object
>>> lg_star = G_Local(y,w,transform='B',star=True)
Examining the results
>>> lg_star.Zs
array([-1.39727626, -0.28917762, 0.65064964, -0.28917762, 1.23452088,
2.02424331])
>>> round(lg_star.p_sim[0], 3)
0.101
>>> numpy.random.seed(12345)
Applying Getis and Ord local G test using a row-standardized weights object
>>> lg = G_Local(y,w,transform='R')
Examining the results
>>> lg.Zs
array([-0.62074534, -0.01780611, 1.31558703, -0.12824171, 0.28843496,
1.77833941])
>>> round(lg.p_sim[0], 3)
0.103
>>> numpy.random.seed(10)
Applying Getis and Ord local G* test using a row-standardized weights object
>>> lg_star = G_Local(y,w,transform='R',star=True)
Examining the results
>>> lg_star.Zs
array([-0.62488094, -0.09144599, 0.41150696, -0.09144599, 0.24690418,
1.28024388])
>>> round(lg_star.p_sim[0], 3)
0.101
"""
def __init__(
self,
y,
w,
transform="R",
permutations=PERMUTATIONS,
star=False,
keep_simulations=True,
n_jobs=-1,
seed=None,
):
y = np.asarray(y).flatten()
self.n = len(y)
self.y = y
w, star = _infer_star_and_structure_w(w, star, transform)
w.transform = transform
self.w_transform = transform
self.w = w
self.permutations = permutations
self.star = star
self.calc()
self.p_norm = 1 - stats.norm.cdf(np.abs(self.Zs))
if permutations:
self.p_sim, self.rGs = _crand_plus(
y,
w,
self.Gs,
permutations,
keep_simulations,
n_jobs=n_jobs,
stat_func=_g_local_star_crand if star else _g_local_crand,
scaling=y.sum(),
seed=seed,
)
if keep_simulations:
self.sim = sim = self.rGs.T
self.EG_sim = sim.mean(axis=0)
self.seG_sim = sim.std(axis=0)
self.VG_sim = self.seG_sim * self.seG_sim
self.z_sim = (self.Gs - self.EG_sim) / self.seG_sim
self.p_z_sim = 1 - stats.norm.cdf(np.abs(self.z_sim))
def __crand(self, keep_simulations):
y = self.y
if keep_simulations:
rGs = np.zeros((self.n, self.permutations))
larger = np.zeros((self.n,))
n_1 = self.n - 1
rid = list(range(n_1))
prange = list(range(self.permutations))
k = self.w.max_neighbors + 1
rids = np.array([np.random.permutation(rid)[0:k] for i in prange])
ids = np.arange(self.w.n)
ido = self.w.id_order
wc = self.__getCardinalities()
if self.w_transform == "r":
den = np.array(wc) + self.star
else:
den = np.ones(self.w.n)
for i in range(self.w.n):
idsi = ids[ids != i]
np.random.shuffle(idsi)
yi_star = y[i] * self.star
wci = wc[i]
rGs_i = (y[idsi[rids[:, 0:wci]]]).sum(1) + yi_star
rGs_i = (np.array(rGs_i) / den[i]) / (self.y_sum - (1 - self.star) * y[i])
if keep_simulations:
rGs[i] = rGs_i
larger[i] = (rGs_i >= self.Gs[i]).sum()
if keep_simulations:
self.rGs = rGs
below = (self.permutations - larger) < larger
larger[below] = self.permutations - larger[below]
self.p_sim = (larger + 1) / (self.permutations + 1)
def __getCardinalities(self):
ido = self.w.id_order
self.wc = np.array([self.w.cardinalities[ido[i]] for i in range(self.n)])
return self.wc
def calc(self):
w = self.w
W = w.sparse
self.y_sum = self.y.sum()
y = self.y
remove_self = not self.star
N = self.w.n - remove_self
statistic = (W @ y) / (y.sum() - y * remove_self)
# ----------------------------------------------------#
# compute moments necessary for analytical inference #
# ----------------------------------------------------#
empirical_mean = (y.sum() - y * remove_self) / N
# variance looks complex, yes, but it obtains from E[x^2] - E[x]^2.
# So, break it down to allow subtraction of the self-neighbor.
mean_of_squares = ((y ** 2).sum() - (y ** 2) * remove_self) / N
empirical_variance = mean_of_squares - empirical_mean ** 2
# Since we have corrected the diagonal, this should work
cardinality = np.asarray(W.sum(axis=1)).squeeze()
expected_value = cardinality / N
expected_variance = (
cardinality
* (N - cardinality)
/ (N - 1)
* (1 / N ** 2)
* (empirical_variance / (empirical_mean ** 2))
)
z_scores = (statistic - expected_value) / np.sqrt(expected_variance)
self.Gs = statistic
self.EGs = expected_value
self.VGs = expected_variance
self.Zs = z_scores
@property
def _statistic(self):
"""Standardized accessor for esda statistics"""
return self.Gs
@classmethod
def by_col(
cls, df, cols, w=None, inplace=False, pvalue="sim", outvals=None, **stat_kws
):
"""
Function to compute a G_Local statistic on a dataframe
Parameters
----------
df : pandas.DataFrame
a pandas dataframe with a geometry column
cols : string or list of string
name or list of names of columns to use to compute the statistic
w : pysal weights object
a weights object aligned with the dataframe. If not provided, this
is searched for in the dataframe's metadata
inplace : bool
a boolean denoting whether to operate on the dataframe inplace or to
return a series containing the results of the computation. If
operating inplace, the derived columns will be named 'column_g_local'
pvalue : string
a string denoting which pvalue should be returned. Refer to
the G_Local statistic's documentation for available p-values
outvals : list of strings
list of arbitrary attributes to return as columns from the
G_Local statistic
**stat_kws : keyword arguments
options to pass to the underlying statistic. For this, see the
documentation for the G_Local statistic.
Returns
-------
pandas.DataFrame
If inplace, None, and operation is conducted on dataframe
in memory. Otherwise, returns a copy of the dataframe with
the relevant columns attached.
"""
return _univariate_handler(
df,
cols,
w=w,
inplace=inplace,
pvalue=pvalue,
outvals=outvals,
stat=cls,
swapname=cls.__name__.lower(),
**stat_kws,
)
def _infer_star_and_structure_w(weights, star, transform):
assert transform.lower() in ("r", "b"), (
'Transforms must be binary "b" or row-standardized "r". '
f"Received: {transform}"
)
adj_matrix = weights.sparse
diagonal = adj_matrix.diagonal()
zero_diagonal = (diagonal == 0).all()
# Gi has a zero diagonal, Gi* has a nonzero diagonal
star = (not zero_diagonal) if star is None else star
# Want zero diagonal but do not have it
if (not zero_diagonal) & (star is False):
weights = fill_diagonal(weights, 0)
# Want nonzero diagonal and have it
elif (not zero_diagonal) & (star is True):
weights = weights
# Want zero diagonal and have it
elif zero_diagonal & (star is False):
weights = weights
# Want nonzero diagonal and do not have it
elif zero_diagonal & (star is True):
# if the input is binary or requested transform is binary,
# set the diagonal to 1.
if transform.lower() == "b" or weights.transform.lower() == "b":
weights = fill_diagonal(weights, 1)
# if we know the target is row-standardized, use the row max
# this works successfully for effectively binary but "O"-transformed input
elif transform.lower() == "r":
# This warning is presented in the documentation as well
warnings.warn(
"Gi* requested, but (a) weights are already row-standardized,"
" (b) no weights are on the diagonal, and"
" (c) no default value supplied to star. Assuming that the"
" self-weight is equivalent to the maximum weight in the"
" row. To use a different default (like, .5), set `star=.5`,"
" or use libpysal.weights.fill_diagonal() to set the diagonal"
" values of your weights matrix and use `star=None` in Gi_Local."
)
weights = fill_diagonal(
weights, np.asarray(adj_matrix.max(axis=1).todense()).flatten()
)
else: # star was something else, so try to fill the weights with it
try:
weights = fill_diagonal(weights, star)
except Exception:
raise TypeError(
f"Type of star ({type(star)}) not understood."
f" Must be an integer, boolean, float, or numpy.ndarray."
)
star = (weights.sparse.diagonal() > 0).any()
weights.transform = transform
return weights, star
# --------------------------------------------------------------
# Conditional Randomization Function Implementations
# --------------------------------------------------------------
@_njit(fastmath=True)
def _g_local_crand(i, z, permuted_ids, weights_i, scaling):
other_weights = weights_i[1:]
zi, zrand = _prepare_univariate(i, z, permuted_ids, other_weights)
return (zrand @ other_weights) / (scaling - zi)
@_njit(fastmath=True)
def _g_local_star_crand(i, z, permuted_ids, weights_i, scaling):
self_weight = weights_i[0]
other_weights = weights_i[1:]
zi, zrand = _prepare_univariate(i, z, permuted_ids, other_weights)
return (zrand @ other_weights + self_weight * zi) / scaling
if __name__ == "__main__":
import geopandas, numpy, esda, importlib
import matplotlib.pyplot as plt
from libpysal import weights, examples
df = geopandas.read_file(examples.get_path("NAT.shp"))
w = weights.Rook.from_dataframe(df)
for transform in ("r", "b"):
for star in (True, False):
test = esda.getisord.G_Local(df.GI89, w, transform=transform, star=star)
out = test._calc2()
(
statistic,
expected_value,
expected_variance,
z_scores,
empirical_mean,
empirical_variance,
) = out
numpy.testing.assert_allclose(statistic, test.Gs)
numpy.testing.assert_allclose(expected_value, test.EGs)
numpy.testing.assert_allclose(expected_variance, test.VGs)
numpy.testing.assert_allclose(z_scores, test.Zs)
numpy.testing.assert_allclose(empirical_mean, test.yl_mean)
numpy.testing.assert_allclose(empirical_variance, test.s2)
# Also check that the None configuration works
test = esda.getisord.G_Local(df.GI89, w, star=None)
```
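The self-weight convention described in the G_Local notes (1 for a binary W, 1/(k+1) for a row-standardized one) can be illustrated with a short sketch. It uses libpysal's `lat2W` plus the same `fill_diagonal` helper imported above, and is only an illustration, not part of the module:
```python
from libpysal.weights import lat2W
from libpysal.weights.util import fill_diagonal

w = lat2W(3, 3)                  # binary rook weights on a 3x3 grid
w_star = fill_diagonal(w, 1.0)   # Gi*: self-weight of 1 for a binary W
w_star.transform = "R"           # row-standardize after adding the diagonal

# Unit 0 is a corner cell with k=2 neighbors, so its self-weight becomes 1/(k+1).
print(w_star[0][0])              # ~0.3333
```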
#### File: esda/tests/test_losh.py
```python
import unittest
import libpysal
from libpysal.common import pandas, RTOL, ATOL
from esda.losh import LOSH
import numpy as np
PANDAS_EXTINCT = pandas is None
class Losh_Tester(unittest.TestCase):
def setUp(self):
np.random.seed(10)
self.w = libpysal.io.open(libpysal.examples.get_path("stl.gal")).read()
f = libpysal.io.open(libpysal.examples.get_path("stl_hom.txt"))
self.y = np.array(f.by_col['HR8893'])
def test_losh(self):
ls = LOSH(connectivity=self.w, inference="chi-square").fit(self.y)
self.assertAlmostEqual(ls.Hi[0], 0.77613471)
self.assertAlmostEqual(ls.pval[0], 0.22802201)
suite = unittest.TestSuite()
test_classes = [
Losh_Tester
]
for i in test_classes:
a = unittest.TestLoader().loadTestsFromTestCase(i)
suite.addTest(a)
if __name__ == "__main__":
runner = unittest.TextTestRunner()
runner.run(suite)
```
|
{
"source": "jeffdev1111/Fdmaxx",
"score": 2
}
|
#### File: Fdmaxx/FoodMax/views.py
```python
import json
from django.core.mail import send_mail
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render, redirect, get_object_or_404
from django.views import generic
from .models import *
from carton.cart import Cart
# Create your views here.
def index(request):
coverage = Coverage.objects.all()
dish = Submenu.objects.all()
return render(request, 'foodMax/index.html', context={
'coverage': coverage,
'dish': dish
})
class LocationList(generic.ListView):
model = Coverage
def get(self, request, **kwargs):
coverage = Coverage.objects.all()
return render(request, self.template_name,
{'coverage': coverage})
def post(self, request, **kwargs):
return redirect('restaurant')
template_name = 'foodMax/index.html'
def restaurants(request):
coverage = Coverage.objects.all()
searchWord = request.POST.get('location', '')
coverageIndex = Coverage.objects.filter(name__icontains=searchWord)
coverageDetail = CoverageDetail.objects.filter(coverage__name__icontains=searchWord)
restaurants_found = Restaurant.objects.filter(coverage__name__icontains=searchWord)
number_of_restaurants_found = Restaurant.objects.filter(coverage__name__icontains=searchWord).count()
if searchWord == "":
return redirect('home')
else:
location = "foodMax/restaurants.html"
return render(request, location, context={
'coverageDetail': coverageDetail,
'coverageIndex': coverageIndex,
'search_word': searchWord,
'restaurant': restaurants_found,
'coverage': coverage,
'number_of_restaurants_found': number_of_restaurants_found
})
def restaurants_detail(request, pk):
cart = Cart(request.session)
location = request.GET.get('ln')
restaurant = get_object_or_404(Restaurant, pk=pk)
restaurant_selected_cart = request.session.get('restaurant_cart')
check = (pk == restaurant_selected_cart)
restaurant_selected_cart_detail = Restaurant.objects.filter(id=restaurant_selected_cart)
coverageDetail = CoverageDetail.objects.filter(restaurant__id=pk).filter(coverage_id=location)
price = cart.total
return render(request, 'foodMax/restaurants_detail.html', context={
'restaurant': restaurant,
'coverageDetail': coverageDetail,
'cart': cart,
'price': price,
'check': check,
'restaurant_selected_cart': restaurant_selected_cart,
'restaurant_selected_cart_detail': restaurant_selected_cart_detail,
})
def add(request):
if request.session.get('restaurant_cart') is None:
response = HttpResponse({"error": "there was an error"})
response.status_code = 403 # To announce that the user isn't allowed to publish
return response
if request.session.get('restaurant_cart'):
restaurant_selected_cart = request.session.get('restaurant_cart')
# restaurant_selected_cart = request.session.get('restaurant_cart', 0)
restaurant = request.GET.get('dr')
if (restaurant == restaurant_selected_cart):
cart = Cart(request.session)
product = Submenu.objects.get(id=request.GET.get('dish_id'))
cart.add(product, price=product.price, )
return HttpResponse("Added")
else:
response = HttpResponse({"error": "there was an error"})
response.status_code = 403 # To announce that the user isn't allowed to publish
return response
else:
restaurant = request.GET.get('dr')
cart = Cart(request.session)
restaurant_selected_cart = request.session.get('restaurant_cart', restaurant)
product = Submenu.objects.get(id=request.GET.get('dish_id'))
cart.add(product, price=product.price, )
return HttpResponse("Added")
def remove(request):
cart = Cart(request.session)
product = Submenu.objects.get(id=request.GET.get('dish_id'))
cart.remove(product)
cart.update_session()
return HttpResponse("Removed")
def add_quantity(request):
cart = Cart(request.session)
product = Submenu.objects.get(id=request.GET.get('dish_id'))
quantity = request.GET.get('quantity')
quantity = int(quantity)
cart.set_quantity(product, quantity + 1)
return HttpResponse("Quantity increased")
def minus_quantity(request):
cart = Cart(request.session)
product = Submenu.objects.get(id=request.GET.get('dish_id'))
quantity = request.GET.get('quantity')
quantity = int(quantity)
cart.set_quantity(product, quantity - 1)
return HttpResponse("Quantity decreased")
def email_order(request):
cart = Cart(request.session).cart_serializable
restaurant = get_object_or_404(Restaurant, pk=request.GET.get('restaurant'))
send_mail('New Order', json.dumps(cart), '<EMAIL>', [restaurant.email, ])
return HttpResponse("Email sent")
def checkout(request):
cart = Cart(request.session)
restaurant_selected_cart = request.session.get('restaurant_cart')
restaurant = Restaurant.objects.filter(id=restaurant_selected_cart)
return render(request, 'foodMax/checkout.html', context={
'restaurant_selected_cart': restaurant_selected_cart,
'restaurant': restaurant,
'cart':cart,
})
def newCart(request):
restaurant = request.GET.get('dr')
cart = Cart(request.session)
request.session['restaurant_cart'] = restaurant
cart.clear()
return HttpResponse("New cart created")
def harlie(request):
return render(request,'foodMax/harlie.html')
```
|
{
"source": "jeff-dh/py_scadparser",
"score": 3
}
|
#### File: jeff-dh/py_scadparser/scad_ast.py
```python
from enum import Enum
class ScadTypes(Enum):
GLOBAL_VAR = 0
MODULE = 1
FUNCTION = 2
USE = 3
INCLUDE = 4
PARAMETER = 5
class ScadObject:
def __init__(self, scadType):
self.scadType = scadType
def getType(self):
return self.scadType
class ScadGlobalVar(ScadObject):
def __init__(self, name):
super().__init__(ScadTypes.GLOBAL_VAR)
self.name = name
class ScadCallable(ScadObject):
def __init__(self, name, parameters, scadType):
super().__init__(scadType)
self.name = name
self.parameters = parameters
def __repr__(self):
return f'{self.name} ({self.parameters})'
class ScadModule(ScadCallable):
def __init__(self, name, parameters):
super().__init__(name, parameters, ScadTypes.MODULE)
class ScadFunction(ScadCallable):
def __init__(self, name, parameters):
super().__init__(name, parameters, ScadTypes.FUNCTION)
class ScadParameter(ScadObject):
def __init__(self, name, optional=False):
super().__init__(ScadTypes.PARAMETER)
self.name = name
self.optional = optional
def __repr__(self):
return self.name + "=None" if self.optional else self.name
```
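A short usage example of the node classes defined above; the module and parameter names are invented for illustration:
```python
from scad_ast import ScadModule, ScadParameter, ScadTypes

# Build the AST node for a hypothetical "module rounded_box(size, r=2)".
params = [ScadParameter("size"), ScadParameter("r", optional=True)]
box = ScadModule("rounded_box", params)

print(box.getType() is ScadTypes.MODULE)  # True
print(box)                                # rounded_box ([size, r=None])
```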
|
{
"source": "jeffdico/falcon-jinja",
"score": 2
}
|
#### File: falcon-jinja/tests/test_template.py
```python
import falcon
def test_headers(client):
resp = client.simulate_get('/first')
assert resp.headers['content-type'] == falcon.MEDIA_HTML
assert resp.status == falcon.HTTP_200
assert resp.status_code == 200
def test_html_p_tag(client, soup, jinja_context):
resp = client.simulate_get('/first')
soup = soup(resp.text)
assert soup.find('p').text == jinja_context['quote']
def test_html_li_tags(client, soup, jinja_array_context):
resp = client.simulate_get('/second')
html = soup(resp.text)
result = [li.text for li in html.find_all('li')]
assert result == jinja_array_context['frameworks']
```
|
{
"source": "jeff-dillon/faux-snow",
"score": 3
}
|
#### File: faux-snow/test/fauxsnow_test.py
```python
import unittest
from fauxsnow import Resort, ResortModel, Forecast, ForecastPeriod, ForecastModel, ForecastAPILoader, FauxSnow
class TestFS(unittest.TestCase):
TEST_FORECASTS_FILE = 'test/test_forecasts_data.json'
TEST_SKI_RESORTS_FILE = 'data/ski_resorts.json'
def test_get_all_resorts(self):
model = ResortModel()
resorts = model.get_all_resorts(self.TEST_SKI_RESORTS_FILE)
self.assertIsInstance(resorts, list)
self.assertEqual(len(resorts), 25)
for r in resorts:
self.assertIsInstance(r, Resort)
def test_get_resort_by_id(self):
model = ResortModel()
resort = model.get_resort_by_id('snowshoe', self.TEST_SKI_RESORTS_FILE)
self.assertIsInstance(resort, Resort)
self.assertEqual(resort.state, "West Virginia")
def test_get_all_forecasts(self):
model = ForecastModel()
forecasts = model.get_all_forecasts(self.TEST_FORECASTS_FILE)
self.assertIsInstance(forecasts, list)
self.assertEqual(len(forecasts), 17)
for f in forecasts:
self.assertIsInstance(f, Forecast)
self.assertIsInstance(f.periods, list)
self.assertEqual(len(f.periods), 7)
for p in f.periods:
self.assertIsInstance(p, ForecastPeriod)
def test_get_forecast_by_resort_id(self):
model = ForecastModel()
forecast = model.get_forecast_by_resort_id('snowshoe',self.TEST_FORECASTS_FILE)
self.assertIsInstance(forecast, Forecast)
self.assertEqual(forecast.resort_id, 'snowshoe')
self.assertIsInstance(forecast.periods, list)
self.assertEqual(len(forecast.periods), 7)
for p in forecast.periods:
self.assertIsInstance(p, ForecastPeriod)
def test_calc_celcius(self):
fs = FauxSnow()
self.assertEqual(fs.calc_celcius(41), 5)
self.assertEqual(fs.calc_celcius(32), 0)
self.assertEqual(fs.calc_celcius(20), -7)
def test_calc_fahrenheit(self):
fs = FauxSnow()
self.assertEqual(fs.calc_fahrenheit(5), 41)
self.assertEqual(fs.calc_fahrenheit(0), 32)
self.assertEqual(fs.calc_fahrenheit(-7), 19)
def test_is_good_conditions(self):
fs = FauxSnow()
self.assertTrue(fs.conditions_are_good(18,5))
self.assertFalse(fs.conditions_are_good(50,100))
def test_calc_conditions(self):
fs = FauxSnow()
self.assertEqual(fs.calc_conditions("::S",0.2,32,80),"")
self.assertEqual(fs.calc_conditions("::S",0.2,28,10),"Faux")
self.assertEqual(fs.calc_conditions("::S",3.2,32,80),"Snow")
```
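For reference, a conversion sketch consistent with the values the temperature tests expect. This is not the project's `FauxSnow` implementation, just rounded Fahrenheit/Celsius conversions that reproduce the asserted numbers:
```python
def calc_celcius(fahrenheit: float) -> int:
    # round((F - 32) * 5/9): 41 -> 5, 32 -> 0, 20 -> -7
    return round((fahrenheit - 32) * 5 / 9)

def calc_fahrenheit(celsius: float) -> int:
    # round(C * 9/5 + 32): 5 -> 41, 0 -> 32, -7 -> 19
    return round(celsius * 9 / 5 + 32)
```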
|
{
"source": "jeffdjet/robotframework-imaplibrary",
"score": 2
}
|
#### File: src/ImapLibrary/__init__.py
```python
from email import message_from_string
from imaplib import IMAP4, IMAP4_SSL
from re import findall
from time import sleep, time
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
from builtins import str as ustr
from ImapLibrary.version import get_version
__version__ = get_version()
class ImapLibrary(object):
"""ImapLibrary is an email testing library for [http://goo.gl/lES6WM|Robot Framework].
*Deprecated Keywords Warning*
These keywords will be removed within the next 3 to 5 releases.
| *Deprecated Keyword* | *Alternative Keyword* |
| `Open Link From Mail` | `Open Link From Email` |
| `Mark As Read` | `Mark All Emails As Read` |
| `Wait For Mail` | `Wait For Email` |
Example:
| `Open Mailbox` | host=imap.domain.com | user=email@<EMAIL>.<EMAIL> | password=<PASSWORD> |
| ${LATEST} = | `Wait For Email` | sender=<EMAIL> | timeout=300 |
| ${HTML} = | `Open Link From Email` | ${LATEST} | |
| `Should Contain` | ${HTML} | address has been updated | |
| `Close Mailbox` | | | |
Multipart Email Example:
| `Open Mailbox` | host=imap.domain.com | user=<EMAIL> | password=<PASSWORD> |
| ${LATEST} = | `Wait For Email` | sender=<EMAIL> | timeout=300 |
| ${parts} = | `Walk Multipart Email` | ${LATEST} | |
| :FOR | ${i} | IN RANGE | ${parts} |
| \\ | `Walk Multipart Email` | ${LATEST} | |
| \\ | ${ctype} = | `Get Multipart Content Type` | |
| \\ | `Continue For Loop If` | '${ctype}' != 'text/html' | |
| \\ | ${payload} = | `Get Multipart Payload` | decode=True |
| \\ | `Should Contain` | ${payload} | your email |
| \\ | ${HTML} = | `Open Link From Email` | ${LATEST} |
| \\ | `Should Contain` | ${HTML} | Your email |
| `Close Mailbox` | | | |
"""
PORT = 143
PORT_SECURE = 993
FOLDER = 'INBOX'
ROBOT_LIBRARY_SCOPE = 'GLOBAL'
ROBOT_LIBRARY_VERSION = __version__
def __init__(self):
"""ImapLibrary can be imported without argument.
Examples:
| = Keyword Definition = | = Description = |
| Library `|` ImapLibrary | Initiate Imap library |
"""
self._email_index = None
self._imap = None
self._mails = []
self._mp_iter = None
self._mp_msg = None
self._part = None
def close_mailbox(self):
"""Close IMAP email client session.
Examples:
| Close Mailbox |
"""
self._imap.close()
def delete_all_emails(self):
"""Delete all emails.
Examples:
| Delete All Emails |
"""
for mail in self._mails:
self.delete_email(mail)
self._imap.expunge()
def delete_email(self, email_index):
"""Delete email on given ``email_index``.
Arguments:
- ``email_index``: An email index to identity the email message.
Examples:
| Delete Email | INDEX |
"""
self._imap.uid('store', email_index, '+FLAGS', r'(\DELETED)')
self._imap.expunge()
def get_email_body(self, email_index):
"""Returns the decoded email body on multipart email message,
otherwise returns the body text.
Arguments:
- ``email_index``: An email index to identity the email message.
Examples:
| Get Email Body | INDEX |
"""
if self._is_walking_multipart(email_index):
body = self.get_multipart_payload(decode=True)
else:
body = self._imap.uid('fetch',
email_index,
'(BODY[TEXT])')[1][0][1].\
decode('quoted-printable')
return body
def get_links_from_email(self, email_index):
"""Returns all links found in the email body from given ``email_index``.
Arguments:
- ``email_index``: An email index to identity the email message.
Examples:
| Get Links From Email | INDEX |
"""
body = self.get_email_body(email_index)
return findall(r'href=[\'"]?([^\'" >]+)', body)
def get_matches_from_email(self, email_index, pattern):
"""Returns all Regular Expression ``pattern`` found in the email body
from given ``email_index``.
Arguments:
- ``email_index``: An email index to identity the email message.
- ``pattern``: It consists of one or more character literals, operators, or constructs.
Examples:
| Get Matches From Email | INDEX | PATTERN |
"""
body = self.get_email_body(email_index)
return findall(pattern, body)
def get_multipart_content_type(self):
"""Returns the content type of current part of selected multipart email message.
Examples:
| Get Multipart Content Type |
"""
return self._part.get_content_type()
def get_multipart_field(self, field):
"""Returns the value of given header ``field`` name.
Arguments:
- ``field``: A header field name: ``From``, ``To``, ``Subject``, ``Date``, etc.
All available header field names of an email message can be found by running
`Get Multipart Field Names` keyword.
Examples:
| Get Multipart Field | Subject |
"""
return self._mp_msg[field]
def get_multipart_field_names(self):
"""Returns all available header field names of selected multipart email message.
Examples:
| Get Multipart Field Names |
"""
return list(self._mp_msg.keys())
def get_multipart_payload(self, decode=False):
"""Returns the payload of current part of selected multipart email message.
Arguments:
- ``decode``: An indicator flag to decode the email message. (Default False)
Examples:
| Get Multipart Payload |
| Get Multipart Payload | decode=True |
"""
payload = self._part.get_payload(decode=decode)
charset = self._part.get_content_charset()
if charset is not None:
return payload.decode(charset)
return payload
def mark_all_emails_as_read(self):
"""Mark all received emails as read.
Examples:
| Mark All Emails As Read |
"""
for mail in self._mails:
self._imap.uid('store', mail, '+FLAGS', r'\SEEN')
def mark_as_read(self):
"""****DEPRECATED****
Shortcut to `Mark All Emails As Read`.
"""
self.mark_all_emails_as_read()
def mark_email_as_read(self, email_index):
"""Mark email on given ``email_index`` as read.
Arguments:
- ``email_index``: An email index to identity the email message.
Examples:
| Mark Email As Read | INDEX |
"""
self._imap.uid('store', email_index, '+FLAGS', r'\SEEN')
def open_link_from_email(self, email_index, link_index=0):
"""Open link URL from given ``link_index`` in email message body of given ``email_index``.
Returns HTML content of opened link URL.
Arguments:
- ``email_index``: An email index to identity the email message.
- ``link_index``: The link index to be open. (Default 0)
Examples:
| Open Link From Email |
| Open Link From Email | 1 |
"""
urls = self.get_links_from_email(email_index)
if len(urls) > link_index:
resp = urlopen(urls[link_index])
content_type = resp.headers.get('content-type')
if content_type:
enc = content_type.split('charset=')[-1]
return ustr(resp.read(), enc)
else:
return resp.read()
else:
raise AssertionError("Link number %i not found!" % link_index)
def open_link_from_mail(self, email_index, link_index=0):
"""****DEPRECATED****
Shortcut to `Open Link From Email`.
"""
return self.open_link_from_email(email_index, link_index)
def open_mailbox(self, **kwargs):
"""Open IMAP email client session to given ``host`` with given ``user`` and ``password``.
Arguments:
- ``host``: The IMAP host server. (Default None)
- ``is_secure``: An indicator flag to connect to IMAP host securely or not. (Default True)
- ``password``: The plaintext password to be use to authenticate mailbox on given ``host``.
- ``port``: The IMAP port number. (Default None)
        - ``user``: The username to be used to authenticate mailbox on given ``host``.
- ``folder``: The email folder to read from. (Default INBOX)
Examples:
| Open Mailbox | host=HOST | user=USER | password=<PASSWORD> |
| Open Mailbox | host=HOST | user=USER | password=<PASSWORD> | is_secure=False |
| Open Mailbox | host=HOST | user=USER | password=<PASSWORD> | port=8000 |
        | Open Mailbox | host=HOST | user=USER | password=<PASSWORD> | folder=Drafts |
"""
host = kwargs.pop('host', kwargs.pop('server', None))
is_secure = kwargs.pop('is_secure', 'True') == 'True'
port = int(kwargs.pop('port', self.PORT_SECURE if is_secure else self.PORT))
folder = '"%s"' % str(kwargs.pop('folder', self.FOLDER))
self._imap = IMAP4_SSL(host, port) if is_secure else IMAP4(host, port)
self._imap.login(kwargs.pop('user', None), kwargs.pop('password', None))
self._imap.select(folder)
self._init_multipart_walk()
def wait_for_email(self, **kwargs):
"""Wait for email message to arrived base on any given filter criteria.
Returns email index of the latest email message received.
Arguments:
- ``poll_frequency``: The delay value in seconds to retry the mailbox check. (Default 10)
- ``recipient``: Email recipient. (Default None)
- ``sender``: Email sender. (Default None)
- ``status``: A mailbox status filter: ``MESSAGES``, ``RECENT``, ``UIDNEXT``,
``UIDVALIDITY``, and ``UNSEEN``.
Please see [https://goo.gl/3KKHoY|Mailbox Status] for more information.
(Default None)
- ``subject``: Email subject. (Default None)
- ``text``: Email body text. (Default None)
        - ``timeout``: The maximum time in seconds to wait for an email message to arrive.
(Default 60)
- ``folder``: The email folder to check for emails. (Default INBOX)
Examples:
| Wait For Email | sender=<EMAIL> |
        | Wait For Email | sender=<EMAIL> | folder=OUTBOX |
"""
poll_frequency = float(kwargs.pop('poll_frequency', 10))
timeout = int(kwargs.pop('timeout', 60))
end_time = time() + timeout
while time() < end_time:
self._mails = self._check_emails(**kwargs)
if len(self._mails) > 0:
return self._mails[-1]
if time() < end_time:
sleep(poll_frequency)
raise AssertionError("No email received within %ss" % timeout)
def wait_for_mail(self, **kwargs):
"""****DEPRECATED****
Shortcut to `Wait For Email`.
"""
return self.wait_for_email(**kwargs)
def walk_multipart_email(self, email_index):
"""Returns total parts of a multipart email message on given ``email_index``.
Email message is cache internally to be used by other multipart keywords:
`Get Multipart Content Type`, `Get Multipart Field`, `Get Multipart Field Names`,
`Get Multipart Field`, and `Get Multipart Payload`.
Arguments:
        - ``email_index``: An email index to identify the email message.
Examples:
| Walk Multipart Email | INDEX |
"""
if not self._is_walking_multipart(email_index):
data = self._imap.uid('fetch', email_index, '(RFC822)')[1][0][1]
msg = message_from_string(data)
self._start_multipart_walk(email_index, msg)
try:
self._part = next(self._mp_iter)
except StopIteration:
self._init_multipart_walk()
return False
# return number of parts
return len(self._mp_msg.get_payload())
def _check_emails(self, **kwargs):
"""Returns filtered email."""
folder = '"%s"' % str(kwargs.pop('folder', self.FOLDER))
criteria = self._criteria(**kwargs)
# Calling select before each search is necessary with gmail
status, data = self._imap.select(folder)
if status != 'OK':
raise Exception("imap.select error: %s, %s" % (status, data))
typ, msgnums = self._imap.uid('search', None, *criteria)
if typ != 'OK':
raise Exception('imap.search error: %s, %s, criteria=%s' % (typ, msgnums, criteria))
return msgnums[0].split()
@staticmethod
def _criteria(**kwargs):
"""Returns email criteria."""
criteria = []
recipient = kwargs.pop('recipient', kwargs.pop('to_email', kwargs.pop('toEmail', None)))
sender = kwargs.pop('sender', kwargs.pop('from_email', kwargs.pop('fromEmail', None)))
status = kwargs.pop('status', None)
subject = kwargs.pop('subject', None)
text = kwargs.pop('text', None)
if recipient:
criteria += ['TO', '"%s"' % recipient]
if sender:
criteria += ['FROM', '"%s"' % sender]
if subject:
criteria += ['SUBJECT', '"%s"' % subject]
if text:
criteria += ['TEXT', '"%s"' % text]
if status:
criteria += [status]
if not criteria:
criteria = ['UNSEEN']
return criteria
def _init_multipart_walk(self):
"""Initialize multipart email walk."""
self._email_index = None
self._mp_msg = None
self._part = None
def _is_walking_multipart(self, email_index):
"""Returns boolean value whether the multipart email walk is in-progress or not."""
return self._mp_msg is not None and self._email_index == email_index
def _start_multipart_walk(self, email_index, msg):
"""Start multipart email walk."""
self._email_index = email_index
self._mp_msg = msg
self._mp_iter = msg.walk()
```
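For orientation, here is a minimal standalone sketch of the raw `imaplib` calls that the keywords above build on; the host, user, and password values are placeholders, and the criteria list mirrors what `_criteria(sender=..., subject=...)` would assemble.
```python
import imaplib

# Placeholder credentials -- substitute real values before running.
HOST, USER, PASSWORD = "imap.example.com", "user", "secret"

with imaplib.IMAP4_SSL(HOST) as imap:
    imap.login(USER, PASSWORD)
    imap.select('"INBOX"')
    # Same shape of criteria list that _criteria() above assembles:
    criteria = ['FROM', '"alice@example.com"', 'SUBJECT', '"Report"']
    status, msgnums = imap.uid('search', None, *criteria)
    print(status, msgnums[0].split())  # list of matching message UIDs
```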
|
{
"source": "jeffdlb/cesm-lens-aws",
"score": 2
}
|
#### File: cesm-lens-aws/intake-catalogs/catalog-builder.py
```python
import fsspec
import pandas as pd
def build_catalog(bucket='ncar-cesm-lens'):
fs = fsspec.filesystem(protocol='s3', anon=True)
dirs = fs.ls(bucket)
frequencies = []
components = ['ice_nh', 'ice_sh', 'lnd', 'ocn', 'atm']
for d in dirs:
if d.split('/')[-1] in components:
f = fs.ls(d)
frequencies.extend(f[1:])
stores = []
for freq in frequencies:
s = fs.ls(freq)[1:]
stores.extend(s)
print(stores)
entries = []
for store in stores:
try:
path_components = store.split('/')
print(path_components)
component, frequency = path_components[1], path_components[2]
_, experiment, variable = path_components[-1].split('.')[0].split('-')
entry = {
'component': component,
'frequency': frequency,
'experiment': experiment,
'variable': variable,
'path': f's3://{store}',
}
entries.append(entry)
except ValueError:
pass
df = pd.DataFrame(entries)
df.to_csv('aws-cesm1-le.csv', index=False)
if __name__ == '__main__':
build_catalog()
```
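A rough illustration of how the resulting catalog CSV might be consumed; the column names come from the `entry` dict above, while the specific `frequency` value used in the filter is an assumption for the sake of the example.
```python
import pandas as pd

# Load the catalog written by build_catalog() and filter it by the columns defined above.
df = pd.read_csv('aws-cesm1-le.csv')
subset = df[(df['component'] == 'atm') & (df['frequency'] == 'monthly')]
print(subset[['experiment', 'variable', 'path']].head())
```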
|
{
"source": "jeffdotthompson/PyMatching",
"score": 2
}
|
#### File: PyMatching/tests/test_add_noise.py
```python
import networkx as nx
import numpy as np
from scipy.sparse import csr_matrix
from pymatching import Matching
def test_add_noise():
p = 0.1
N = 1000
std = (p*(1-p)/N)**0.5
g = nx.Graph()
for i in range(N):
g.add_edge(i, i+1, qubit_id=i, weight=-np.log(p), error_probability=p)
m = Matching(g)
for i in range(5):
noise, syndrome = m.add_noise()
assert (sum(syndrome) % 2) == 0
assert (p-5*std) * N < sum(noise) < (p+5*std) * N
for i in range(1, N-1):
assert syndrome[i] == (noise[i-1] + noise[i]) % 2
def test_add_noise_with_boundary():
g = nx.Graph()
for i in range(11):
g.add_edge(i, i+1, qubit_id=i, error_probability=(i+1) % 2)
for i in range(5, 12):
g.nodes()[i]['is_boundary'] = True
m = Matching(g)
noise, syndrome = m.add_noise()
assert sum(syndrome) == 5
assert np.array_equal(noise, (np.arange(11)+1) % 2)
assert m.boundary == set(range(5, 12))
assert np.array_equal(
syndrome,
np.array([1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0])
)
def test_add_noise_without_error_probabilities_returns_none():
m = Matching(csr_matrix(np.array([[1,1,0],[0,1,1]])))
assert m.add_noise() is None
m = Matching(csr_matrix(np.array([[1,1,0],[0,1,1]])),
error_probabilities=np.array([0.5,0.7,-0.1]))
assert m.add_noise() is None
```
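For context, a hedged sketch of how the `add_noise` output is typically paired with decoding. It reuses the same (older) PyMatching graph attributes as the tests above and assumes `Matching.decode` accepts the syndrome array directly.
```python
import networkx as nx
import numpy as np
from pymatching import Matching

p, N = 0.1, 20
g = nx.Graph()
for i in range(N):
    g.add_edge(i, i + 1, qubit_id=i, weight=-np.log(p), error_probability=p)
m = Matching(g)

noise, syndrome = m.add_noise()   # sample an error pattern and its syndrome
correction = m.decode(syndrome)   # minimum-weight matching correction, one bit per qubit
print(noise.shape == correction.shape)
```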
|
{
"source": "jeffdshen/list-ranker",
"score": 3
}
|
#### File: jeffdshen/list-ranker/ranker.py
```python
import argparse
import csv
import pathlib
from elo import Elo
import selectors
def add_args(parser):
parser.add_argument(
"--in_file",
type=str,
required=True,
help="File to read the list of items from (as a txt)."
"Can also read from the out_file as a CSV, which must be in Excel format.",
)
parser.add_argument(
"--out_file",
type=str,
default=None,
help="File to output the list of items as a CSV. Defaults to in_file.csv",
)
parser.add_argument(
"--passes",
type=int,
default=10,
help="The number of passes to loop over the list of items. "
"For each pass, the selected half of each group beats other half.",
)
parser.add_argument(
"--seeding_passes",
type=int,
default=0,
help="Number of seeding passes to use, before proceeding to user-inputted passes. "
"In this mode, group matches are held with higher seeds automatically winning. "
"The seed is determined by the placement of the item in the input file. "
"Only works if the in_file is a txt (not csv).",
)
add_elo_args(parser)
selectors.add_args(parser)
def add_elo_args(parser):
parser.add_argument(
"--elo_base",
type=float,
default=10.0,
help="The base of the exponent for calculating the expected score.",
)
parser.add_argument(
"--elo_scale",
type=float,
default=400.0,
help="The scaling factor in the exponent for calculating expected score.",
)
parser.add_argument(
"--elo_k",
type=float,
default=24.0,
help="The k-factor for Elo updates.",
)
parser.add_argument(
"--elo_default",
type=float,
default=1500.0,
help="Default Elo to assign items at the beginning.",
)
def read_input(in_file, elo_default):
players = {}
if pathlib.PurePath(in_file).suffix == ".csv":
with open(in_file, newline="") as file:
reader = csv.reader(file)
cols = None
for row in reader:
if cols is not None and len(row) != cols:
raise RuntimeError(
f"CSV was ill-formatted, found {len(row)} cols instead of {cols}"
)
cols = len(row)
elo = elo_default
if len(row) > 1:
elo = float(row[1])
players[row[0]] = elo
is_csv = True
else:
with open(in_file) as file:
player_list = [line.strip() for line in file]
player_list = [player for player in player_list if player]
for player in player_list:
players[player] = elo_default
is_csv = False
return players, is_csv
def save_output(out_file, players):
    output = sorted(players.items(), key=lambda item: item[1], reverse=True)
with open(out_file, "w", newline="") as file:
writer = csv.writer(file)
for player, score in output:
writer.writerow([player, int(score)])
def main():
parser = argparse.ArgumentParser("Rank a list of items using elo.")
add_args(parser)
args = parser.parse_args()
if args.out_file is None:
args.out_file = pathlib.PurePath(args.in_file).with_suffix(".csv")
elo = Elo(args.elo_base, args.elo_scale, args.elo_k)
selector = selectors.get_selector(args.selector_name, args)
players, is_csv = read_input(args.in_file, args.elo_default)
print(f"Loaded {len(players)} items from input file {args.in_file}")
if not is_csv and args.seeding_passes > 0:
player_seeds = {k: v for v, k in enumerate(players.keys())}
print(f"Executing {args.seeding_passes} seeding passes...")
for n in range(args.seeding_passes):
batches = selector.select(players)
results = []
for batch in batches:
winners = sorted(batch, key=lambda p: player_seeds[p])[:len(batch) // 2]
losers = [p for p in batch if p not in winners]
for w in winners:
for l in losers:
results.append((w, l))
elo.update(players, results)
print(f"Updating Elo ratings and saving to output file {args.out_file}")
save_output(args.out_file, players)
for n in range(args.passes):
print(f"Executing pass number {n + 1} out of {args.passes}")
batches = selector.select(players)
results = []
for batch in batches:
print(
"Select your top half or so, in no particular order, comma-separated, using their indices, out of:"
)
for p, player in enumerate(batch):
print(f"{p}. {player} ({int(players[player])})")
try:
winners = input()
winners = winners.split(",")
winners = [winner.strip() for winner in winners]
winners = [int(winner) for winner in winners if winner]
winners = [
winner for winner in winners if winner >= 0 and winner < len(batch)
]
except Exception as e:
print("Skipping batch due to exception:", e)
continue
losers = [i for i in range(len(batch)) if i not in winners]
for w in winners:
for l in losers:
results.append((batch[w], batch[l]))
print(f"Updating Elo ratings and saving to output file {args.out_file}")
elo.update(players, results)
save_output(args.out_file, players)
print("Finished all passes!")
if __name__ == "__main__":
main()
```
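The `Elo` class imported above lives in a separate module that is not shown here; the following is only a sketch of the standard Elo expected-score and update rule that the `--elo_base`, `--elo_scale`, and `--elo_k` arguments parameterize.
```python
def expected_score(r_a, r_b, base=10.0, scale=400.0):
    # Probability that the player with rating r_a beats the player with rating r_b.
    return 1.0 / (1.0 + base ** ((r_b - r_a) / scale))

def update_pair(ratings, winner, loser, base=10.0, scale=400.0, k=24.0):
    e = expected_score(ratings[winner], ratings[loser], base, scale)
    ratings[winner] += k * (1.0 - e)
    ratings[loser] -= k * (1.0 - e)

ratings = {"A": 1500.0, "B": 1500.0}
update_pair(ratings, "A", "B")
print(ratings)  # the winner gains exactly what the loser gives up
```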
#### File: jeffdshen/list-ranker/selectors.py
```python
import random
def get_selector(name, args):
if name == "spaced":
return SpacedSelector(args.selector_min_spacing, args.selector_max_batch)
raise RuntimeError("Unsupported selector: {}".format(name))
def add_args(parser):
parser.add_argument(
"--selector_name",
type=str,
default="spaced",
choices=("spaced",),
help="The name of the selector method to use.",
)
parser.add_argument(
"--selector_max_batch",
type=int,
default=10,
help="The number of items to consider at once.",
)
parser.add_argument(
"--selector_min_spacing",
type=float,
default=4.0,
help="The minimum amount of spacing between items before considering splitting into a new batch.",
)
class SpacedSelector:
def __init__(self, min_spacing, max_batch):
super().__init__()
self.min_spacing = min_spacing
self.max_batch = max_batch
def is_batch_finished(self, players, batch, score):
if len(batch) >= self.max_batch:
return True
spacing = abs(players[batch[0]] - players[batch[-1]]) / len(batch)
if spacing < self.min_spacing:
return False
new_spacing = abs(players[batch[0]] - score) / (len(batch) + 1)
return new_spacing > spacing
def select(self, players):
player_list = list(players.items())
random.shuffle(player_list)
player_list.sort(key=lambda item: item[1], reverse=True)
batches = []
for player, score in player_list:
if not batches or self.is_batch_finished(players, batches[-1], score):
batches.append([])
batches[-1].append(player)
# Handle the last batch
if len(batches) >= 2 and len(batches[-1]) == 1:
batch = batches.pop()
batches[-1] += batch
if len(batches[-1]) > self.max_batch:
batch = batches[-1]
batches[-1] = batch[: len(batch) // 2]
batches.append(batch[len(batch) // 2 :])
return batches
```
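A small usage sketch of `SpacedSelector`, assuming the module above is importable as `selectors` from the repository root (note that this shadows the standard-library `selectors` module):
```python
import random
from selectors import SpacedSelector  # the module defined above, not the stdlib selectors

random.seed(0)
players = {f"item{i}": 1500.0 - 30.0 * i for i in range(12)}
batches = SpacedSelector(min_spacing=4.0, max_batch=5).select(players)
for batch in batches:
    print(batch)  # groups of adjacent ratings, each at most max_batch long
```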
|
{
"source": "jeffdshen/squad",
"score": 3
}
|
#### File: squad/datasets/bidaf_squad.py
```python
import torch
import torch.utils.data as data
import numpy as np
class SQuAD(data.Dataset):
"""Stanford Question Answering Dataset (SQuAD).
Each item in the dataset is a tuple with the following entries (in order):
- context_idxs: Indices of the words in the context.
Shape (context_len,).
- context_char_idxs: Indices of the characters in the context.
Shape (context_len, max_word_len).
- question_idxs: Indices of the words in the question.
Shape (question_len,).
- question_char_idxs: Indices of the characters in the question.
Shape (question_len, max_word_len).
- y1: Index of word in the context where the answer begins.
-1 if no answer.
- y2: Index of word in the context where the answer ends.
-1 if no answer.
- id: ID of the example.
Args:
data_path (str): Path to .npz file containing pre-processed dataset.
use_v2 (bool): Whether to use SQuAD 2.0 questions. Otherwise only use SQuAD 1.1.
"""
def __init__(self, data_path, use_v2=True):
super(SQuAD, self).__init__()
dataset = np.load(data_path)
self.context_idxs = torch.from_numpy(dataset["context_idxs"]).long()
self.context_char_idxs = torch.from_numpy(dataset["context_char_idxs"]).long()
self.question_idxs = torch.from_numpy(dataset["ques_idxs"]).long()
self.question_char_idxs = torch.from_numpy(dataset["ques_char_idxs"]).long()
self.y1s = torch.from_numpy(dataset["y1s"]).long()
self.y2s = torch.from_numpy(dataset["y2s"]).long()
if use_v2:
# SQuAD 2.0: Use index 0 for no-answer token (token 1 = OOV)
batch_size, c_len, w_len = self.context_char_idxs.size()
ones = torch.ones((batch_size, 1), dtype=torch.int64)
self.context_idxs = torch.cat((ones, self.context_idxs), dim=1)
self.question_idxs = torch.cat((ones, self.question_idxs), dim=1)
ones = torch.ones((batch_size, 1, w_len), dtype=torch.int64)
self.context_char_idxs = torch.cat((ones, self.context_char_idxs), dim=1)
self.question_char_idxs = torch.cat((ones, self.question_char_idxs), dim=1)
self.y1s += 1
self.y2s += 1
# SQuAD 1.1: Ignore no-answer examples
self.ids = torch.from_numpy(dataset["ids"]).long()
self.valid_idxs = [
idx for idx in range(len(self.ids)) if use_v2 or self.y1s[idx].item() >= 0
]
def __getitem__(self, idx):
idx = self.valid_idxs[idx]
example = (
self.context_idxs[idx],
self.context_char_idxs[idx],
self.question_idxs[idx],
self.question_char_idxs[idx],
self.y1s[idx],
self.y2s[idx],
self.ids[idx],
)
return example
def __len__(self):
return len(self.valid_idxs)
def collate_fn(examples):
"""Create batch tensors from a list of individual examples returned
by `SQuAD.__getitem__`. Merge examples of different length by padding
all examples to the maximum length in the batch.
Args:
examples (list): List of tuples of the form (context_idxs, context_char_idxs,
question_idxs, question_char_idxs, y1s, y2s, ids).
Returns:
examples (tuple): Tuple of tensors (context_idxs, context_char_idxs, question_idxs,
question_char_idxs, y1s, y2s, ids). All of shape (batch_size, ...), where
the remaining dimensions are the maximum length of examples in the input.
Adapted from:
https://github.com/yunjey/seq2seq-dataloader
"""
def merge_0d(scalars, dtype=torch.int64):
return torch.tensor(scalars, dtype=dtype)
def merge_1d(arrays, dtype=torch.int64, pad_value=0):
lengths = [(a != pad_value).sum() for a in arrays]
padded = torch.zeros(len(arrays), max(lengths), dtype=dtype)
for i, seq in enumerate(arrays):
end = lengths[i]
padded[i, :end] = seq[:end]
return padded
def merge_2d(matrices, dtype=torch.int64, pad_value=0):
heights = [(m.sum(1) != pad_value).sum() for m in matrices]
widths = [(m.sum(0) != pad_value).sum() for m in matrices]
padded = torch.zeros(len(matrices), max(heights), max(widths), dtype=dtype)
for i, seq in enumerate(matrices):
height, width = heights[i], widths[i]
padded[i, :height, :width] = seq[:height, :width]
return padded
# Group by tensor type
(
context_idxs,
context_char_idxs,
question_idxs,
question_char_idxs,
y1s,
y2s,
ids,
) = zip(*examples)
# Merge into batch tensors
context_idxs = merge_1d(context_idxs)
context_char_idxs = merge_2d(context_char_idxs)
question_idxs = merge_1d(question_idxs)
question_char_idxs = merge_2d(question_char_idxs)
y1s = merge_0d(y1s)
y2s = merge_0d(y2s)
ids = merge_0d(ids)
return (
context_idxs,
context_char_idxs,
question_idxs,
question_char_idxs,
y1s,
y2s,
ids,
)
```
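A minimal loading sketch for the class above; the `.npz` path is a placeholder, and the import path is inferred from the file header.
```python
import torch.utils.data as data
from datasets.bidaf_squad import SQuAD, collate_fn  # module path per the header above

train_set = SQuAD("data/train.npz", use_v2=True)  # placeholder path to the pre-processed .npz
loader = data.DataLoader(train_set, batch_size=64, shuffle=True, collate_fn=collate_fn)
context_idxs, context_char_idxs, question_idxs, question_char_idxs, y1s, y2s, ids = next(iter(loader))
print(context_idxs.shape)  # (batch_size, longest context in this batch)
```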
#### File: squad/datasets/bpe_squad.py
```python
import torch
import torch.utils.data as data
import numpy as np
import random
class MLM(data.IterableDataset):
"""
Each item in the dataset is a tuple with the following entries (in order):
- x: Masked blocks of text, starting with [CLS], separated by [SEP]
- y: Target blocks of text, starting with [CLS], separated by [SEP]
Args:
data_path (str): Path to .npz file containing pre-processed dataset.
max_tokens (int): Range of indices to generate for the random tokens.
"""
def __init__(
self,
data_path,
max_tokens,
epoch_size,
mask_prob=0.15,
unmask_prob=0.1,
randomize_prob=0.1,
block_size=512,
ignore_idx=-1,
padding_idx=0,
cls_idx=1,
sep_idx=2,
mask_idx=3,
):
super(MLM, self).__init__()
self.epoch_size = epoch_size
self.max_tokens = max_tokens
self.mask_prob = mask_prob
self.unmask_prob = unmask_prob
self.randomize_prob = randomize_prob
self.block_size = block_size
self.ignore_idx = ignore_idx
self.padding_idx = padding_idx
self.cls_idx = cls_idx
self.sep_idx = sep_idx
self.mask_idx = mask_idx
self.random_weights = [1] * self.max_tokens
self.random_weights[self.padding_idx] = 0
self.random_weights[self.cls_idx] = 0
self.random_weights[self.sep_idx] = 0
self.random_weights[self.mask_idx] = 0
# Don't need to do ignore_idx, since it should always be outside the range
dataset = np.load(data_path)
self.context_idxs = torch.from_numpy(dataset["context_idxs"]).long()
self.question_idxs = torch.from_numpy(dataset["ques_idxs"]).long()
def mask(self, x, y):
size = x.size(0)
num_mask = int(self.mask_prob * size + random.random())
masks = torch.tensor(random.sample(range(size), num_mask), dtype=torch.long)
change_masks = torch.rand(num_mask)
unmask = change_masks < self.unmask_prob
random_mask = change_masks < (self.randomize_prob + self.unmask_prob)
random_mask = random_mask & (~unmask)
random_content = torch.tensor(
random.choices(
range(self.max_tokens),
weights=self.random_weights,
k=random_mask.sum().item(),
),
dtype=torch.long,
)
masked = torch.tensor([False] * size, dtype=torch.bool)
masked[masks] = True
x[masks[~unmask]] = self.mask_idx
x[masks[random_mask]] = random_content
y[~masked] = self.ignore_idx
return x, y
def __len__(self):
return self.epoch_size
def __iter__(self):
worker_info = torch.utils.data.get_worker_info()
worker_id = 0
num_workers = 1
if worker_info is not None:
worker_id = worker_info.id
num_workers = worker_info.num_workers
epoch_size = self.epoch_size // num_workers
next = torch.full((self.block_size,), self.padding_idx, dtype=torch.long)
next[0] = self.cls_idx
next_index = 1
n_samples = 0
while True:
dataset_size = self.context_idxs.size(0)
ids = list(range(worker_id, dataset_size, num_workers))
random.shuffle(ids)
for i in ids:
for j in range(2):
if j == 0:
sample = self.context_idxs[i]
else:
sample = self.question_idxs[i]
sample_length = (sample != self.padding_idx).sum().item()
sample_index = 0
while sample_index < sample_length:
fill = min(
sample_length - sample_index, next.size(0) - next_index
)
next[next_index : next_index + fill] = sample[
sample_index : sample_index + fill
]
next_index += fill
sample_index += fill
if next_index >= next.size(0):
x = next.clone().detach()
y = next.clone().detach()
yield self.mask(x, y)
next = torch.full(
(self.block_size,), self.padding_idx, dtype=torch.long
)
next[0] = self.cls_idx
next_index = 1
n_samples += 1
if n_samples >= epoch_size:
return
else:
next[next_index] = self.sep_idx
next_index += 1
def collate_fn(examples):
# Group by tensor type
x, y = zip(*examples)
return torch.stack(x, dim=0), torch.stack(y, dim=0)
class SQuAD(data.Dataset):
"""Stanford Question Answering Dataset (SQuAD).
Each item in the dataset is a tuple with the following entries (in order):
- x: [CLS] context window [SEP] question
- y: start and end indices, adjusted to the context window
- c_padding_mask: mask out [SEP] question (True) or keep [CLS] context window (False)
- ids: ids for each entry
Args:
data_path (str): Path to .npz file containing pre-processed dataset.
"""
def __init__(
self,
data_path,
block_size=512,
ignore_idx=-1,
padding_idx=0,
cls_idx=1,
sep_idx=2,
mask_idx=3,
use_v2=True,
):
super(SQuAD, self).__init__()
self.block_size = block_size
self.ignore_idx = ignore_idx
self.padding_idx = padding_idx
self.cls_idx = cls_idx
self.sep_idx = sep_idx
self.mask_idx = mask_idx
dataset = np.load(data_path)
self.context_idxs = torch.from_numpy(dataset["context_idxs"]).long()
self.question_idxs = torch.from_numpy(dataset["ques_idxs"]).long()
self.y1s = torch.from_numpy(dataset["y1s"]).long()
self.y2s = torch.from_numpy(dataset["y2s"]).long()
self.ids = torch.from_numpy(dataset["ids"]).long()
self.valid_idxs = [
idx for idx in range(len(self.ids)) if use_v2 or self.y1s[idx].item() >= 0
]
def __getitem__(self, idx):
idx = self.valid_idxs[idx]
example = (
self.context_idxs[idx],
self.question_idxs[idx],
self.y1s[idx],
self.y2s[idx],
self.ids[idx],
)
return example
def __len__(self):
return len(self.valid_idxs)
def get_sliding_window_collate(self, stride, randomize):
"""
Gets a collate function which creates inputs at most the block size.
If randomize is True, we get a single random sliding window (for training/dev).
Otherwise, we keep all the sliding windows (for evaluation).
"""
def sliding_window_collate(examples):
windows = []
for example in examples:
c, q, y1, y2, id = example
c_len = (c != self.padding_idx).sum()
q_len = (q != self.padding_idx).sum()
# We want to keep going so long as c_end = c_start + (block_size - q_len - 2)
# has not been at least c_len for the first time, i.e. c_end < c_len + stride.
# We also want to take at least one step.
c_range = range(
0, max(1, c_len + q_len + 2 - self.block_size + stride), stride
)
if randomize:
c_start = random.sample(c_range, k=1)[0]
c_range = range(c_start, c_start + 1)
for c_start in c_range:
c_end = min(self.block_size - q_len - 2 + c_start, c_len)
if y1 < c_start or y2 < c_start or y1 >= c_end or y2 >= c_end:
y1 = -1
y2 = -1
else:
y1 -= c_start
y2 -= c_start
windows.append((c[c_start:c_end], q[:q_len], y1, y2, c_start, id))
# Collate windows
max_len = max(len(window[0]) + len(window[1]) + 2 for window in windows)
assert max_len <= self.block_size
x = torch.full((len(windows), max_len), self.padding_idx, dtype=torch.long)
y = torch.zeros(len(windows), 2, dtype=torch.long)
c_padding_mask = torch.ones(len(windows), max_len, dtype=torch.bool)
c_starts = torch.zeros(len(windows), dtype=torch.long)
ids = torch.zeros(len(windows), dtype=torch.long)
for i, window in enumerate(windows):
c, q, y1, y2, c_start, id = window
x[i, 0] = self.cls_idx
x[i, 1 : 1 + len(c)] = c
x[i, 1 + len(c)] = self.sep_idx
x[i, 2 + len(c) : 2 + len(c) + len(q)] = q
c_padding_mask[i][0 : 1 + len(c)] = False
y[i, 0] = y1 + 1
y[i, 1] = y2 + 1
c_starts[i] = c_start
ids[i] = id
return x, y, c_padding_mask, c_starts, ids
return sliding_window_collate
class QuestionsMLM(data.Dataset):
"""
Args:
data_path (str): Path to .npz file containing pre-processed dataset.
max_tokens (int): Range of indices to generate for the random tokens.
"""
def __init__(
self,
data_path,
max_tokens,
mask_prob=0.15,
unmask_prob=0.1,
randomize_prob=0.1,
ignore_idx=-1,
padding_idx=0,
cls_idx=1,
sep_idx=2,
mask_idx=3,
use_v2=True,
):
super().__init__()
self.max_tokens = max_tokens
self.mask_prob = mask_prob
self.unmask_prob = unmask_prob
self.randomize_prob = randomize_prob
self.ignore_idx = ignore_idx
self.padding_idx = padding_idx
self.cls_idx = cls_idx
self.sep_idx = sep_idx
self.mask_idx = mask_idx
self.random_weights = [1] * self.max_tokens
self.random_weights[self.padding_idx] = 0
self.random_weights[self.cls_idx] = 0
self.random_weights[self.sep_idx] = 0
self.random_weights[self.mask_idx] = 0
# Don't need to do ignore_idx, since it should always be outside the range
dataset = np.load(data_path)
self.context_idxs = torch.from_numpy(dataset["context_idxs"]).long()
self.question_idxs = torch.from_numpy(dataset["ques_idxs"]).long()
self.y1s = torch.from_numpy(dataset["y1s"]).long()
self.y2s = torch.from_numpy(dataset["y2s"]).long()
self.ids = torch.from_numpy(dataset["ids"]).long()
self.valid_idxs = [
idx for idx in range(len(self.ids)) if use_v2 or self.y1s[idx].item() >= 0
]
self.max_id = torch.max(self.ids) + 1
def mask(self, x, y):
size = (x != self.padding_idx).sum().item()
num_mask = int(self.mask_prob * size + random.random())
masks = torch.tensor(random.sample(range(size), num_mask), dtype=torch.long)
change_masks = torch.rand(num_mask)
unmask = change_masks < self.unmask_prob
random_mask = change_masks < (self.randomize_prob + self.unmask_prob)
random_mask = random_mask & (~unmask)
random_content = torch.tensor(
random.choices(
range(self.max_tokens),
weights=self.random_weights,
k=random_mask.sum().item(),
),
dtype=torch.long,
)
masked = torch.tensor([False] * x.size(0), dtype=torch.bool)
masked[masks] = True
x[masks[~unmask]] = self.mask_idx
x[masks[random_mask]] = random_content
y[~masked] = self.ignore_idx
return x, y
def __getitem__(self, idx):
idx = self.valid_idxs[idx]
x = torch.full((self.question_idxs.size(-1) + 1,), self.padding_idx, dtype=torch.long)
x[0] = self.cls_idx
x[1:] = self.question_idxs[idx]
x = x.clone().detach()
y = x.clone().detach()
x, y = self.mask(x, y)
return x, y, self.context_idxs[idx], self.ids[idx]
def __len__(self):
return len(self.valid_idxs)
@staticmethod
def get_collate_fn():
def mlm_collate_fn(examples):
# Group by tensor type
x, y, c, ids = zip(*examples)
return torch.stack(x, dim=0), torch.stack(y, dim=0), torch.stack(c, dim=0), torch.stack(ids, dim=0)
return mlm_collate_fn
```
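A sketch of wiring the sliding-window collate into a DataLoader, under the same placeholder-path assumption as above:
```python
import torch.utils.data as data
from datasets.bpe_squad import SQuAD  # module path per the header above

dataset = SQuAD("data/train.npz", block_size=512)  # placeholder path to the pre-processed .npz
collate = dataset.get_sliding_window_collate(stride=128, randomize=True)
loader = data.DataLoader(dataset, batch_size=16, shuffle=True, collate_fn=collate)
x, y, c_padding_mask, c_starts, ids = next(iter(loader))
print(x.shape, y.shape)  # x: (windows, <= block_size), y: (windows, 2) start/end indices
```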
#### File: squad/preprocess/bpe_aug_setup.py
```python
import numpy as np
import os
import ujson as json
from codecs import open
from collections import defaultdict
from tqdm import tqdm
from preprocess.bpe import BPE
from zipfile import ZipFile
def load_features(out_file):
dataset = np.load(out_file)
return (
dataset["context_idxs"],
dataset["ques_idxs"],
dataset["y1s"],
dataset["y2s"],
dataset["ids"],
)
def load_aug(aug_file):
with open(aug_file, "r") as file:
return json.load(file)
def unzip(output_path):
if os.path.exists(output_path):
return
zip_path = output_path + ".zip"
if os.path.exists(zip_path):
print(f"Unzipping {zip_path}...")
with ZipFile(zip_path, "r") as zip_fh:
zip_fh.extractall(os.path.dirname(output_path))
def build_features(data_type, out_file, aug_file, bpe):
print(f"Adding {data_type} aug examples...")
context_idxs, ques_idxs, y1s, y2s, ids = load_features(out_file)
id_map = {}
for i, id in enumerate(ids):
assert str(id) not in id_map
id_map[str(id)] = i
total = ids.max()
total_unfiltered = total
ques_map = defaultdict(list)
for i, ques in enumerate(ques_idxs):
ques = [token for token in ques.tolist() if token != 0]
ques = bpe.decode(ques)
ques_map[ques].append(i)
ques_limit = ques_idxs.shape[1]
unzip(aug_file)
aug_examples = load_aug(aug_file)
meta = {}
aug_context_idxs = []
aug_ques_idxs = []
aug_y1s = []
aug_y2s = []
aug_ids = []
gold_filtered = 0
dup_filtered = 0
for id, example in tqdm(aug_examples.items()):
gold_question = example["gold_question"]
aug_questions = example["aug_questions"]
aug_question_texts = example["aug_question_texts"]
assert len(gold_question) == ques_limit
assert gold_question == ques_idxs[id_map[id]].tolist()
aug_context = context_idxs[id_map[id]]
for i, ques in enumerate(aug_questions):
total_unfiltered += 1
assert len(ques) == ques_limit
# filter out if it equals the gold question
if ques == gold_question:
gold_filtered += 1
continue
# filter out if it equals another question with the same context
ques_text = aug_question_texts[i]
skip = False
for j in ques_map[ques_text]:
if np.array_equal(context_idxs[j], aug_context):
skip = True
break
if skip:
dup_filtered += 1
continue
total += 1
aug_context_idxs.append(aug_context)
aug_ques_idxs.append(ques)
aug_y1s.append(-1)
aug_y2s.append(-1)
aug_ids.append(total)
aug_context_idxs = np.array(aug_context_idxs)
aug_ques_idxs = np.array(aug_ques_idxs)
aug_y1s = np.array(aug_y1s)
aug_y2s = np.array(aug_y2s)
aug_ids = np.array(aug_ids)
context_idxs = np.concatenate((context_idxs, aug_context_idxs), axis=0)
ques_idxs = np.concatenate((ques_idxs, aug_ques_idxs), axis=0)
y1s = np.concatenate((y1s, aug_y1s), axis=0)
y2s = np.concatenate((y2s, aug_y2s), axis=0)
ids = np.concatenate((ids, aug_ids), axis=0)
np.savez_compressed(
out_file,
context_idxs=context_idxs,
ques_idxs=ques_idxs,
y1s=y1s,
y2s=y2s,
ids=ids,
)
print(f"Built {total} instances of features in total")
meta["total_unfiltered"] = total_unfiltered
meta["total"] = total
meta["gold_filtered"] = gold_filtered
meta["dup_filtered"] = dup_filtered
return meta
def get_bpe(args):
bpe = BPE()
with open(args.bpe_file, "r") as file:
bpe.load_state_dict(json.load(file))
return bpe
def preprocess(args):
# Process training set and use it to construct bpe
bpe = get_bpe(args)
train_meta = build_features(
"train", args.train_record_file, args.train_aug_file, bpe
)
print("Meta: {}".format(train_meta))
def setup(args):
preprocess(args)
def add_args(parser):
return parser
```
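For reference, the augmentation JSON consumed by `build_features` above is keyed by example id and, judging from the fields read here and written by the augmentation trainer below, looks roughly like this (token ids and text are illustrative only):
```python
# Illustrative shape of the train_aug_file contents; values are made up.
aug_examples = {
    "12345": {
        "gold_question": [1, 52, 97, 0, 0],       # BPE token ids padded to ques_limit
        "gold_question_text": "what is ...",
        "aug_questions": [[1, 52, 98, 0, 0]],     # one padded id list per augmented sample
        "aug_question_texts": ["what was ..."],
    }
}
```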
#### File: squad/trainer/roberta_augment.py
```python
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data as data
import torch.cuda.amp as amp
from collections import OrderedDict
from tqdm import tqdm
import ujson as json
from models import RoBERTa
from datasets.bpe_squad import QuestionsMLM, collate_fn, MLM
from preprocess.bpe import BPE
import trainer.trainer as base_trainer
import trainer.util as util
import trainer.stats as stats
import models.transformer as T
import trainer.scheduler as sched
def add_special_tokens(args):
args.ignore_idx = -1
args.padding_idx = 0
args.cls_idx = 1
args.sep_idx = 2
args.mask_idx = 3
def get_args(args):
# Compute derived args values
device, args.gpu_ids = util.get_available_devices()
args.batch_size_per_gpu = args.batch_size
args.batch_size *= max(1, len(args.gpu_ids))
return args, device
def get_num_steps(args):
args.num_steps = args.epoch_size // args.batch_size // args.gradient_accumulation
if args.num_epochs >= 0:
args.num_steps *= args.num_epochs
if args.decay_forever:
args.num_steps = float("inf")
return args.num_steps
def get_bpe(args):
bpe = BPE()
with open(args.bpe_file, "r") as file:
bpe.load_state_dict(json.load(file))
add_special_tokens(args)
return bpe
def get_dataset(args, file, bpe, shuffle):
# Don't need to supply special idxs, since they're the same.
dataset = QuestionsMLM(
file,
max_tokens=len(bpe),
mask_prob=args.mask_prob,
unmask_prob=args.unmask_prob,
randomize_prob=args.randomize_prob,
use_v2=args.use_squad_v2,
)
loader = data.DataLoader(
dataset,
batch_size=args.batch_size,
shuffle=shuffle,
num_workers=args.num_workers,
collate_fn=dataset.get_collate_fn(),
)
return dataset, loader
def get_model(args, bpe):
model = RoBERTa(
dim=args.dim,
n_heads=args.n_heads,
ff_dim=args.ff_dim,
activation=args.activation,
dropout=args.dropout,
attn_dropout=args.attn_dropout,
act_dropout=args.act_dropout,
n_layers=args.n_layers,
max_positions=args.max_positions,
max_tokens=len(bpe),
padding_idx=args.padding_idx,
ignore_idx=args.ignore_idx,
prenorm=args.prenorm,
qa=False,
)
return model
def train(args):
trainer = base_trainer.Trainer()
args, device = get_args(args)
args, log, tbx = trainer.setup(args)
# Get BPE
log.info("Loading BPE...")
bpe = get_bpe(args)
log.info("Loaded {} BPE tokens".format(len(bpe)))
# Get data loader
log.info("Building dataset...")
train_dataset, train_loader = get_dataset(
args, args.train_record_file, bpe, shuffle=True
)
dev_dataset, dev_loader = get_dataset(
args, args.train_record_file, bpe, shuffle=False
)
args.epoch_size = len(train_dataset)
log.info("Train has {} examples".format(args.epoch_size))
# Get model
log.info("Building model...")
model = get_model(args, bpe)
model = trainer.setup_model(model, device)
# Get optimizer, scheduler, and scaler
optimizer = optim.AdamW(
model.parameters(),
args.lr,
betas=(args.beta_1, args.beta_2),
eps=args.eps,
weight_decay=args.l2_wd,
)
get_num_steps(args)
log.info("Scheduler will decay over {} steps".format(args.num_steps))
scheduler = sched.get_linear_warmup_power_decay_scheduler(
optimizer, args.warmup_steps, args.num_steps, power=args.power_decay
)
scaler = amp.GradScaler()
optimizer, scheduler, scaler = trainer.setup_optimizer(optimizer, scheduler, scaler)
# Train
log.info("Training...")
model.train()
sample_num = 0
samples_till_eval = args.eval_per_n_samples
epoch = 0
step = 0
trainer.setup_saver()
trainer.setup_random()
sample_num, samples_till_eval, epoch, step = trainer.setup_step(
step_vars=(sample_num, samples_till_eval, epoch, step)
)
trainer.setup_close()
while epoch != args.num_epochs:
trainer.save_checkpoint(step_vars=(sample_num, samples_till_eval, epoch, step))
epoch += 1
log.info(f"Starting epoch {epoch}...")
# Print histogram of weights every epoch
for tags, params in model.named_parameters():
tbx.add_histogram(tags, params.data, epoch)
with torch.enable_grad(), tqdm(total=len(train_loader.dataset)) as progress_bar:
for x, y, _, _ in train_loader:
batch_size = x.size(0)
loss, loss_val, _ = forward(x, y, args, device, model)
loss = loss / args.gradient_accumulation
# Backward
scaler.scale(loss).backward()
if (step + 1) % args.gradient_accumulation == 0:
scaler.unscale_(optimizer)
nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
scaler.step(optimizer)
scaler.update()
scheduler.step()
optimizer.zero_grad()
# Log info
step += 1
sample_num += batch_size
progress_bar.update(batch_size)
progress_bar.set_postfix(epoch=epoch, NLL=loss_val)
tbx.add_scalar("train/NLL", loss_val, sample_num)
tbx.add_scalar("train/LR", optimizer.param_groups[0]["lr"], sample_num)
tbx.add_scalar(
"train/steps", step // args.gradient_accumulation, sample_num
)
results, augs = augment(model, dev_loader, device, bpe, args)
for k, v in results.items():
tbx.add_scalar(f"dev/{k}", v, sample_num)
save(args.train_aug_file, augs, "train aug")
def save(filename, obj, message=None):
if message is not None:
print(f"Saving {message}...")
with open(filename, "w") as fh:
json.dump(obj, fh)
else:
raise RuntimeError("Message missing")
def forward(x, y, args, device, model, autocast=True):
# Setup for forward
x = x.to(device)
padding_mask = T.get_padding_mask(x, args.padding_idx)
# Forward
with amp.autocast(enabled=autocast):
scores = model(x, padding_mask=padding_mask)
scores = model.module.mask_scores(scores, padding_mask)
y = y.to(device)
loss = model.module.get_loss(scores, y)
loss_val = loss.item()
return loss, loss_val, scores
def sample_mlm_pred(model, x, y, scores, args):
scores = scores.clone().detach()
# Don't generate special tokens
scores[:, :, args.padding_idx] = float("-inf")
scores[:, :, args.cls_idx] = float("-inf")
scores[:, :, args.sep_idx] = float("-inf")
scores[:, :, args.mask_idx] = float("-inf")
ans = y.clone().detach()
mask = y != args.ignore_idx
ans[~mask] = x[~mask]
pred = x.clone().detach()
pred[mask] = model.sample(
scores[mask, :], 1, alpha=args.sample_temperature
).squeeze(-1)
acc = (pred[mask] == y[mask]).float().mean().item()
ans = ans[:, 1:].detach().cpu().numpy()
pred = pred[:, 1:].detach().cpu().numpy()
return pred, ans, acc
def augment(model, data_loader, device, bpe, args):
nll_meter = stats.AverageMeter()
acc_meter = stats.AverageMeter()
model.eval()
augs = {}
with torch.no_grad(), tqdm(
total=len(data_loader.dataset) * args.augment_samples
) as progress_bar:
for _ in range(args.augment_samples):
for x, y, c, ids in data_loader:
batch_size = x.size(0)
_, loss_val, scores = forward(x, y, args, device, model)
nll_meter.update(loss_val, batch_size)
x = x.to(device)
y = y.to(device)
pred, ans, acc = sample_mlm_pred(model.module, x, y, scores, args)
acc_meter.update(acc, batch_size)
progress_bar.update(batch_size)
progress_bar.set_postfix(NLL=nll_meter.avg)
for i, id in enumerate(ids.tolist()):
if str(id) not in augs:
augs[str(id)] = {
"gold_question": "",
"aug_questions": [],
"gold_question_text": "",
"aug_question_texts": [],
}
aug = augs[str(id)]
gold = ans[i].tolist()
aug_q = pred[i].tolist()
assert not aug["gold_question"] or aug["gold_question"] == gold
aug["gold_question"] = gold
aug["aug_questions"].append(aug_q)
aug["gold_question_text"] = bpe.decode(
[token for token in gold if token != args.padding_idx]
)
aug["aug_question_texts"].append(
bpe.decode(
[token for token in aug_q if token != args.padding_idx]
)
)
model.train()
results_list = [("NLL", nll_meter.avg), ("acc", acc_meter.avg)]
results = OrderedDict(results_list)
return results, augs
def add_mlm_args(parser):
parser.add_argument(
"--mask_prob", type=float, default=0.15, help="Mask probability."
)
parser.add_argument(
"--unmask_prob",
type=float,
default=0.25,
help="Probability to leave mask unchanged.",
)
parser.add_argument(
"--randomize_prob",
type=float,
default=0.25,
help="Probability to use a random token instead of mask.",
)
def add_aug_args(parser):
parser.add_argument(
"--augment_samples",
type=int,
default=2,
help="Number of augmented samples to generate per question",
)
parser.add_argument(
"--sample_temperature", type=float, default=1.0, help="Sample temperature."
)
def add_train_args(parser):
"""Add arguments needed in train.py."""
add_train_test_args(parser)
base_trainer.add_train_args(parser)
add_mlm_args(parser)
add_aug_args(parser)
parser.add_argument(
"--eval_per_n_samples",
type=int,
default=12500,
help="Number of samples between successive evaluations.",
)
parser.add_argument(
"--gradient_accumulation",
type=int,
default=4,
)
parser.add_argument("--lr", type=float, default=0.025, help="Learning rate.")
parser.add_argument(
"--warmup_steps", type=float, default=500, help="Warmup optimizer steps."
)
parser.add_argument(
"--power_decay", type=float, default=-0.5, help="Power of the decay."
)
parser.add_argument(
"--decay_forever",
type=lambda s: s.lower().startswith("t"),
default=True,
help="Whether the decay should reach end_lr at the end of training, or in the limit to infinity",
)
parser.add_argument("--l2_wd", type=float, default=0.01, help="AdamW weight decay.")
parser.add_argument("--eps", type=float, default=1e-6, help="Adam epsilon.")
parser.add_argument("--beta_1", type=float, default=0.9, help="Adam beta_1.")
parser.add_argument("--beta_2", type=float, default=0.98, help="Adam beta_2.")
parser.add_argument(
"--num_epochs",
type=int,
default=2,
help="Number of epochs for which to train. Negative means forever.",
)
parser.add_argument(
"--metric_name",
type=str,
default="NLL",
help="Name of dev metric to determine best checkpoint.",
)
parser.add_argument(
"--max_grad_norm",
type=float,
default=5.0,
help="Maximum gradient norm for gradient clipping.",
)
def add_train_test_args(parser):
parser.add_argument(
"--num_workers",
type=int,
default=4,
help="Number of sub-processes to use per data loader.",
)
parser.add_argument(
"--batch_size",
type=int,
default=64,
help="Batch size per GPU. Scales automatically when \
multiple GPUs are available.",
)
parser.add_argument(
"--use_squad_v2",
type=lambda s: s.lower().startswith("t"),
default=True,
help="Whether to use SQuAD 2.0 (unanswerable) questions.",
)
# Model params
parser.add_argument(
"--dim",
type=int,
default=768,
help="Embedding dimension.",
)
parser.add_argument(
"--n_heads",
type=int,
default=12,
help="Attention heads.",
)
parser.add_argument(
"--ff_dim",
type=int,
default=3072,
help="Feedforward dimension.",
)
parser.add_argument(
"--activation",
choices=["relu", "gelu"],
default="gelu",
help="Feedforward activation function.",
)
parser.add_argument(
"--dropout",
type=float,
default=0.1,
help="Dropout probability.",
)
parser.add_argument(
"--attn_dropout",
type=float,
default=0.1,
help="Dropout probability for attention weights within self attn.",
)
parser.add_argument(
"--act_dropout",
type=float,
default=0.0,
help="Dropout probability after activation within FF.",
)
parser.add_argument(
"--n_layers",
type=int,
default=12,
help="Number of layers.",
)
parser.add_argument(
"--max_positions",
type=int,
default=512,
help="Maximum number of tokens.",
)
parser.add_argument(
"--prenorm",
type=lambda s: s.lower().startswith("t"),
default=False,
help="Whether to put LayerNorm after the residual or before.",
)
```
#### File: squad/trainer/trainer.py
```python
import numpy as np
import random
import torch
import torch.nn as nn
import os
import trainer.util as util
from torch.utils.tensorboard import SummaryWriter
from json import dumps
import queue
import shutil
class Trainer:
def __init__(self, is_train=True):
super().__init__()
self.state_dict = None
self.is_train = is_train
def setup(self, args):
log = util.get_logger(args.save_dir, args.name)
tbx = SummaryWriter(args.save_dir)
if self.is_train and args.resume_dir:
checkpoint_path = os.path.join(args.resume_dir, "checkpoint.pth.tar")
self.state_dict = torch.load(checkpoint_path)
self.args = self.state_dict["args"]
self.args.save_dir = args.save_dir
self.args.name = args.name
args = self.args
log.info("Resuming from checkpoint: {}".format(checkpoint_path))
self.args = args
log.info(f"Args: {dumps(vars(args), indent=4, sort_keys=True)}")
if self.is_train:
log.info(f"Using random seed {args.seed}...")
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
self.log = log
self.tbx = tbx
return args, log, tbx
def setup_saver(self):
args = self.args
log = self.log
self.saver = ModelSaver(
args.save_dir,
max_checkpoints=args.max_checkpoints,
metric_name=args.metric_name,
maximize_metric=args.maximize_metric,
log=log,
)
def setup_random(self):
log = self.log
if self.state_dict is not None:
log.info("Reloading random state...")
random.setstate(self.state_dict["random"])
np.random.set_state(self.state_dict["np.random"])
torch.set_rng_state(self.state_dict["torch.random"])
torch.cuda.set_rng_state_all(self.state_dict["torch.cuda.random"])
def setup_model(self, model, device):
log = self.log
args = self.args
if self.state_dict is not None:
log.info("Reloading model...")
model.load_state_dict(self.state_dict["model"])
elif args.load_path or not self.is_train:
log.info(f"Loading model from {args.load_path}...")
model, _ = ModelSaver.load_model(
model, args.load_path, device, strict=(not self.is_train)
)
model = nn.DataParallel(model, self.args.gpu_ids)
model = model.to(device)
self.model = model
self.device = device
log.info(model)
return model
def setup_optimizer(self, optimizer, scheduler, scaler):
log = self.log
if self.state_dict is not None:
log.info("Reloading optimizer, scheduler, scaler...")
optimizer.load_state_dict(self.state_dict["optimizer"])
scheduler.load_state_dict(self.state_dict["scheduler"])
scaler.load_state_dict(self.state_dict["scaler"])
self.optimizer = optimizer
self.scheduler = scheduler
self.scaler = scaler
return optimizer, scheduler, scaler
def setup_step(self, step_vars):
log = self.log
if self.state_dict is not None:
log.info("Reloading step: {}".format(self.state_dict["step"]))
return self.state_dict["step"]
return step_vars
def setup_close(self):
self.state_dict = None
def save_checkpoint(self, step_vars):
ckpt_dict = {
"args": self.args,
"random": random.getstate(),
"np.random": np.random.get_state(),
"torch.random": torch.random.get_rng_state(),
"torch.cuda.random": torch.cuda.get_rng_state_all(),
"model": self.model.module.state_dict(),
"optimizer": self.optimizer.state_dict(),
"scheduler": self.scheduler.state_dict(),
"scaler": self.scaler.state_dict(),
"step": step_vars,
}
checkpoint_path = os.path.join(self.args.save_dir, "checkpoint.pth.tar")
torch.save(ckpt_dict, checkpoint_path)
def save_best(self, step, metric_val):
self.saver.save(step, self.model.module, metric_val)
def add_train_args(parser):
parser.add_argument(
"--max_checkpoints",
type=int,
default=5,
help="Maximum number of checkpoints to keep on disk.",
)
parser.add_argument(
"--resume_dir",
type=str,
default=None,
help="Path to trainer checkpoint.",
)
parser.add_argument(
"--seed", type=int, default=224, help="Random seed for reproducibility."
)
class ModelSaver:
"""Class to save and load model checkpoints.
Save the best checkpoints as measured by a metric value passed into the
`save` method. Overwrite checkpoints with better checkpoints once
`max_checkpoints` have been saved.
Author:
<NAME> (<EMAIL>)
Args:
save_dir (str): Directory to save checkpoints.
max_checkpoints (int): Maximum number of checkpoints to keep before
overwriting old ones.
metric_name (str): Name of metric used to determine best model.
maximize_metric (bool): If true, best checkpoint is that which maximizes
the metric value passed in via `save`. Otherwise, best checkpoint
minimizes the metric.
log (logging.Logger): Optional logger for printing information.
"""
def __init__(
self, save_dir, max_checkpoints, metric_name, maximize_metric=False, log=None
):
super().__init__()
self.save_dir = save_dir
self.max_checkpoints = max_checkpoints
self.metric_name = metric_name
self.maximize_metric = maximize_metric
self.best_val = None
self.ckpt_paths = queue.PriorityQueue()
self.log = log
self._print(
f"Saver will {'max' if maximize_metric else 'min'}imize {metric_name}..."
)
def is_best(self, metric_val):
"""Check whether `metric_val` is the best seen so far.
Args:
metric_val (float): Metric value to compare to prior checkpoints.
"""
if metric_val is None:
# No metric reported
return False
if self.best_val is None:
# No checkpoint saved yet
return True
return (self.maximize_metric and self.best_val < metric_val) or (
not self.maximize_metric and self.best_val > metric_val
)
def _print(self, message):
"""Print a message if logging is enabled."""
if self.log is not None:
self.log.info(message)
def save(self, step, model, metric_vals):
"""Save model parameters to disk.
Args:
step (int): Total number of examples seen during training so far.
model (torch.nn.Module): Model to save.
metric_vals (tuple(float)): Determines whether checkpoint is best so far.
"""
ckpt_dict = {
"model": model.state_dict(),
"step": step,
}
checkpoint_path = os.path.join(self.save_dir, f"step_{step}.pth.tar")
torch.save(ckpt_dict, checkpoint_path)
self._print(f"Saved checkpoint: {checkpoint_path}")
if not isinstance(metric_vals, tuple):
metric_vals = (metric_vals,)
if self.is_best(metric_vals):
# Save the best model
self.best_val = metric_vals
best_path = os.path.join(self.save_dir, "best.pth.tar")
shutil.copy(checkpoint_path, best_path)
self._print(f"New best checkpoint at step {step}...")
# Add checkpoint path to priority queue (lowest priority removed first)
if self.maximize_metric:
priority_order = tuple(metric_val for metric_val in metric_vals)
else:
priority_order = tuple(-metric_val for metric_val in metric_vals)
self.ckpt_paths.put((priority_order, checkpoint_path))
# Remove a checkpoint if more than max_checkpoints have been saved
if self.ckpt_paths.qsize() > self.max_checkpoints:
_, worst_ckpt = self.ckpt_paths.get()
try:
os.remove(worst_ckpt)
self._print(f"Removed checkpoint: {worst_ckpt}")
except OSError:
# Avoid crashing if checkpoint has been removed or protected
pass
@staticmethod
def load_model(model, checkpoint_path, device, strict):
"""Load model parameters from disk.
Args:
model (torch.nn.Module): Load parameters into this model.
checkpoint_path (str): Path to checkpoint to load.
device: device to reload to
Returns:
model (torch.nn.Module): Model loaded from checkpoint.
step (int): Step at which checkpoint was saved.
"""
ckpt_dict = torch.load(checkpoint_path, map_location=device)
model.load_state_dict(ckpt_dict["model"], strict=strict)
step = ckpt_dict["step"]
return model, step
```
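A small, self-contained sketch of `ModelSaver` in isolation; the save directory is a placeholder and the metric values are made up.
```python
import os
import torch.nn as nn
from trainer.trainer import ModelSaver  # module path per the header above

save_dir = "save/demo"  # placeholder directory
os.makedirs(save_dir, exist_ok=True)
saver = ModelSaver(save_dir, max_checkpoints=2, metric_name="NLL", maximize_metric=False)
model = nn.Linear(4, 2)
saver.save(step=100, model=model, metric_vals=3.2)  # first checkpoint also becomes best.pth.tar
saver.save(step=200, model=model, metric_vals=2.9)  # lower NLL, so best.pth.tar is replaced
```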
|
{
"source": "Jeff-Duda/pygraf",
"score": 3
}
|
#### File: pygraf/adb_graphics/conversions.py
```python
import numpy as np
def k_to_c(field, **kwargs):
''' Conversion from Kelvin to Celsius '''
return field - 273.15
def k_to_f(field, **kwargs):
    ''' Conversion from Kelvin to Fahrenheit '''
return (field - 273.15) * 9/5 + 32
def kgm2_to_in(field, **kwargs):
''' Conversion from kg per m^2 to inches '''
return field * 0.03937
def magnitude(a, b, **kwargs):
''' Return the magnitude of vector components '''
return np.sqrt(np.square(a) + np.square(b))
def m_to_dm(field, **kwargs):
''' Conversion from meters to decameters '''
return field / 10.
def m_to_in(field, **kwargs):
''' Conversion from meters to inches '''
return field * 39.3701
def m_to_kft(field, **kwargs):
''' Conversion from meters to kilofeet '''
return field / 304.8
def m_to_mi(field, **kwargs):
''' Conversion from meters to miles '''
return field / 1609.344
def ms_to_kt(field, **kwargs):
''' Conversion from m s-1 to knots '''
return field * 1.9438
def pa_to_hpa(field, **kwargs):
''' Conversion from Pascals to hectopascals '''
return field / 100.
def percent(field, **kwargs):
''' Conversion from values between 0 - 1 to percent '''
return field * 100.
def to_micro(field, **kwargs):
''' Convert field to micro '''
return field * 1E6
def to_micrograms_per_m3(field, **kwargs):
''' Convert field to micrograms per cubic meter '''
return field * 1E9
def vvel_scale(field, **kwargs):
''' Scale vertical velocity for plotting '''
return field * -10
def vort_scale(field, **kwargs):
''' Scale vorticity for plotting '''
return field / 1E-05
def weasd_to_1hsnw(field, **kwargs):
    ''' Conversion from snow water equivalent to snow (10:1 ratio) '''
return field * 10.
```
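A quick sanity check of a couple of the conversions above; the import path is inferred from the file header.
```python
import numpy as np
from adb_graphics.conversions import k_to_f, pa_to_hpa  # module path per the header above

print(k_to_f(np.array([273.15, 300.0])))  # [32.   80.33]
print(pa_to_hpa(101325.0))                # 1013.25
```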
#### File: pygraf/adb_graphics/errors.py
```python
class Error(Exception):
'''Base class for handling errors'''
class FieldNotUnique(Error):
'''Exception raised when multiple Grib fields are found with input parameters'''
class GribReadError(Error):
'''Exception raised when there is an error reading the grib file.'''
def __init__(self, name, message="was not found"):
self.name = name
self.message = message
super().__init__(message)
def __str__(self):
return f'"{self.name}" {self.message}'
class NoGraphicsDefinitionForVariable(Error):
'''Exception raised when there is no configuration for the variable.'''
class LevelNotFound(Error):
    '''Exception raised when the requested level is not found.'''
class OutsideDomain(Error):
    '''Exception raised when the requested point lies outside the model domain.'''
```
#### File: pygraf/tests/test_grib.py
```python
import datetime
import numpy as np
from matplotlib import colors as mcolors
import xarray as xr
import adb_graphics.datahandler.gribdata as gribdata
import adb_graphics.datahandler.gribfile as gribfile
DATAARRAY = xr.core.dataarray.DataArray
def test_UPPData(natfile, prsfile):
''' Test the UPPData class methods on both types of input files. '''
nat_ds = gribfile.GribFile(natfile)
prs_ds = gribfile.GribFile(prsfile)
class UPP(gribdata.UPPData):
''' Test class needed to define the values as an abstract class '''
def values(self, level=None, name=None, **kwargs):
return 1
upp_nat = UPP(nat_ds.contents, fhr=2, filetype='nat', short_name='temp')
upp_prs = UPP(prs_ds.contents, fhr=2, short_name='temp')
# Ensure appropriate typing and size (where applicable)
for upp in [upp_nat, upp_prs]:
assert isinstance(upp.anl_dt, datetime.datetime)
assert isinstance(upp.clevs, np.ndarray)
assert isinstance(upp.date_to_str(datetime.datetime.now()), str)
assert isinstance(upp.fhr, str)
assert isinstance(upp.field, DATAARRAY)
assert isinstance(upp.latlons(), list)
assert isinstance(upp.lev_descriptor, str)
assert isinstance(upp.ncl_name(upp.vspec), str)
assert isinstance(upp.numeric_level(), tuple)
assert isinstance(upp.spec, dict)
assert isinstance(upp.valid_dt, datetime.datetime)
assert isinstance(upp.vspec, dict)
# Test for appropriate date formatting
test_date = datetime.datetime(2020, 12, 5, 12)
assert upp.date_to_str(test_date) == '20201205 12 UTC'
def test_fieldData(prsfile):
''' Test the fieldData class methods on a prs file'''
prs_ds = gribfile.GribFile(prsfile)
field = gribdata.fieldData(prs_ds.contents, fhr=2, level='500mb', short_name='temp')
assert isinstance(field.cmap, mcolors.Colormap)
assert isinstance(field.colors, np.ndarray)
assert isinstance(field.corners, list)
assert isinstance(field.ticks, int)
assert isinstance(field.units, str)
assert isinstance(field.values(), DATAARRAY)
assert isinstance(field.aviation_flight_rules(field.values()), DATAARRAY)
assert isinstance(field.wind(True), list)
assert len(field.corners) == 4
assert len(field.wind(True)) == 2
assert len(field.wind('850mb')) == 2
for component in field.wind(True):
assert isinstance(component, DATAARRAY)
# Test retrieving other values
assert np.array_equal(field.values(), field.values(name='temp', level='500mb'))
# Return zeros by subtracting same field
diff = field.field_diff(field.values(), variable2='temp', level2='500mb')
assert isinstance(diff, DATAARRAY)
assert not np.any(diff)
# Test transform
assert np.array_equal(field.get_transform('conversions.k_to_f', field.values()), \
(field.values() - 273.15) * 9/5 +32)
field2 = gribdata.fieldData(prs_ds.contents, fhr=2, level='ua', short_name='ceil')
transforms = field2.vspec.get('transform')
assert np.array_equal(field2.get_transform(transforms, field2.values()), \
field2.field_diff(field2.values(), variable2='gh', level2='sfc') / 304.8)
# Expected size of values
assert len(np.shape((field.values()))) == 2
assert len(np.shape((field.values(name='u')))) == 2
assert len(np.shape((field.values(name='u', level='850mb')))) == 2
def test_profileData(natfile):
''' Test the profileData class methods on a nat file'''
nat_ds = gribfile.GribFile(natfile)
loc = ' BNA 9999 99999 36.12 86.69 597 Nashville, TN\n'
profile = gribdata.profileData(nat_ds.contents,
fhr=2,
filetype='nat',
loc=loc,
short_name='temp',
)
assert isinstance(profile.get_xypoint(40., -100.), tuple)
assert isinstance(profile.values(), DATAARRAY)
# The values should return a single number (0) or a 1D array (1)
assert len(np.shape((profile.values(level='best', name='li')))) == 0
assert len(np.shape((profile.values(name='temp')))) == 1
```
|
{
"source": "jeffeb3/hockey_script",
"score": 3
}
|
#### File: jeffeb3/hockey_script/email_hockey.py
```python
import sys
import datetime
import optparse
import random
import smtplib
import urllib2
import json
from email.mime.text import MIMEText
import settings
''' This script invites people to come play hockey. '''
# Globals, yuk
debug = False
sunset = 24.0
def fetch_forecast(request_type):
    ''' Fetches a particular forecast from wunderground. Returns a dict (from the json). '''
if debug:
try:
with open(request_type.replace('/','_') + '.json', 'r') as fp:
return json.load(fp)
except IOError:
print "Debug mode enabled, but file missing, let's cache it"
pass
url = 'http://api.wunderground.com/api/' + settings.api_key + '/' + request_type +'/q/' + settings.location + '.json'
j = json.load(urllib2.urlopen(url))
if debug:
with open(request_type.replace('/','_') + '.json', 'w') as fp:
json.dump(j, fp, sort_keys=True, indent=4)
return j
def is_dst(date):
    ''' Rough check for US daylight saving time: DST runs from the second
        Sunday of March to the first Sunday of November. '''
    if date.month < 3 or date.month > 11:
        return False
    if date.month > 3 and date.month < 11:
        return True
    # day of the month of the previous Sunday (can be <= 0 early in the month)
    previous_sun = date.day - date.isoweekday()
    if date.month == 3 and previous_sun >= 8:
        return True
    if date.month == 11 and previous_sun <= 0:
        return True
    return False
def get_snide_remark(date):
remarks = [
'If you don\'t come to hockey tomorrow, you\'ll regret it',
'Hockey is the best exercise',
'Forget the weather, just come and play hockey',
'Don\'t you want to be cool like the other kids? They all play hockey',
'Please reply immediately, or no one will know you want to come, and they will say no. Then no one will come. Then people will stop caring. Then hockey will end.',
'If you don\'t come to hockey, then no one will. You are the only person that matters at all.',
'Obesity in America hit an all time record today.',
'Only you can prevent soccer kids from playing on hockey rinks.',
'Only you can prevent lacrosse kids from playing on hockey rinks.',
'If no one plays hockey, they will turn the rink into a pickle ball court',
'Participation is mandatory. [Insert Boss\'s name here] said so, and (s)he\'s your boss.',
]
# how many weeks are left before the first post dst game?
games_before_dst = 0
test_date = date
if is_dst(date):
for i in range(52):
games_before_dst += 1
test_date += datetime.timedelta(7)
if not is_dst(test_date):
                break
if is_dst(date):
        remarks.append('The summer is almost over. There are only ' + str(games_before_dst) + ' games remaining before the end of DST')
if is_dst(date) and games_before_dst == 1:
return 'OMG! This is the last game before Daylight Savings Time! You have to come'
if is_dst(date) and not is_dst(date + datetime.timedelta(-7)):
return 'DST has made the season possible'
random.seed()
return random.choice(remarks)
def build_html_body(date, time):
# Retrieve information from the Internet
forecast = fetch_forecast('hourly10day/astronomy/forecast')
html_body = """\
<html>
<head></head>
<body>
"""
# Insert Snide Remark
html_body += "<h2>" + get_snide_remark(date) + "</h2>\n"
# let's now build the HTML body contents
html_body += '<h3>Weather for ' + date.strftime("%A %B %e") + ' at ' + settings.location_name + " (" + settings.location + ")" + '</h3>\n'
found = False
    # Loop through the simple forecast, and come up with the text.
for period in forecast['forecast']['txt_forecast']['forecastday']:
date_name = date.strftime("%A")
if period['title'] == date_name:
found = True
html_body += '<p><b>' + period['title'] + '</b>: ' + period['fcttext'] + '</p>\n'
if not found:
html_body += '<p>No forecast for that day.</p>\n'
# Look through the hourly stuff
html_body += '<h3>Hourly</h3>\n'
html_body += '<p>\n'
for period in forecast['hourly_forecast']:
if int(period['FCTTIME']['mday']) == date.day:
hour = int(period['FCTTIME']['hour'])
if hour >= int(time) and hour - int(time) <= 3:
hourline = ' ' + period['FCTTIME']['civil'] + " : " + period['condition']
hourline += " Temp: " + period['temp']['english'] + "F "
hourline += " Wind: " + period['wspd']['english'] + "mph "
hourline += " Clouds: " + period['sky'] + "% "
hourline += " Chance of Precip: " + period['pop'] + "% "
if float(period['qpf']['english']) > 0.0:
hourline += " Rain: " + period['qpf']['english'] + "in "
if float(period['snow']['english']) > 0.0:
hourline += " Snow: " + period['snow']['english'] + "in "
hourline += "<br>\n"
html_body += hourline
html_body += '</p>\n'
# Set the sunset stuff
global sunset
sunset = float(forecast['sun_phase']['sunset']['hour']) + float(forecast['sun_phase']['sunset']['minute'])/60.0
sunset_obj = datetime.time(int(sunset), int((sunset % 1.0) * 60.0))
html_body += "<h3>Sunset is at " + sunset_obj.strftime('%l:%m %p') + "</h3>\n"
html_body += """
<p>Enjoy!</p><p>--Hockey Robot created by <NAME> and <NAME></p>
</body>
</html>
"""
return html_body
def send_email(subject, mail_text):
# Set up the message subject, etc. Then send it.
COMMASPACE = ', '
msg = MIMEText(mail_text, 'html')
msg['Subject'] = subject
msg['From'] = settings.email_from
msg['To'] = COMMASPACE.join(settings.email_to)
server = smtplib.SMTP('smtp.gmail.com:587')
server.starttls()
server.login(settings.email_login, settings.email_pw)
server.sendmail(settings.email_from, settings.email_to, msg.as_string())
server.quit()
if __name__ == "__main__":
parser = optparse.OptionParser()
parser.add_option('-w', '--day_of_week', default=None,
help="The day of the week, indexed from 0 at Monday.")
parser.add_option('-t', '--time', default=None,
help="The time of the event. Decimal hours out of 24. 5:30 is 17.5")
parser.add_option('-d', '--debug', default=False, action="store_true",
help="Debug, which will fetch and store the forecast, and it will not email anyone (it will print instead).")
options, _ = parser.parse_args()
if not options.day_of_week:
parser.error('Day of week not provided.')
if not options.time:
parser.error('Time not provided.')
debug = options.debug
event_date = datetime.date.today()
dow = int(options.day_of_week)
dow %= 7
while event_date.weekday() != dow:
event_date = event_date + datetime.timedelta(1)
event_time = float(options.time)
event_time %= 24.0
mail_text = build_html_body(event_date, event_time)
if event_time + 1.0 > sunset:
print 'Not sending email today. Because the sun will set too early.'
sys.exit(0)
event_time_obj = datetime.time(int(event_time), int(event_time % 1.0 * 60.0))
subject = "Who's in? " + settings.location_name + ", " + event_date.strftime("%A %B %e") + " at " + event_time_obj.strftime("%l:%M %p")
if debug:
print subject
print mail_text
else:
send_email(subject, mail_text)
print 'Sent email at ', datetime.datetime.now()
```
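The DST bookkeeping above drives both the snide remarks and the game countdown. The following is a minimal sketch (not part of the repository) of how `is_dst` behaves around the 2016 US daylight-saving boundaries, assuming the module can be imported (it pulls in a local `settings` module at import time):
```python
# Quick checks of is_dst around the 2016 US DST boundaries
# (DST started March 13 and ended November 6 that year).
import datetime
from email_hockey import is_dst

assert not is_dst(datetime.date(2016, 1, 15))   # winter
assert is_dst(datetime.date(2016, 7, 4))        # mid-summer
assert is_dst(datetime.date(2016, 3, 21))       # after the second Sunday of March
assert not is_dst(datetime.date(2016, 11, 20))  # after the first Sunday of November
```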
|
{
"source": "jeffeb3/MarlinConfiguration",
"score": 2
}
|
#### File: jeffeb3/MarlinConfiguration/configMarlin.py
```python
import sys
import os.path
from optparse import OptionParser
import subprocess
import logging
import shutil
import fileinput
# import yaml from pyyaml: http://pyyaml.org/wiki/PyYAMLDocumentation
from yaml import load as loadYaml
try:
from yaml import CLoader as Loader
except ImportError:
from yaml import Loader
# Set the default logging to INFO
logging.basicConfig(format='%(levelname)s - %(message)s')
gLog = logging.getLogger()
gLog.setLevel(logging.INFO)
def getOptions():
''' Set the user interface for this application. '''
parser = OptionParser()
parser.description = \
'''
This tool is used to generate configurations for Marlin, from a
specific version. It uses a specific diff format to configure
changes to the software, and tries to be as generic as possible,
but it's written specifically for the changes needed for the machines
made by vicious1.com.
License is MIT.
'''
parser.add_option('-c', '--config', dest='config',
help='Config file that describes the changes needed to configure Marlin')
parser.add_option('-n', '--name', dest='name', default='Marlin_MPCNC',
help='Name that describes this firmware')
parser.add_option('-v', '--verbose', dest='verbose', default=False, action='store_true',
help='Set to see more of the steps being taken. Useful for debugging')
parser.add_option('-g', '--git-server', dest='git_server', default="https://github.com/MarlinFirmware/Marlin",
help='Location to get Marlin from, in standard git clone syntax.')
parser.add_option('-t', '--git-tag', dest='git_tag', default="1.1.x",
help='Tag or branch name to use for the checkout')
options, _ = parser.parse_args()
if not options.config or not os.path.exists(options.config):
gLog.fatal("Missing Config. Aborting.")
sys.exit(-1)
if options.verbose:
# Allow more message through.
gLog.setLevel(logging.DEBUG)
gLog.debug('options:\n' + str(options))
return options
def cloneRepo(git_server, git_tag):
''' Clone a repo, to a specific branch or tag. '''
gLog.info('Cloning Marlin')
gLog.debug("Marlin repo: '{}'".format(git_server))
gLog.debug("Marlin tag: '{}'".format(git_tag))
# Clone the repo, with the appropriate tag
proc = subprocess.Popen(['git', 'clone', '-b', git_tag, git_server, 'Marlin'],
stdout=None, stderr=None, shell=False)
proc.wait()
ok = proc.returncode == 0
if not ok:
gLog.fatal('Failed to clone Marlin. See above for details')
return ok
if __name__ == '__main__':
options = getOptions()
# read in the config (difference) file
with open(options.config, 'r') as configFile:
config = loadYaml(configFile, Loader=Loader)
gLog.debug("Config:\n" + str(config))
if not cloneRepo(options.git_server, options.git_tag):
sys.exit(-2)
for filename in config:
gLog.debug('editing {}'.format('Marlin' + os.sep + filename))
        # fileinput with inplace=True redirects stdout into the file being edited,
        # so printing each (possibly replaced) line below rewrites it in place.
        file = fileinput.FileInput('Marlin' + os.sep + filename, inplace=True)
for line in file:
for (before, after) in config[filename]:
if before in line:
line = line.replace(before, after)
print line,
    # Copy the Marlin/Marlin folder to its own place.
shutil.copytree('Marlin' + os.sep + 'Marlin', options.name)
shutil.move(options.name + os.sep + 'Marlin.ino', options.name + os.sep + options.name + '.ino')
# remove the example configurations
shutil.rmtree(options.name + os.sep + 'example_configurations')
# Clean up the Marlin folder
shutil.rmtree('Marlin')
```
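The script expects the YAML config to decode into a mapping from a file path (relative to the cloned `Marlin` checkout) to a list of before/after replacement pairs, as consumed by the `for (before, after) in config[filename]` loop above. A hypothetical config, shown as the Python structure `loadYaml` would produce (the define names are purely illustrative):
```python
# Hypothetical parsed config for configMarlin.py: keys are paths under the
# cloned "Marlin" directory, values are lists of [before, after] replacements.
example_config = {
    "Marlin/Configuration.h": [
        ["#define BAUDRATE 250000", "#define BAUDRATE 115200"],
        ["#define MOTHERBOARD BOARD_RAMPS_14_EFB",
         "#define MOTHERBOARD BOARD_RAMPS_14_EFF"],
    ],
}
```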
|
{
"source": "jeffeb3/sandypi",
"score": 3
}
|
#### File: server/database/playlist_elements_tables.py
```python
import sqlalchemy
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.sql import func
from server import db
# creates base class with common methods
class PlaylistElements(object):
# returns the list of elements inside a playlist
@classmethod
def get_playlist_elements(cls):
if cls == PlaylistElements:
raise NotImplementedError("Must use a table class to query the elements")
return cls.query.all()
# clear all the elements inside a table
@classmethod
def clear_elements(cls):
if cls == PlaylistElements:
raise NotImplementedError("Must use a table class to clean the table")
res = db.session.query(cls).delete()
db.session.commit()
return res
# get random drawing element from the playlist (for the shuffle element)
@classmethod
def get_random_drawing_element(cls):
return cls.query.filter(cls.drawing_id.isnot(None)).order_by(func.random()).first()
# creates sqlalchemy base class with the addition of the custom class
Base = declarative_base(cls = PlaylistElements)
Base.query = db.session.query_property()
def get_playlist_table_class(id):
if id is None: raise ValueError("A playlist id must be specified")
table_name = "_playlist_{}".format(id) # table name is prefix + table_id
# if table exist use autoload otherwise create the table
table_exist = table_name in sqlalchemy.inspect(db.engine).get_table_names()
class PTable(Base):
__tablename__ = table_name # table name
# set table args to load existing table if possible
__table_args__ = {'extend_existing': True, 'autoload': table_exist, 'autoload_with': db.get_engine()}
id = db.Column(db.Integer, primary_key=True)
element_type = db.Column(db.String(10), default="")
        drawing_id = db.Column(db.Integer, default = None) # drawing id added explicitly for possible queries
element_options = db.Column(db.String(1000), default="") # element options
# change class attrs manually to avoid getting a warning ("This declarative base already contains a class with the same class name and module name")
PTable.__name__ = table_name
PTable.__qualname__ = table_name
PTable.__module__ = table_name
if not table_exist:
PTable.__table__.create(db.get_engine())
return PTable
def create_playlist_table(id):
"""
Create a table associated to a single playlist.
The number of tables will be the same as the number of playlists.
"""
    # creating the class implicitly creates the table if it does not exist yet
    get_playlist_table_class(id)
def delete_playlist_table(id):
"""
Delete a table associated to a single playlist.
"""
p_class = get_playlist_table_class(id)
p_class.__table__.drop(db.get_engine())
```
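Because a table class is generated per playlist, adding an element means first materialising the class for that playlist id. A rough usage sketch (not repository code), assuming the Flask-SQLAlchemy `db` session shown above and the module path implied by the file header:
```python
# Hypothetical usage of the per-playlist table classes defined above.
from server import db
from server.database.playlist_elements_tables import get_playlist_table_class

PTable = get_playlist_table_class(7)   # creates table "_playlist_7" if it does not exist
db.session.add(PTable(element_type="drawing", drawing_id=42, element_options="{}"))
db.session.commit()
print(PTable.get_playlist_elements())  # all elements of playlist 7
```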
#### File: server/hw_controller/firmware_defaults.py
```python
from dotmap import DotMap
MARLIN = DotMap()
MARLIN.name = "Marlin"
MARLIN.ACK = "ok"
MARLIN.buffer_command = "M114"
MARLIN.emergency_stop = "M112"
MARLIN.buffer_timeout = 30
MARLIN.ready_message = "start"
MARLIN.position_tolerance = 0.01
def is_marlin(val):
return val == MARLIN.name
GRBL = DotMap()
GRBL.name = "Grbl"
GRBL.ACK = "ok"
GRBL.buffer_command = "?"
GRBL.emergency_stop = "!"
GRBL.buffer_timeout = 5
GRBL.ready_message = "Grbl"
def is_grbl(val):
return val == GRBL.name
def get_ACK(firmware):
if firmware == MARLIN.name:
return MARLIN.ACK
else: return GRBL.ACK
def get_buffer_command(firmware):
if firmware == MARLIN.name:
return MARLIN.buffer_command
else: return GRBL.buffer_command
def get_buffer_timeout(firmware):
if firmware == MARLIN.name:
return MARLIN.buffer_timeout
else: return GRBL.buffer_timeout
def get_emergency_stop_command(firmware):
if firmware == MARLIN.name:
return MARLIN.emergency_stop
else: return GRBL.emergency_stop
def get_ready_message(firmware):
if firmware == MARLIN.name:
return MARLIN.ready_message
else: return GRBL.ready_message
```
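The getters above simply dispatch on the firmware name and fall back to the Grbl defaults for anything that is not Marlin, for example (a sketch, not repository code; module path assumed from the file header):
```python
# Dispatch examples based on the firmware defaults defined above.
from server.hw_controller import firmware_defaults as fw

print(fw.get_buffer_command("Marlin"))                  # -> "M114"
print(fw.get_buffer_command("Grbl"))                    # -> "?"
print(fw.get_emergency_stop_command("anything else"))   # falls back to Grbl: "!"
```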
#### File: server/hw_controller/queue_manager.py
```python
from queue import Queue
import json
from server.database.playlist_elements import ShuffleElement
class QueueManager():
def __init__(self, app, socketio):
self._isdrawing = False
self._element = None
self.app = app
self.socketio = socketio
self.q = Queue()
def is_drawing(self):
return self._isdrawing
def is_queue_empty(self):
return not self._isdrawing and len(self.q.queue)==0
def set_is_drawing(self, dr):
self._isdrawing = dr
def get_element(self):
return self._element
def set_element(self, element):
self.app.logger.info("Code: {}".format(element))
self._element = element
self.set_is_drawing(True)
# stop the current drawing and start the next
def stop_drawing(self):
self.app.feeder.stop()
# add an element to the queue
def queue_element(self, element, show_toast=True):
if self.q.empty() and not self.is_drawing():
self.start_element(element)
return
self.app.logger.info("Adding {} to the queue".format(element.drawing_id))
self.q.put(element)
if show_toast:
self.app.semits.show_toast_on_UI("Element added to the queue")
self.send_queue_status()
# return the content of the queue as a string
def queue_str(self):
return str(self.q.queue)
def get_queue(self):
return self.q.queue
# clear the queue
def clear_queue(self):
self.q.queue.clear()
def set_new_order(self, elements):
self.clear_queue()
for el in elements:
            if el != 0:
self.q.put(el)
self.send_queue_status()
# remove the first element with the given code
def remove(self, code):
tmp = Queue()
is_first = True
for c in self.q.queue:
if c == code and is_first:
is_first = False
else:
tmp.put(c)
self.q = tmp
# queue length
def queue_length(self):
return self.q.qsize()
    def update_status(self):
        # This method should ask the feeder for an updated status
        # (e.g. whether it is drawing, the queue content and anything else necessary)
        pass
# start the next drawing of the queue
# by default will start it only if not already printing something
# with "force_stop = True" will stop the actual drawing and start the next
def start_next(self, force_stop=False):
if(self.is_drawing()):
if not force_stop:
return False
try:
if self.queue_length() > 0:
element = self.q.queue.popleft()
self.start_element(element)
self.app.logger.info("Starting next element: {}".format(element.type))
return True
self._element = None
return False
except Exception as e:
self.app.logger.error("An error occured while starting a new drawing from the queue:\n{}".format(str(e)))
self.start_next()
# This method send a "start" command to the bot with the element
def start_element(self, element):
element = element.before_start(self.app)
if not element is None:
self.app.logger.info("Sending gcode start command")
self.app.feeder.start_element(element, force_stop = True)
else: self.start_next()
def send_queue_status(self):
elements = list(map(lambda x: str(x), self.q.queue)) if len(self.q.queue) > 0 else [] # converts elements to json
res = {
"current_element": str(self._element),
"elements": elements
}
self.app.semits.emit("queue_status", json.dumps(res))
```
#### File: server/sockets_interface/socketio_callbacks.py
```python
import json
import shutil
import os
from server import socketio, app, db
from server.utils import settings_utils, software_updates
from server.database.models import Playlists
from server.database.playlist_elements import DrawingElement, GenericPlaylistElement
from server.database.models import UploadedFiles, Playlists
# request to check if a new version of the software is available
@socketio.on('software_updates_check')
def handle_software_updates_check():
result = software_updates.compare_local_remote_tags()
if result:
if result["behind_remote"]:
toast = """A new update is available ({0})\n
Your version is {1}\n
Check the github page to update to the latest version.
""".format(result["remote_latest"], result["local"])
socketio.emit("software_updates_response", toast)
# TODO split in multiple files?
# --------------------------------------------------------- PLAYLISTS CALLBACKS -------------------------------------------------------------------------------
# delete a playlist
@socketio.on("playlist_delete")
def playlist_delete(id):
try:
Playlists.delete_playlist(id)
app.logger.info("Playlist code {} deleted".format(id))
except Exception as e:
app.logger.error("'Delete playlist code {}' error".format(id))
playlist_refresh()
# save the changes to the playlist
@socketio.on("playlist_save")
def playlist_save(playlist):
playlist = json.loads(playlist)
pl = Playlists.create_playlist() if ((not "id" in playlist) or (playlist["id"] == 0)) else Playlists.get_playlist(playlist['id'])
pl.clear_elements()
pl.name = playlist['name']
pl.add_element(playlist['elements'])
pl.save()
playlist_refresh()
# adds a playlist to the drawings queue
@socketio.on("playlist_queue")
def playlist_queue(code):
item = db.session.query(Playlists).filter(Playlists.id==code).one()
elements = item.get_elements()
for i in elements:
app.qmanager.queue_element(i, show_toast = False)
@socketio.on("playlists_refresh")
def playlist_refresh():
playlists = db.session.query(Playlists).order_by(Playlists.edit_date.desc()).all()
    pls = list(map(lambda el: el.to_json(), playlists))
    app.semits.emit("playlists_refresh_response", pls)
# --------------------------------------------------------- SETTINGS CALLBACKS -------------------------------------------------------------------------------
# settings callbacks
@socketio.on("settings_save")
def settings_save(data, is_connect):
settings_utils.save_settings(data)
settings = settings_utils.load_settings()
#app.leds_controller.update_settings(settings) # TODO update leds controller settings
app.feeder.update_settings(settings)
app.semits.show_toast_on_UI("Settings saved")
# updating feeder
if is_connect:
app.logger.info("Connecting device")
app.feeder.connect()
if app.feeder.is_connected():
app.semits.show_toast_on_UI("Connection to device successful")
else:
app.semits.show_toast_on_UI("Device not connected. Opening a fake serial port.")
@socketio.on("settings_request")
def settings_request():
settings = settings_utils.load_settings()
settings["serial"]["port"]["available_values"] = app.feeder.serial_ports_list()
settings["serial"]["port"]["available_values"].append("FAKE")
app.semits.emit("settings_now", json.dumps(settings))
@socketio.on("send_gcode_command")
def send_gcode_command(command):
app.feeder.send_gcode_command(command)
@socketio.on("settings_shutdown_system")
def settings_shutdown_system():
app.semits.show_toast_on_UI("Shutting down the device")
os.system("sudo shutdown now")
@socketio.on("settings_reboot_system")
def settings_reboot_system():
app.semits.show_toast_on_UI("Rebooting system...")
os.system("sudo reboot")
# --------------------------------------------------------- DRAWINGS CALLBACKS -------------------------------------------------------------------------------
@socketio.on("drawing_queue")
def drawing_queue(code):
element = DrawingElement(drawing_id=code)
app.qmanager.queue_element(element)
@socketio.on("drawing_delete")
def drawing_delete(code):
item = db.session.query(UploadedFiles).filter_by(id=code).first()
# TODO should delete the drawing also from every playlist
try:
if not item is None:
db.session.delete(item)
db.session.commit()
shutil.rmtree(app.config["UPLOAD_FOLDER"] +"/" + str(code) +"/")
app.logger.info("Drawing code {} deleted".format(code))
app.semits.show_toast_on_UI("Drawing deleted")
except Exception as e:
app.logger.error("'Delete drawing code {}' error".format(code))
@socketio.on("drawings_refresh")
def drawings_refresh():
rows = db.session.query(UploadedFiles).order_by(UploadedFiles.edit_date.desc())
res = []
for r in rows:
res.append({"id": r.id, "filename": r.filename})
app.semits.emit("drawings_refresh_response", json.dumps(res))
# --------------------------------------------------------- QUEUE CALLBACKS -------------------------------------------------------------------------------
@socketio.on("queue_get_status")
def queue_get_status():
app.qmanager.send_queue_status()
@socketio.on("queue_set_order")
def queue_set_order(elements):
app.qmanager.set_new_order(map(lambda e: GenericPlaylistElement.create_element_from_dict(e), json.loads(elements)))
@socketio.on("queue_stop_drawing")
def queue_stop_drawing():
app.semits.show_toast_on_UI("Stopping drawing...")
app.qmanager.stop_drawing()
if not app.qmanager.is_drawing(): # if the drawing was the last in the queue must send the updated status
app.qmanager.send_queue_status()
# --------------------------------------------------------- LEDS CALLBACKS -------------------------------------------------------------------------------
@socketio.on("leds_set_color")
def leds_set_color(data):
color = json.loads(data)
#app.leds_controller.set_color((color["r"], color["g"], color["b"])) # TODO uncomment when ready
# --------------------------------------------------------- MANUAL CONTROL -------------------------------------------------------------------------------
@socketio.on("control_emergency_stop")
def control_emergency_stop():
app.feeder.emergency_stop()
```
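Each handler above is keyed by a Socket.IO event name, so any Socket.IO client can drive the server by emitting those events. A minimal sketch with the python-socketio client (host and port are assumptions, not part of the repository):
```python
# Hypothetical client-side trigger for the callbacks above using python-socketio.
import socketio

sio = socketio.Client()
sio.connect("http://localhost:5000")   # assumed address of the sandypi server
sio.emit("drawing_queue", 3)           # queue the drawing with id 3
sio.emit("queue_get_status")           # ask the server to broadcast the queue status
sio.disconnect()
```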
#### File: jeffeb3/sandypi/setup.py
```python
from setuptools import setup
from setuptools.command.develop import develop
from setuptools.command.install import install
import time
import platform
import os
from server.utils import settings_utils
class PostDevelopCommand(develop):
def run(self):
develop.run(self)
print("Running post develop script")
# add here post develop install scripts
print("Post develop script done")
class PostInstallCommand(install):
def run(self):
install.run(self)
print("Running post install script")
# add here post install scripts
print("Post install script done")
setup(
name='server',
packages=['server'],
include_package_data=True,
install_requires=[
'flask', 'sqlalchemy',
],
cmdclass={
'develop': PostDevelopCommand,
'install': PostInstallCommand,
},
)
```
#### File: jeffeb3/sandypi/start.py
```python
import os
import platform
import sys
import getopt
import getpass
def generate_start_file(folder):
if platform.system() == "Windows":
print("You are running windows")
print("Preparing start.bat")
file_path = folder + "\\start.bat"
lines = [
"{}\n".format(folder[0:2]), # change drive
"cd {}\n".format(folder), # go to the correct folder
"call .\\env\\Scripts\\activate.bat\n", # activate the environment
"echo Server starting\n", # echo that the server is starting
"flask run --host=0.0.0.0\n"] # start the server
else:
print("You are running linux")
print("Preparing start.sh")
file_path = folder + "/start.sh"
lines = [
"#!/usr/bin/env bash\n",
"cd {}\n".format(folder), # go to the correct folder
"source env/bin/activate\n", # activate the environment
"chmod 777 .\n", # changing permission to the files otherwise cannot use the db
"touch 'server.started'\n", # create a file to say that the server has been started
"flask run --host=0.0.0.0\n"] # start the server
with open(file_path, "w") as f:
f.writelines(lines)
def turn_autostart_on(folder, debug=False):
if platform.system() == "Windows":
print("Adding a bat file to the start folder")
USER_NAME = getpass.getuser()
file_path = "{}\\start.bat".format(folder)
file_runner = "{}\\run_hidden.vbs".format(folder)
bat_path = "C:\\Users\\{}\\AppData\\Roaming\\Microsoft\\Windows\\Start Menu\\Programs\\Startup".format(USER_NAME)
print("Adding '{0}' to the autostart folder:\n{1}".format(file_path, bat_path))
with open(bat_path + '\\' + "open.bat", "w+") as bat_file:
if debug:
bat_file.write('start "" {}'.format(file_path))
else:
bat_file.write('start {0} {1}'.format(file_runner, file_path))
else:
print("Adding a sh file to the start folder")
file_path = "{}/start.sh".format(folder)
file_runner = "/etc/rc.local"
print("Adding '{0}' to the autostart script:\n{1}".format(file_path, file_runner))
lines = [ "{} &\n".format(file_path) ] # call start.sh. IMPORTANT: must use '&' to let the script close correctly otherwise the pi will get stuck
with open(file_runner, "r+") as f:
d = f.readlines()
f.seek(0)
already_present = False
for i in d:
if file_path in i:
already_present = True
elif "exit 0" in i and len(i) <10:
if not already_present:
f.writelines(lines)
f.write(i)
else:
f.write(i)
os.system("chmod +x {}".format(file_runner))
print("The server will start automatically from the next boot")
def turn_autostart_off(folder):
if platform.system() == "Windows":
print("You are running windows")
print("Removing line from the start bat")
USER_NAME = getpass.getuser()
file_path = "{}\\start.bat".format(folder)
bat_path = "C:\\Users\\{}\\AppData\\Roaming\\Microsoft\\Windows\\Start Menu\\Programs\\Startup".format(USER_NAME)
print("Removing '{0}' from the autostart folder:\n{1}".format(file_path, bat_path))
with open(bat_path + '\\' + "open.bat", "r+") as f:
d = f.readlines()
f.seek(0)
for i in d:
if not file_path in i:
f.write(i)
f.truncate()
else:
print("Adding a sh file to the start folder")
file_path = "{}/start.sh".format(folder)
file_runner = "/etc/rc.local"
print("Removing '{0}' to the autostart script:\n{1}".format(file_path, file_runner))
lines = [ "{} &\n".format(file_path) ] # call start.sh. IMPORTANT: must use '&' to let the script close correctly otherwise the pi will get stuck
with open(file_runner, "r+") as f:
d = f.readlines()
f.seek(0)
for i in d:
if not file_path in i:
f.write(i)
f.truncate()
os.system("chmod +x {}".format(file_runner))
print("From now, the server will not start automatically on boot")
def start_server(folder):
print("Starting the server")
if platform.system() == "Windows":
os.system("{}\\start.bat".format(folder))
else:
os.system("chmod +x {}/start.sh".format(folder))
os.system("{}/start.sh".format(folder))
def print_help():
print("""\n\nUse: 'python start.py' to start the server. Available options are:
-h: show the help
-a:
valid values: on or off
if 'debug' is used instead of 'on', the cmd windows that starts the server will not be hidden
turns on or off the autostart of the server when the device is turned on""")
if __name__ == "__main__":
folder = os.path.dirname(os.path.realpath(__file__))
generate_start_file(folder) # generate the .bat or .sh file
try:
opts, args = getopt.getopt(sys.argv[1:], "ha:") # check args
except getopt.GetoptError:
print_help()
sys.exit(2)
for opt, arg in opts:
if opt == "-h": # show the help
print_help()
sys.exit(0)
elif opt == "-a": # turn on/off the server automatic start
if arg == "=on":
turn_autostart_on(folder)
sys.exit(0)
elif arg == "=off":
turn_autostart_off(folder)
sys.exit(0)
elif arg == "=debug":
turn_autostart_on(folder, debug=True)
sys.exit(0)
else:
print("Argument for '{}' invalid: use 'on' or 'off'".format(opt))
sys.exit(2)
else:
print("Command '{}' not recognized".format(opt))
sys.exit(2)
# if no argument was used, starts the server
start_server(folder)
```
|
{
"source": "jeffedwards/robyn_python",
"score": 2
}
|
#### File: pypref-master/pypref/prefclasses.py
```python
import numpy as np
# for data sets
import pandas as pd
# Direkt import of c-compiled bnl
import bnl
from . import btg
# General preference classes
# ==========================
# Exception class for all errors in this file
class PreferenceException(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class pref:
"""
General main class for preferences.
Consider `pypref.low` for constructing preferences and
`pypref.pref.psel` for evaluating preferences (obtaining the optima).
"""
def get_str(self):
return "(abstract preference)"
def __str__(self):
return "[Preference] " + self.get_str()
# Operators for complex preferences, [pref] {*, &, |, +} [pref]
def __mul__(self, other): return self.__get_cpx_pref(other, _pareto)
def __and__(self, other): return self.__get_cpx_pref(other, _prior)
def __or__ (self, other): return self.__get_cpx_pref(other, _intersect)
def __add__(self, other): return self.__get_cpx_pref(other, _union)
# Generate complex preference from operator
def __get_cpx_pref(self, other, constructor):
if (isinstance(self, empty)):
return other
elif (isinstance(other, empty)):
return self
else:
return constructor(self, other)
# Operator for reverse preference
def __neg__(self):
return reverse(self)
# Preference evaluation
# =====================
def psel_idx(self, df, **top_args):
"""
Performs a preference selection and returns the indices
See `psel` for details.
"""
# Empty preference => Return entire data set
if isinstance(self, empty): return(range(0, len(df)))
# Empty data set => Return empty indices
if len(df) == 0: return(np.array([], dtype = int))
# Get second argument of score_vals (first argument is next_id)
score_array = np.array(self.get_score_vals(df, 0)[1])
# Serialize pref
serial_pref = self.serialize()
# pick top-k argument
def get_arg(name, default = -1):
if name in top_args.keys():
return top_args[name]
else:
return default
# get param and check for int parameter
def get_int(name, default = -1):
val = get_arg(name, default)
if ((not isinstance(val, int)) or val < -1):
raise PreferenceException('Parameter "' + name + '" must be a positive integer value')
return val
# get param and check for bool parameter
def get_bool(name, default = False):
val = get_arg(name, default)
if (not isinstance(val, bool)):
raise PreferenceException('Parameter "' + name + '" must be Boolean')
return val
if (len(top_args) > 0): # top-k mode
topk = get_int('top')
at_least = get_int('at_least')
top_level = get_int('top_level')
and_connected = get_bool('and_connected', True)
show_level = get_bool('show_level', False)
            if topk == -1 and at_least == -1 and top_level == -1:
raise PreferenceException("Expected at least one topk parameter!")
# Call Cython BNL
res = bnl.runbnl_top(score_array, serial_pref, topk, at_least, top_level, and_connected)
if show_level:
# return tuple indices and level numbers
return pd.DataFrame(np.transpose(np.array(res)), columns = ['_indices', '_level'])
else:
# just return the tuple indices
return res[0]
else: # usual mode (no top-k selection)
# Call Cython BNL
return bnl.runbnl(score_array, serial_pref)
def psel(self, df, **top_args):
"""
Performs a preference selection and returns the tuples
Parameters
----------
df : data frame
A data frame where the preference is applied to.
topargs : One or more of the following arguments (optional):
* A `top` value of k means that the k-best tuples of the data set are returned.
This may be non-deterministic, see below for details.
* An `at_least` value of k returns the top-k tuples and additionally all tuples which are
not dominated by the worst tuple (i.e. the minima) of the Top-k set.
The number of tuples returned is greater or equal than
`at_least`. In contrast to top-k, this is deterministic.
* An `top_level` value of k returns all tuples from the k-best levels.
See below for the definition of a level.
* The logical value `and_connected` is only relevant if more than one of the
above `top`, `at_least`, `top_level` values are given.
Then `True` returns the intersection of the different top-selections
and `False` returns the union.
Returns
-------
A subset of `df` which is optimal for the given preference.
If topargs are given, then a additional column `_level` is appended.
Top-k Preference Selection
---------------------------
For a given `top` value of k the k best elements and their level values are returned.
The level values are determined as follows:
* All the maxima of a data set w.r.t. a preference have level 1.
* The maxima of the remainder, i.e., the data set without the level 1 maxima, have level 2.
* The n-th iteration of "Take the maxima from the remainder" leads to tuples of level n.
"""
if (len(top_args) > 0): # top-k selection
if('show_level' in top_args.keys() and not top_args['show_level']):
# show_level was explicitly set to false ==> do not show level
return df.iloc[self.psel_idx(df, **top_args)]
else:
# show level (true by default)
top_args['show_level'] = True
res = self.psel_idx(df, **top_args)
# get tuples
res_df = df.iloc[res['_indices']]
# insert levels column
res_df.insert(len(res_df.columns), '_level', np.array(res['_level']))
return(res_df)
else: # usual selection (no top-k)
return df.iloc[self.psel_idx(df)]
# Hasse diagramm / precedecessors / successors
# ============================================
# Get the Hasse diagram as (n,2) int matrix containing all edges
def btg(self, df):
"""
Returns the Better-Than-Graph of the preference w.r.t. a given data set `df`, i.e.,
an object of the class btg associated with the preference and the data set.
The Better-Than-Graph contains information about predecessors and successors
of tuples w.r.t. the preference. Additionally it can be visualized as a diagram
using the GraphViz DOT interpreter.
"""
return btg.btg(df, self)
# Special empty preference (neutral element for all complex preferences)
# cannot be evaluated
class empty(pref):
def get_str(self):
return "(empty)"
# Base preferences
# ================
class _basepref(pref):
"""
Base preferences are used to describe the different goals
of a preference query.
Parameters
----------
    expr : string or function
        Specifies either an expression over the data set to which the preference
        should be applied, or a function operating on the data set.
Returns
-------
A preference object. This can be used to retrieve the optimal elements w.r.t.
the induced order of preference (see examples), or, to build a complex preference
from it (see complex preferences, below examples).
Details
-------
Mathematically, all base preferences are strict weak orders
(irreflexive, transitive and negative transitive).
The three fundamental base preferences are:
low("a"), high("a") :
Search for minimal/maximal values of a, i.e.,
the induced order is the "smaller than" or "greater than" order
on the values of a. The values of a must be numeric values.
true("a") :
Searches for true values in logical expressions, i.e.,
TRUE is considered to be better than FALSE. The values of a must be
logical values. For a tuplewise evaluation of a complex logical expression
one has to use the & and | operators for logical AND/OR
(and not the "or" and "and" operators).
Examples
--------
The following two examples show two different, but semantically equivalent,
ways to define a preference maximizing "4 * mpg + hp" for the mtcars data set.
>>> pref = p.high("4 * mpg + hp")
>>> pref.psel(p.get_mtcars())
>>> pref = p.high(lambda x : 4 * x['mpg'] + x['hp'])
>>> pref.psel(p.get_mtcars())
    The following example picks those cars having 4 cylinders and a miles-per-gallon
    value less than 23. If there are no such cars, all cars are returned.
    >>> pref = p.true("(cyl == 4) & (mpg < 23.0)")
>>> pref.psel(p.get_mtcars())
Complex Preferences
-------------------
    Base preferences and complex preferences can be combined with the following operators:
    * `p1 & p2` (Prioritization): Constructs the lexicographical order of `p1` and `p2`.
    * `p1 * p2` (Pareto): Constructs the Pareto preference (for Skyline queries)
      involving the preferences `p1` and `p2`.
* `p1 | p2` (Intersection).
* `p1 + p2` (Union).
"""
eval_fun = None
eval_str = None
score_id = None
def __init__(self, expr):
if isinstance(expr, str):
self.eval_str = expr
self.eval_fun = None
elif callable(expr):
self.eval_fun = expr
self.eval_str = None
else:
raise PreferenceException("Expected string or callable in base preference argument!")
    # Get values from eval_str or eval_fun (not the actual score values, see low/high/true)
def get_val(self, df):
if (not isinstance(df, pd.core.frame.DataFrame)):
raise PreferenceException("Preference evaluation expects a DataFrame")
if (self.eval_str != None):
            # Evaluate string and return array; broadcast a scalar result to the length of df
            res = np.array(eval(self.eval_str, globals(), df), dtype = float)
            if res.ndim == 0: return np.array([float(res)] * len(df), dtype = float)
            else: return res
elif (self.eval_fun != None):
# Evaluate function over the entire data set
return self.eval_fun(df)
# String representation (without "[preference]")
def inner_str(self):
if (self.eval_str != None):
return '"' + self.eval_str + '"'
elif (self.eval_fun != None):
return '[function]'
else:
return "(null)"
# Return score values and save the id in the score data set
def get_score_vals(self, df, next_id):
self.score_id = next_id
return (next_id + 1, self.calc_score(df))
# Compare function NOT needed for BNL (implemented separately in C++) but for pred_succ functions
# Compare and Equality for all base prefs: if true, then t1 is better than t2
def cmp(self, params):
(t1, t2, score_lst) = params
return score_lst[self.score_id][t1] < score_lst[self.score_id][t2]
def eq(self, params):
(t1, t2, score_lst) = params
return score_lst[self.score_id][t1] == score_lst[self.score_id][t2]
def serialize(self):
return [ord("s")]
class low(_basepref):
def get_str(self):
return "low(" + self.inner_str() + ")"
# Score for low is the identity
def calc_score(self, df):
return([self.get_val(df)])
class high(_basepref):
def get_str(self):
return "high(" + self.inner_str() + ")"
# Score for high is the negated identity
def calc_score(self, df):
return([-self.get_val(df)])
class true(_basepref):
def get_str(self):
return "true(" + self.inner_str() + ")"
# Score for true: 0 for True and 1 for False
def calc_score(self, df):
# Use 1.*(.) for conversion from bool to float
return([1. - 1. * self.get_val(df)])
# Complex preferences
# ===================
class reverse(pref):
"""
`reverse(p)` returns the converse of the preference `p`.
`-p` is a short-cut for `reverse(p)`.
"""
p = None
def __init__(self, _p):
self.p = _p
def operator(self):
return "-"
def get_score_vals(self, df, next_id):
return self.p.get_score_vals(df, next_id)
def get_str(self):
return self.operator() + self.p.get_str()
# Inherited equality
def eq(self, params):
return self.p.eq(params)
# Compare function swaps the arguments
def cmp(self, params):
(t1, t2, score_lst) = params
return self.p.cmp((t2, t1, score_lst))
def serialize(self):
return [ord(self.operator()), self.p.serialize()]
class _complexpref(pref):
p1 = None
p2 = None
def __init__(self, _p1, _p2):
self.p1 = _p1
self.p2 = _p2
def get_score(self, df):
return self.p1.get_score(df) + self.p2.get_score(df)
def get_str(self):
return self.p1.get_str() + " " + self.operator() + " " + self.p2.get_str()
def get_score_vals(self, df, next_id):
(new_id1, score_list1) = self.p1.get_score_vals(df, next_id)
(new_id2, score_list2) = self.p2.get_score_vals(df, new_id1)
return (new_id2, score_list1 + score_list2)
# Equality for all complex preferences (Compare is preference-specific)
def eq(self, params):
return self.p1.eq(params) & self.p2.eq(params)
def serialize(self):
return [ord(self.operator()), self.p1.serialize(), self.p2.serialize()]
class _pareto(_complexpref):
def operator(self): return "*"
# use | and & instead of and/or such that these functions operate pointwise for pred/succ functions
def cmp(self, params):
return ( ((self.p1.cmp(params) | self.p1.eq(params)) & self.p2.cmp(params)) |
((self.p2.cmp(params) | self.p2.eq(params)) & self.p1.cmp(params)) )
class _prior(_complexpref):
def operator(self): return "&"
def cmp(self, params):
return self.p1.cmp(params) | (self.p1.eq(params) & self.p2.cmp(params))
class _intersect(_complexpref):
def operator(self): return "|"
def cmp(self, params):
return (self.p1.cmp(params) & self.p2.cmp(params))
class _union(_complexpref):
def operator(self): return "+"
def cmp(self, params):
return (self.p1.cmp(params) | self.p2.cmp(params))
```
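Taken together, the operators and `psel` above allow Skyline-style queries to be written directly on a pandas DataFrame. A short sketch following the docstring examples, assuming the package is importable as `pypref` and exposes `low`/`high`/`true` and the `get_mtcars` sample used in the docstrings:
```python
# Hedged usage sketch for the preference classes defined above.
import pypref as p

mtcars = p.get_mtcars()

# Pareto / Skyline: maximise mpg and hp simultaneously
sky = (p.high("mpg") * p.high("hp")).psel(mtcars)

# Prioritization: first minimise cyl, break ties by maximising mpg
best = (p.low("cyl") & p.high("mpg")).psel(mtcars)

# Top-k selection: the 5 best tuples plus their level numbers in "_level"
top5 = p.high("mpg").psel(mtcars, top=5)
```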
#### File: robyn_python/python/tests.py
```python
import unittest as ut
# local
from python import fb_robyn_func as frf
########################################################################################################################
# TESTS
class TestStuff(ut.TestCase):
def test_fd_unit_format(self):
"""
Test that formats come out as expected
"""
x_in_test = 1545354165
result = frf.unit_format(x_in=x_in_test)
self.assertEqual(result, '1.5 bln')
x_in_test = 654654
result = frf.unit_format(x_in=x_in_test)
self.assertEqual(result, '654.7 tsd')
x_in_test = 984.654
result = frf.unit_format(x_in=x_in_test)
self.assertEqual(result, '985')
if __name__ == '__main__':
ut.main()
```
|
{
"source": "jeffenhuang/tensorflow_input_image_by_tfrecord",
"score": 3
}
|
#### File: tensorflow_input_image_by_tfrecord/src/flower_train_cnn.py
```python
import tensorflow as tf # tensorflow module
import numpy as np # numpy module
import os # path join
DATA_DIR = "../data/"
TRAINING_SET_SIZE = 3380
BATCH_SIZE = 10
IMAGE_SIZE = 224
def _int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
# image object from protobuf
class _image_object:
def __init__(self):
self.image = tf.Variable([], dtype = tf.string)
self.height = tf.Variable([], dtype = tf.int64)
self.width = tf.Variable([], dtype = tf.int64)
self.filename = tf.Variable([], dtype = tf.string)
self.label = tf.Variable([], dtype = tf.int32)
def read_and_decode(filename_queue):
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)
features = tf.parse_single_example(serialized_example, features = {
"image/encoded": tf.FixedLenFeature([], tf.string),
"image/height": tf.FixedLenFeature([], tf.int64),
"image/width": tf.FixedLenFeature([], tf.int64),
"image/filename": tf.FixedLenFeature([], tf.string),
"image/class/label": tf.FixedLenFeature([], tf.int64),})
image_encoded = features["image/encoded"]
image_raw = tf.image.decode_jpeg(image_encoded, channels=3)
image_object = _image_object()
image_object.image = tf.image.resize_image_with_crop_or_pad(image_raw, IMAGE_SIZE, IMAGE_SIZE)
image_object.height = features["image/height"]
image_object.width = features["image/width"]
image_object.filename = features["image/filename"]
image_object.label = tf.cast(features["image/class/label"], tf.int64)
return image_object
def flower_input(if_random = True, if_training = True):
if(if_training):
filenames = [os.path.join(DATA_DIR, "flower-train-0000%d-of-00002" % i) for i in range(0, 1)]
else:
filenames = [os.path.join(DATA_DIR, "flower-eval-0000%d-of-00002" % i) for i in range(0, 1)]
for f in filenames:
if not tf.gfile.Exists(f):
raise ValueError("Failed to find file: " + f)
filename_queue = tf.train.string_input_producer(filenames)
image_object = read_and_decode(filename_queue)
image = tf.image.per_image_standardization(image_object.image)
# image = image_object.image
# image = tf.image.adjust_gamma(tf.cast(image_object.image, tf.float32), gamma=1, gain=1) # Scale image to (0, 1)
label = image_object.label
filename = image_object.filename
if(if_random):
min_fraction_of_examples_in_queue = 0.4
min_queue_examples = int(TRAINING_SET_SIZE * min_fraction_of_examples_in_queue)
print("Filling queue with %d images before starting to train. " "This will take a few minutes." % min_queue_examples)
num_preprocess_threads = 1
image_batch, label_batch, filename_batch = tf.train.shuffle_batch(
[image, label, filename],
batch_size = BATCH_SIZE,
num_threads = num_preprocess_threads,
capacity = min_queue_examples + 3 * BATCH_SIZE,
min_after_dequeue = min_queue_examples)
return image_batch, label_batch, filename_batch
else:
image_batch, label_batch, filename_batch = tf.train.batch(
[image, label, filename],
batch_size = BATCH_SIZE,
num_threads = 1)
return image_batch, label_batch, filename_batch
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.05)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.02, shape=shape)
return tf.Variable(initial)
def conv2d(x, W):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
def flower_inference(image_batch):
W_conv1 = weight_variable([5, 5, 3, 32])
b_conv1 = bias_variable([32])
x_image = tf.reshape(image_batch, [-1, IMAGE_SIZE, IMAGE_SIZE, 3])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1) # 112
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2) # 56
W_conv3 = weight_variable([5, 5, 64, 128])
b_conv3 = bias_variable([128])
h_conv3 = tf.nn.relu(conv2d(h_pool2, W_conv3) + b_conv3)
h_pool3 = max_pool_2x2(h_conv3) # 28
W_conv4 = weight_variable([5, 5, 128, 256])
b_conv4 = bias_variable([256])
h_conv4 = tf.nn.relu(conv2d(h_pool3, W_conv4) + b_conv4)
h_pool4 = max_pool_2x2(h_conv4) # 14
W_conv5 = weight_variable([5, 5, 256, 256])
b_conv5 = bias_variable([256])
h_conv5 = tf.nn.relu(conv2d(h_pool4, W_conv5) + b_conv5)
h_pool5 = max_pool_2x2(h_conv5) # 7
W_fc1 = weight_variable([7*7*256, 2048])
b_fc1 = bias_variable([2048])
h_pool5_flat = tf.reshape(h_pool5, [-1, 7*7*256])
h_fc1 = tf.nn.relu(tf.matmul(h_pool5_flat, W_fc1) + b_fc1)
h_fc1_drop = tf.nn.dropout(h_fc1, 1.0)
W_fc2 = weight_variable([2048, 256])
b_fc2 = bias_variable([256])
h_fc2 = tf.nn.relu(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
W_fc3 = weight_variable([256, 64])
b_fc3 = bias_variable([64])
h_fc3 = tf.nn.relu(tf.matmul(h_fc2, W_fc3) + b_fc3)
W_fc4 = weight_variable([64, 5])
b_fc4 = bias_variable([5])
y_conv = tf.nn.softmax(tf.matmul(h_fc3, W_fc4) + b_fc4)
# y_conv = tf.matmul(h_fc3, W_fc4) + b_fc4
return y_conv
def flower_train():
image_batch_out, label_batch_out, filename_batch = flower_input(if_random = False, if_training = True)
image_batch_placeholder = tf.placeholder(tf.float32, shape=[BATCH_SIZE, 224, 224, 3])
image_batch = tf.reshape(image_batch_out, (BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, 3))
label_batch_placeholder = tf.placeholder(tf.float32, shape=[BATCH_SIZE, 5])
label_offset = -tf.ones([BATCH_SIZE], dtype=tf.int64, name="label_batch_offset")
label_batch_one_hot = tf.one_hot(tf.add(label_batch_out, label_offset), depth=5, on_value=1.0, off_value=0.0)
logits_out = flower_inference(image_batch_placeholder)
# loss = tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits(labels=label_batch_one_hot, logits=logits_out))
loss = tf.losses.mean_squared_error(labels=label_batch_placeholder, predictions=logits_out)
train_step = tf.train.GradientDescentOptimizer(0.0005).minimize(loss)
saver = tf.train.Saver()
with tf.Session() as sess:
# Visualize the graph through tensorboard.
file_writer = tf.summary.FileWriter("./logs", sess.graph)
sess.run(tf.global_variables_initializer())
saver.restore(sess, "/home/yeephycho/github/tensorflow_tutorial/tf-cnn/src/checkpoint-train.ckpt")
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord, sess = sess)
for i in range(TRAINING_SET_SIZE * 100):
image_out, label_out, label_batch_one_hot_out, filename_out = sess.run([image_batch, label_batch_out, label_batch_one_hot, filename_batch])
_, infer_out, loss_out = sess.run([train_step, logits_out, loss], feed_dict={image_batch_placeholder: image_out, label_batch_placeholder: label_batch_one_hot_out})
print(i)
print(image_out.shape)
print("label_out: ")
print(filename_out)
print(label_out)
print(label_batch_one_hot_out)
print("infer_out: ")
print(infer_out)
print("loss: ")
print(loss_out)
if(i%50 == 0):
saver.save(sess, "/home/yeephycho/github/tensorflow_tutorial/tf-cnn/src/checkpoint-train.ckpt")
coord.request_stop()
coord.join(threads)
sess.close()
def flower_eval():
image_batch_out, label_batch_out, filename_batch = flower_input(if_random = False, if_training = False)
image_batch_placeholder = tf.placeholder(tf.float32, shape=[BATCH_SIZE, 224, 224, 3])
image_batch = tf.reshape(image_batch_out, (BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, 3))
label_tensor_placeholder = tf.placeholder(tf.int64, shape=[BATCH_SIZE])
label_offset = -tf.ones([BATCH_SIZE], dtype=tf.int64, name="label_batch_offset")
label_batch = tf.add(label_batch_out, label_offset)
logits_out = tf.reshape(flower_inference(image_batch_placeholder), [BATCH_SIZE, 5])
logits_batch = tf.to_int64(tf.arg_max(logits_out, dimension = 1))
correct_prediction = tf.equal(logits_batch, label_tensor_placeholder)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
saver = tf.train.Saver()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
saver.restore(sess, "/home/yeephycho/github/tensorflow_tutorial/tf-cnn/src/checkpoint-train.ckpt")
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord, sess = sess)
accuracy_accu = 0
for i in range(29):
image_out, label_out, filename_out = sess.run([image_batch, label_batch, filename_batch])
accuracy_out, logits_batch_out = sess.run([accuracy, logits_batch], feed_dict={image_batch_placeholder: image_out, label_tensor_placeholder: label_out})
accuracy_accu += accuracy_out
print(i)
print(image_out.shape)
print("label_out: ")
print(filename_out)
print(label_out)
print(logits_batch_out)
print("Accuracy: ")
print(accuracy_accu / 29)
coord.request_stop()
coord.join(threads)
sess.close()
#flower_train()
flower_eval()
```
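The feature map used by `read_and_decode` can also be consumed through the `tf.data` API available in TensorFlow 1.x, which avoids the explicit queue runners. A sketch under that assumption (file names follow the pattern above):
```python
# Alternative input pipeline sketch using tf.data (TensorFlow 1.x API).
import os
import tensorflow as tf

DATA_DIR = "../data/"
IMAGE_SIZE = 224

def _parse_example(serialized_example):
    features = tf.parse_single_example(serialized_example, features={
        "image/encoded": tf.FixedLenFeature([], tf.string),
        "image/class/label": tf.FixedLenFeature([], tf.int64),
    })
    image = tf.image.decode_jpeg(features["image/encoded"], channels=3)
    image = tf.image.resize_image_with_crop_or_pad(image, IMAGE_SIZE, IMAGE_SIZE)
    return tf.image.per_image_standardization(image), features["image/class/label"]

dataset = tf.data.TFRecordDataset([os.path.join(DATA_DIR, "flower-train-00000-of-00002")])
dataset = dataset.map(_parse_example).shuffle(1000).batch(10)
images, labels = dataset.make_one_shot_iterator().get_next()
```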
|
{
"source": "jefferdo/gpt-3-client",
"score": 3
}
|
#### File: httpcore/_async/http11.py
```python
from ssl import SSLContext
from typing import AsyncIterator, List, Tuple, Union, cast
import h11
from .._backends.auto import AsyncSocketStream
from .._bytestreams import AsyncIteratorByteStream
from .._exceptions import LocalProtocolError, RemoteProtocolError, map_exceptions
from .._types import URL, Headers, TimeoutDict
from .._utils import get_logger
from .base import AsyncByteStream, ConnectionState
from .http import AsyncBaseHTTPConnection
H11Event = Union[
h11.Request,
h11.Response,
h11.InformationalResponse,
h11.Data,
h11.EndOfMessage,
h11.ConnectionClosed,
]
logger = get_logger(__name__)
class AsyncHTTP11Connection(AsyncBaseHTTPConnection):
READ_NUM_BYTES = 64 * 1024
def __init__(self, socket: AsyncSocketStream, ssl_context: SSLContext = None):
self.socket = socket
self.ssl_context = SSLContext() if ssl_context is None else ssl_context
self.h11_state = h11.Connection(our_role=h11.CLIENT)
self.state = ConnectionState.ACTIVE
def __repr__(self) -> str:
return f"<AsyncHTTP11Connection state={self.state}>"
def info(self) -> str:
return f"HTTP/1.1, {self.state.name}"
def get_state(self) -> ConnectionState:
return self.state
def mark_as_ready(self) -> None:
if self.state == ConnectionState.IDLE:
self.state = ConnectionState.READY
async def handle_async_request(
self,
method: bytes,
url: URL,
headers: Headers,
stream: AsyncByteStream,
extensions: dict,
) -> Tuple[int, Headers, AsyncByteStream, dict]:
timeout = cast(TimeoutDict, extensions.get("timeout", {}))
self.state = ConnectionState.ACTIVE
await self._send_request(method, url, headers, timeout)
await self._send_request_body(stream, timeout)
(
http_version,
status_code,
reason_phrase,
headers,
) = await self._receive_response(timeout)
response_stream = AsyncIteratorByteStream(
aiterator=self._receive_response_data(timeout),
aclose_func=self._response_closed,
)
extensions = {
"http_version": http_version,
"reason_phrase": reason_phrase,
}
return (status_code, headers, response_stream, extensions)
async def start_tls(
self, hostname: bytes, timeout: TimeoutDict = None
) -> AsyncSocketStream:
timeout = {} if timeout is None else timeout
self.socket = await self.socket.start_tls(hostname, self.ssl_context, timeout)
return self.socket
async def _send_request(
self, method: bytes, url: URL, headers: Headers, timeout: TimeoutDict
) -> None:
"""
Send the request line and headers.
"""
logger.trace("send_request method=%r url=%r headers=%s", method, url, headers)
_scheme, _host, _port, target = url
with map_exceptions({h11.LocalProtocolError: LocalProtocolError}):
event = h11.Request(method=method, target=target, headers=headers)
await self._send_event(event, timeout)
async def _send_request_body(
self, stream: AsyncByteStream, timeout: TimeoutDict
) -> None:
"""
Send the request body.
"""
# Send the request body.
async for chunk in stream:
logger.trace("send_data=Data(<%d bytes>)", len(chunk))
event = h11.Data(data=chunk)
await self._send_event(event, timeout)
# Finalize sending the request.
event = h11.EndOfMessage()
await self._send_event(event, timeout)
async def _send_event(self, event: H11Event, timeout: TimeoutDict) -> None:
"""
Send a single `h11` event to the network, waiting for the data to
drain before returning.
"""
bytes_to_send = self.h11_state.send(event)
await self.socket.write(bytes_to_send, timeout)
async def _receive_response(
self, timeout: TimeoutDict
) -> Tuple[bytes, int, bytes, List[Tuple[bytes, bytes]]]:
"""
Read the response status and headers from the network.
"""
while True:
event = await self._receive_event(timeout)
if isinstance(event, h11.Response):
break
http_version = b"HTTP/" + event.http_version
# h11 version 0.11+ supports a `raw_items` interface to get the
# raw header casing, rather than the enforced lowercase headers.
headers = event.headers.raw_items()
return http_version, event.status_code, event.reason, headers
async def _receive_response_data(
self, timeout: TimeoutDict
) -> AsyncIterator[bytes]:
"""
Read the response data from the network.
"""
while True:
event = await self._receive_event(timeout)
if isinstance(event, h11.Data):
logger.trace("receive_event=Data(<%d bytes>)", len(event.data))
yield bytes(event.data)
elif isinstance(event, (h11.EndOfMessage, h11.PAUSED)):
logger.trace("receive_event=%r", event)
break
async def _receive_event(self, timeout: TimeoutDict) -> H11Event:
"""
Read a single `h11` event, reading more data from the network if needed.
"""
while True:
with map_exceptions({h11.RemoteProtocolError: RemoteProtocolError}):
event = self.h11_state.next_event()
if event is h11.NEED_DATA:
data = await self.socket.read(self.READ_NUM_BYTES, timeout)
# If we feed this case through h11 we'll raise an exception like:
#
# httpcore.RemoteProtocolError: can't handle event type
# ConnectionClosed when role=SERVER and state=SEND_RESPONSE
#
# Which is accurate, but not very informative from an end-user
# perspective. Instead we handle this case distinctly and treat
# it as a ConnectError.
if data == b"" and self.h11_state.their_state == h11.SEND_RESPONSE:
msg = "Server disconnected without sending a response."
raise RemoteProtocolError(msg)
self.h11_state.receive_data(data)
else:
assert event is not h11.NEED_DATA
break
return event
async def _response_closed(self) -> None:
logger.trace(
"response_closed our_state=%r their_state=%r",
self.h11_state.our_state,
self.h11_state.their_state,
)
if (
self.h11_state.our_state is h11.DONE
and self.h11_state.their_state is h11.DONE
):
self.h11_state.start_next_cycle()
self.state = ConnectionState.IDLE
else:
await self.aclose()
async def aclose(self) -> None:
if self.state != ConnectionState.CLOSED:
self.state = ConnectionState.CLOSED
if self.h11_state.our_state is h11.MUST_CLOSE:
event = h11.ConnectionClosed()
self.h11_state.send(event)
await self.socket.aclose()
def is_socket_readable(self) -> bool:
return self.socket.is_readable()
```
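The connection class above is essentially a thin asynchronous wrapper around the `h11` state machine: send a Request followed by Data/EndOfMessage events, then keep feeding received bytes into the machine until it yields Response, Data and EndOfMessage events. A minimal synchronous sketch of that same cycle over a plain socket (illustrative only, not httpcore code):
```python
# Minimal blocking sketch of the h11 client cycle mirrored by AsyncHTTP11Connection.
import socket
import h11

conn = h11.Connection(our_role=h11.CLIENT)
sock = socket.create_connection(("example.org", 80))

sock.sendall(conn.send(h11.Request(method="GET", target="/",
                                   headers=[("Host", "example.org")])))
sock.sendall(conn.send(h11.EndOfMessage()))

body = b""
while True:
    event = conn.next_event()
    if event is h11.NEED_DATA:
        conn.receive_data(sock.recv(64 * 1024))
    elif isinstance(event, h11.Response):
        print("status:", event.status_code)
    elif isinstance(event, h11.Data):
        body += bytes(event.data)
    elif isinstance(event, h11.EndOfMessage):
        break
sock.close()
print(len(body), "bytes received")
```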
|
{
"source": "jefferis/libNeuroML",
"score": 3
}
|
#### File: neuroml/test/test_arraymorph.py
```python
import neuroml.arraymorph as am
import neuroml
import numpy as np
import neuroml.writers as writers
import neuroml.loaders as loaders
try:
import unittest2 as unittest
except ImportError:
import unittest
class TestObjectBuiltMorphology(unittest.TestCase):
def setUp(self):
"""
Testing a complex hand-built morphology (from neuroml objects
rather than arrays)
"""
p = neuroml.Point3DWithDiam(x=0,y=0,z=0,diameter=50)
d = neuroml.Point3DWithDiam(x=50,y=0,z=0,diameter=50)
soma = neuroml.Segment(proximal=p, distal=d)
soma.name = 'Soma'
soma.id = 0
#now make an axon with 100 compartments:
parent = neuroml.SegmentParent(segments=soma.id)
parent_segment = soma
axon_segments = []
seg_id = 1
for i in range(100):
p = neuroml.Point3DWithDiam(x=parent_segment.distal.x,
y=parent_segment.distal.y,
z=parent_segment.distal.z,
diameter=0.1)
d = neuroml.Point3DWithDiam(x=parent_segment.distal.x+10,
y=parent_segment.distal.y,
z=parent_segment.distal.z,
diameter=0.1)
axon_segment = neuroml.Segment(proximal = p,
distal = d,
parent = parent)
axon_segment.id = seg_id
axon_segment.name = 'axon_segment_' + str(axon_segment.id)
#now reset everything:
parent = neuroml.SegmentParent(segments=axon_segment.id)
parent_segment = axon_segment
seg_id += 1
axon_segments.append(axon_segment)
test_morphology = am.ArrayMorphology()
test_morphology.segments.append(soma)
test_morphology.segments += axon_segments
test_morphology.id = "TestMorphology"
self.test_morphology = test_morphology
def test_valid_morphology_ids(self):
morphology = self.test_morphology
self.assertTrue(morphology.valid_ids)
def test_invalid_morphology_ids(self):
morphology = self.test_morphology
morphology.segments[0].id = 5
self.assertFalse(morphology.valid_ids)
def test_num_segments(self):
num_segments = len(self.test_morphology.segments)
self.assertEqual(num_segments,101)
def test_segments_ids_ok(self):
self.assertEqual(self.test_morphology.segments[30].id,30)
def test_soma_still_located_at_zero(self):
self.assertEqual(self.test_morphology.segments[0].name,'Soma')
self.assertEqual(self.test_morphology.segments[0].id,0)
def test_segment_vertices_ok(self):
self.assertEqual(self.test_morphology.segments[1].proximal.x,50.0)
def test_axon_names_ok(self):
self.assertEqual(self.test_morphology.segments[32].name,'axon_segment_32')
def test_segment_instance(self):
seg = self.test_morphology.segments[47]
self.assertIsInstance(seg,neuroml.nml.nml.Segment)
class TestArrayMorphology(unittest.TestCase):
def setUp(self):
num_segments = int(100)
num_vertices = num_segments + 1
x = np.linspace(0,10,num_vertices)
y = np.zeros(num_vertices)
z = np.zeros(num_vertices)
d = np.linspace(1,0.01,num_vertices)
connectivity = range(-1,num_segments)
vertices = np.array([x,y,z,d]).T
self.complex_vertices = vertices
physical_mask = np.zeros(num_vertices)
#third segment is non-physical:
physical_mask[2] = 1
physical_mask[20] = 1
self.complex_morphology = am.ArrayMorphology(vertices=vertices,
connectivity=connectivity,
physical_mask=physical_mask,
id = 'test_arraymorph')
self.valid_vertices = [[0,0,0,0.1],
[1,0,0,0.2],
[2,0,0,0.3],
[3,0,0,0.4]]
self.valid_connectivity = [-1,0,1,2]
self.optimized_morphology = am.ArrayMorphology(vertices=self.valid_vertices,
connectivity=self.valid_connectivity,
id = 'test_arraymorph')
proximal_point = neuroml.Point3DWithDiam(x=0.1,
y=0.2,
z=0.3,
diameter=1.1,)
distal_point = neuroml.Point3DWithDiam(x=0.0,
y=0.0,
z=0.0,
diameter=1.1,)
soma = neuroml.Segment(proximal = proximal_point,
distal = distal_point,)
self.small_morphology = am.ArrayMorphology()
self.small_morphology.segments.append(soma)
def test_single_segment_morphology_instantiation(self):
print(self.small_morphology.connectivity)
seg = self.small_morphology.segments[0]
self.assertIsInstance(seg,neuroml.nml.nml.Segment)
def test_single_segment_morphology_length(self):
self.assertEqual(len(self.small_morphology.segments),1)
def test_index_error(self):
"""
There is no segments[1] for a one-segment morphology
"""
# print self.small_morphology.vertices
# print self.small_morphology.segments[0].id
# print self.small_morphology.segments[1].id
# print 'instnatiated segments:'
# print self.small_morphology.segments.instantiated_segments
self.assertRaises(IndexError,self.small_morphology.segments.__getitem__,1)
def test_single_floating_segment(self):
"""
Because physical_mask[4] = 1 a segment should be skipped as it is
floating.
"""
seg = self.complex_morphology.segments[3]
seg_proximal_x = seg.proximal.x
seg_distal_x = seg.distal.x
equivalent_proximal_vertex = self.complex_vertices[5][0]
equivalent_distal_vertex = self.complex_vertices[4][0]
self.assertEqual(seg_proximal_x,equivalent_proximal_vertex)
self.assertEqual(seg_distal_x,equivalent_distal_vertex)
def test_double_floating_segment(self):
"""
Because physical_mask[4] = 1 a segment should be skipped as it is
floating.
"""
seg = self.complex_morphology.segments[3]
seg_proximal_x = seg.proximal.x
seg_distal_x = seg.distal.x
equivalent_proximal_vertex = self.complex_vertices[5][0]
equivalent_distal_vertex = self.complex_vertices[4][0]
self.assertEqual(seg_proximal_x,equivalent_proximal_vertex)
self.assertEqual(seg_distal_x,equivalent_distal_vertex)
def test_segments_len(self):
num_segments = 98
len_segment_list = len(self.complex_morphology.segments)
self.assertEqual(num_segments,len_segment_list)
def test_add_segment_len(self):
"""
Add a neuroml.Segment() object, the segments proximal
and distal vertices should be used. The internal connectivity
should be passed.
"""
proximal_point = neuroml.Point3DWithDiam(x=0.1,
y=0.2,
z=0.3,
diameter=1.1,)
distal_point = neuroml.Point3DWithDiam(x=0.0,
y=0.0,
z=0.0,
diameter=1.1,)
seg = neuroml.Segment(proximal = proximal_point,
distal = distal_point)
num_segments = len(self.complex_morphology.segments)
self.complex_morphology.segments.append(seg)
len_segment_list = len(self.complex_morphology.segments)
self.assertEqual(num_segments+1, len_segment_list)
self.setUp()
def test_add_segment_vertices_added(self):
proximal_point = neuroml.Point3DWithDiam(x=0.1,
y=0.2,
z=0.3,
diameter=0.1,)
distal_point = neuroml.Point3DWithDiam(x=0.0,
y=0.0,
z=0.0,
diameter=0.1)
seg = neuroml.Segment(proximal = proximal_point,
distal = distal_point)
num_segments = len(self.complex_morphology.segments)
self.optimized_morphology.segments.append(seg)
true_vertices = self.optimized_morphology.vertices
expected_vertices = np.array([[0,0,0,0.1],
[1,0,0,0.2],
[2,0,0,0.3],
[3,0,0,0.4],
[0,0,0,0.1],
[0.1,0.2,0.3,0.1],])
arrays_equal = np.array_equal(true_vertices,expected_vertices)
self.assertTrue(arrays_equal)
self.setUp()
    def test_add_segment_connectivity_valid(self):
pass
def test_num_vertices(self):
"""
Morphology with one segment
"""
self.assertEqual(self.optimized_morphology.num_vertices,4)
def test_valid_morphology(self):
"""
Should return false if morphology is invalid
"""
vertices=[[0,0,0],[1,1]]
connectivity=[-1,0]
self.assertRaises(AssertionError,am.ArrayMorphology,vertices,connectivity)
vertices=[[0,0,0],[1,1,1]]
connectivity=[-1,0,0]
self.assertRaises(AssertionError,am.ArrayMorphology,vertices,connectivity)
vertices=[[0,0,0],[1,1,1]]
connectivity=[]
self.assertRaises(AssertionError,am.ArrayMorphology,vertices,connectivity)
def test_root_index(self):
self.assertEqual(self.optimized_morphology.root_index,0)
    def test_physical_indices(self):
physical_indices = self.optimized_morphology.physical_indices
self.assertTrue(np.array_equal(physical_indices,[0,1,2,3]))
def test_children(self):
self.assertTrue(self.optimized_morphology.children(1),2)
def test_to_root(self):
new_morphology = am.ArrayMorphology(self.optimized_morphology.vertices,
self.optimized_morphology.connectivity)
new_morphology.to_root(2)
new_connectivity = new_morphology.connectivity
self.assertTrue(np.array_equal(new_connectivity,[1,2,-1,2]))
def test_to_neuroml_morphology(self):
neuroml_morphology = self.optimized_morphology.to_neuroml_morphology(id="Test")
self.assertEqual(neuroml_morphology.id,"Test")
self.assertEqual(len(neuroml_morphology.segments),3)
def test_pop(self):
new_morphology = am.ArrayMorphology(self.optimized_morphology.vertices,
self.optimized_morphology.connectivity)#
new_morphology.pop(1)
new_connectivity = new_morphology.connectivity
self.assertTrue(np.array_equal(new_connectivity,[-1,0,1]))
def test_segment_getter(self):
segment = self.optimized_morphology.segments[0]
self.assertIsInstance(segment,neuroml.Segment)
self.assertEqual(segment.proximal.diameter,0.2)
self.assertEqual(segment.distal.diameter,0.1)
def test_segmentlist_getter(self):
segment = self.optimized_morphology.segments[1]
segment_again = self.optimized_morphology.segments[1]
self.assertEqual(segment,segment_again)
def test_segmentlist_setter(self):
p = neuroml.Point3DWithDiam(x=0.9,
y=0.0,
z=0.0,
diameter=0.1)
d = neuroml.Point3DWithDiam(x=0.0,
y=0.0,
z=0.0,
diameter=0.1)
new_segment = neuroml.Segment(proximal=p,
distal=d)
self.optimized_morphology.segments[2] = new_segment
self.assertEqual(self.optimized_morphology.segments[2],new_segment)
def test_segmentlist_setter_by_inference(self):
p = neuroml.Point3DWithDiam(x=0.9,
y=0.0,
z=0.0,
diameter=0.1)
d = neuroml.Point3DWithDiam(x=0.0,
y=0.0,
z=0.0,
diameter=0.1)
new_segment = neuroml.Segment(proximal=p,
distal=d)
self.optimized_morphology.segments[2] = new_segment
self.assertEqual(self.optimized_morphology.segments[2].proximal.x,0.9)
def test_instantiation(self):
"""
Test an arraymorph can be instantiated with default parameters
"""
morphology = am.ArrayMorphology()
def test_parents(self):
"""
A segment by default uses its vertex index as its ID,
as a consequence the first segment has index = 1
"""
test_segment_1 = self.optimized_morphology.segments[0]
test_segment_2 = self.optimized_morphology.segments[1]
self.assertEqual(test_segment_1.id,1)
self.assertEqual(test_segment_2.id,2)
self.assertEqual(test_segment_2.parent.segments,1)
self.assertIsNone(test_segment_1.parent)
def test_valid_morphology_ids(self):
morphology = self.optimized_morphology
self.assertTrue(morphology.valid_ids)
def test_invalid_morphology_ids(self):
morphology = self.optimized_morphology
morphology.segments[0].id = 5
self.assertFalse(morphology.valid_ids)
def test_large_arraymorph(self):
"""
This will generate a morphology which will be difficult to
        generate without the optimized internal representation.
        The morphology has one million segments.
"""
num_segments = int(1e6)
num_vertices = num_segments + 1
x = np.linspace(0,10,num_vertices)
y = np.zeros(num_vertices)
z = np.zeros(num_vertices)
d = np.linspace(1,0.01,num_vertices)
vertices = np.array([x,y,z,d]).T
connectivity = range(-1,num_segments)
big_arraymorph = am.ArrayMorphology(vertices = vertices,
connectivity = connectivity)
self.assertIsInstance(big_arraymorph.segments[3],neuroml.Segment)
self.assertEqual(big_arraymorph.segments[0].distal.diameter,1.0)
#following test not as obvious as it seems - first execution of getter does not have the same result as second
self.assertEqual(big_arraymorph.segments[2333],big_arraymorph.segments[2333])
self.assertEqual(big_arraymorph.segments[0].distal.diameter,1.0)
self.assertEqual(big_arraymorph.segments[num_segments-1].proximal.x,10.0)
self.assertEqual(big_arraymorph.segments[0].distal.x,0.0)
self.assertEqual(big_arraymorph.segments[num_segments-1].proximal.diameter,0.01)
```
#### File: neuroml/test/test_loaders.py
```python
import neuroml
from neuroml import loaders
import os
try:
import unittest2 as unittest
except ImportError:
import unittest
class TestNeuroMLLoader(unittest.TestCase):
def test_load_neuroml(self):
root_dir = os.path.dirname(neuroml.__file__)
print('root dir is:')
print(root_dir)
test_file_path = os.path.join(root_dir,'test')
test_file_path = os.path.join(test_file_path,'Purk2M9s.nml')
print('test file path is:')
print(test_file_path)
f = open(test_file_path,'r')
print(f.read())
doc = loaders.NeuroMLLoader.load(test_file_path)
self.assertEqual(doc.id,'Purk2M9s')
```
#### File: libNeuroML/neuroml/writers.py
```python
import neuroml
from utils import current_neuroml_version
class NeuroMLWriter(object):
@classmethod
def write(cls,nmldoc,file):
"""
Writes from NeuroMLDocument to nml file
in future can implement from other types
via chain of responsibility pattern.
"""
if isinstance(file,str):
file = open(file,'w')
#TODO: this should be extracted from the schema:
namespacedef = 'xmlns="http://www.neuroml.org/schema/neuroml2" '
namespacedef += ' xmlns:xs="http://www.w3.org/2001/XMLSchema"'
namespacedef += ' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"'
namespacedef += ' xsi:schemaLocation="http://www.neuroml.org/schema/neuroml2 https://raw.github.com/NeuroML/NeuroML2/development/Schemas/NeuroML2/NeuroML_%s.xsd"'%current_neuroml_version
nmldoc.export(file,0,name_="neuroml",
namespacedef_=namespacedef) #name_ param to ensure root element named correctly - generateDS limitation
class JSONWriter(object):
"""
Write a NeuroMLDocument to JSON, particularly useful
when dealing with lots of ArrayMorphs.
"""
@classmethod
def __encode_as_json(cls,neuroml_document):
neuroml_document = cls.__sanitize_doc(neuroml_document)
from jsonpickle import encode as json_encode
encoded = json_encode(neuroml_document)
return encoded
@classmethod
def __sanitize_doc(cls,neuroml_document):
"""
Some operations will need to be performed
before the document is JSON-pickleable.
"""
for cell in neuroml_document.cells:
try:
cell.morphology.vertices = cell.morphology.vertices.tolist()
cell.morphology.physical_mask = cell.morphology.physical_mask.tolist()
cell.morphology.connectivity = cell.morphology.connectivity.tolist()
except:
pass
return neuroml_document
    @classmethod
    def __file_handle(cls, file):
        """Return a writable file handle, opening the path if a string was given."""
        if isinstance(file, str):
            return open(file, mode="w")
        return file
@classmethod
def write(cls,neuroml_document,file):
if isinstance(file,str):
fileh = open(file, mode = 'w')
else:
fileh = file
if isinstance(neuroml_document,neuroml.NeuroMLDocument):
encoded = cls.__encode_as_json(neuroml_document)
else:
raise NotImplementedError("Currently you can only serialize NeuroMLDocument type in JSON format")
fileh.write(encoded)
@classmethod
def write_to_mongodb(cls,neuroml_document,db,host=None,port=None,id=None):
from pymongo import MongoClient
import json
if id == None:
id = neuroml_document.id
if host == None:
host = 'localhost'
if port == None:
port = 27017
client = MongoClient(host, port)
db = client[db]
collection = db[id]
if isinstance(neuroml_document,neuroml.NeuroMLDocument):
encoded = cls.__encode_as_json(neuroml_document)
encoded_dict = json.loads(encoded)
collection.insert(encoded_dict)
class ArrayMorphWriter(object):
"""
For now just testing a simple method which can write a morphology, not a NeuroMLDocument.
"""
@classmethod
def __write_single_cell(cls,array_morph,fileh,cell_id=None):
vertices = array_morph.vertices
connectivity = array_morph.connectivity
physical_mask = array_morph.physical_mask
# Get the HDF5 root group
root = fileh.root
# Create the groups:
# can use morphology name in future?
if array_morph.id == None:
morphology_name = 'Morphology'
else:
morphology_name = array_morph.id
if cell_id == None:
morphology_group = fileh.createGroup(root, morphology_name)
hierarchy_prefix = "/" + morphology_name
else:
cell_group = fileh.createGroup(root, cell_id)
morphology_group = fileh.createGroup(cell_group, morphology_name)
hierarchy_prefix = '/' + cell_id + '/' + morphology_name
vertices_array = fileh.createArray(hierarchy_prefix, "vertices", vertices)
connectivity_array = fileh.createArray(hierarchy_prefix, "connectivity", connectivity)
physical_mask_array = fileh.createArray(hierarchy_prefix, "physical_mask", physical_mask)
@classmethod
def __write_neuroml_document(cls,document,fileh):
document_id = document.id
for default_id,cell in enumerate(document.cells):
morphology = cell.morphology
if morphology.id == None:
morphology.id = 'Morphology' + str(default_id)
if cell.id == None:
cell.id = 'Cell' + str(default_id)
cls.__write_single_cell(morphology,fileh,cell_id=cell.id)
for default_id,morphology in enumerate(document.morphology):
if morphology.id == None:
morphology.id = 'Morphology' + str(default_id)
            cls.__write_single_cell(morphology, fileh)
@classmethod
def write(cls,data,filepath):
import tables
fileh = tables.openFile(filepath, mode = "w")
#Now instead we should go through a document/cell/morphology
#hierarchy - this kind of tree traversal should be done recursively
if isinstance(data,neuroml.arraymorph.ArrayMorphology):
cls.__write_single_cell(data, fileh)
if isinstance(data,neuroml.NeuroMLDocument):
cls.__write_neuroml_document(data,fileh)
# Finally, close the file (this also will flush all the remaining buffers!)
fileh.close()
```
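As a quick orientation, here is a hypothetical usage sketch for the `ArrayMorphWriter` defined above. It assumes `neuroml`, `numpy`, and an older PyTables release are available (the writer calls `tables.openFile`, the PyTables 2.x spelling); the vertex data and output file name are invented for illustration.
```python
import numpy as np
import neuroml.arraymorph as am
from neuroml.writers import ArrayMorphWriter

# A tiny morphology: vertex columns are x, y, z, diameter, and the
# connectivity list gives each vertex's parent index (-1 marks the root).
vertices = np.array([[0.0, 0.0, 0.0, 0.1],
                     [1.0, 0.0, 0.0, 0.2],
                     [2.0, 0.0, 0.0, 0.3]])
connectivity = [-1, 0, 1]

morphology = am.ArrayMorphology(vertices=vertices,
                                connectivity=connectivity,
                                id="example_morph")

# Writes the vertices, connectivity and physical_mask arrays to an HDF5 file.
ArrayMorphWriter.write(morphology, "example_morph.h5")
```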
|
{
"source": "JefferMarcelino/app-ideas",
"score": 4
}
|
#### File: app-ideas/Bin2Dec/Bin2Dec.py
```python
def converter(number):
    decimal_number = 0
    chars = len(number)  # number of characters in the binary number
    position_chars = chars  # position of the digit used in the calculations
    Condition = None  # becomes False if any requirement is not met
    # checking the conditions
    if chars > 8:
        Condition = False
    for digit in number:
        if digit not in "01":
            Condition = False
    # performing the calculations
    if Condition is None:
        for digit in number:
            position_chars -= 1
            decimal_number += (2 ** position_chars) * int(digit)
        print(f"The binary number {number} in decimal is {decimal_number}")
    # warning that one of the requirements was not met
    else:
        print("X - Please, write only 0s and 1s, with at most 8 digits")
#starting the program
bin_number = input("Write a binary number to convert: ")
converter(bin_number)
```
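A quick way to sanity-check the positional-weight logic in `converter` is to compare it against Python's built-in base-2 parsing; this is a standalone sketch and the sample strings are arbitrary.
```python
# Cross-check: both approaches must agree for valid binary strings.
for sample in ("0", "1011", "11111111"):
    weights = sum(int(digit) * 2 ** position
                  for position, digit in enumerate(reversed(sample)))
    assert weights == int(sample, 2)
    print(sample, "->", weights)
```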
|
{
"source": "JefferMarcelino/Aulas-Python",
"score": 4
}
|
#### File: Curso em Video - Aulas/Aula 20/Aula 20 - Funcoes (Parte 1) - TESTE 06.py
```python
def soma(* valores):
s = 0
for num in valores:
s += num
print("Somando os valores {} temos {}".format(valores, s))
# PROGRAMA PRINCIPAL
soma(5, 2)
soma(2, 9, 4)
```
#### File: Curso em Video - Aulas/Aula 21/Aula 21 - Funcoes (Parte 2) - TESTE 04.py
```python
def teste():
x = 8
print("Na funcao teste, n vale {}".format(n))
print("Na funcao teste, x vale {}".format(x))
# PROGRAMA PRINCIPAL
n = 2
x = 1
print("No programa principal, n vale {}".format(n))
teste()
print("Na programa principal, x vale {}".format(x))
```
#### File: Curso em Video - Aulas/Aula 21/Aula 21 - Funcoes (Parte 2) - TESTE 06.py
```python
def parOuImpar(n=0):
if n % 2 ==0:
return True
else:
return False
num = int(input("Digite um numero: "))
if parOuImpar(num):
print("E par!")
else:
print("Nao e par!")
```
#### File: Aulas-Python/Curso em Video - Exercicios/ex096.py
```python
def area(larg, compr):
calc = larg * compr
print("A area de um terreno {}x{} e de {}m2".format(larg, compr, calc))
# PROGRAMA PRINCIPAL
print("{:^22}".format("Controle de Terreno"))
print("-" * 22)
largura = float(input("LARGURA (m): "))
comprimento = float(input("COMPRIMENTO (m): "))
area(largura, comprimento)
```
#### File: Aulas-Python/Curso em Video - Exercicios/ex097.py
```python
def escreva(msg):
tam = len(msg) + 4
print("~" * tam)
print(f" {msg}")
print("~" * tam)
# PROGRAMA PRINCIPAL
escreva("<NAME>")
escreva("Curso de Python no Youtube")
escreva("CeV")
```
#### File: Aulas-Python/Curso em Video - Exercicios/ex100.py
```python
from random import randint
from time import sleep
numeros = []
def sorteia():
print("Sorteado 5 valores da lista: ", end="")
for c in range(0, 5):
n = randint(0, 10)
numeros.append(n)
print(n, end=" ")
sleep(0.3)
print("PRONTO!")
def pares(lst):
par = 0
for cada in lst:
if cada % 2 == 0:
par += cada
print("Somando os valores pares de {}, temos {}".format(numeros, par))
sorteia()
pares(numeros)
```
#### File: Aulas-Python/Curso em Video - Exercicios/ex104.py
```python
def leiaInt(text):
while True:
b = input(text)
        if b.isnumeric():
            return int(b)
else:
print("\033[31mERRO! Digite um numero inteiro valido\033[m")
# PROGRAMA PRINCIPAL
n = leiaInt("Digite um numero: ")
print("Voce acabou de digitar o numero {}".format(n))
```
#### File: utilidadescev/moeda/__init__.py
```python
def aumentar(preco=0, taxa=0, formatado=False):
res = preco + (preco * taxa / 100)
return res if formatado is False else moeda(res)
def diminuir(preco=0, taxa=0, formatado=False):
res = preco - (preco * taxa / 100)
return res if formatado is False else moeda(res)
def dobro(preco=0, formatado=False):
res = preco * 2
return res if formatado is False else moeda(res)
def metade(preco=0, formatado=False):
res = preco / 2
return res if formatado is False else moeda(res)
def moeda(preco=0, moeda="MTs"):
return f"{preco:.2f}{moeda}".replace(".", ",")
def resumo(p=0, taxaa=10, taxar=5):
print("-" * 30)
print("RESUMO DO VALOR".center(30))
print("-" * 30)
print("Preco analisado: \t\t{}".format(moeda(p)))
print("Dobro do preco: \t\t{}".format(dobro(p, True)))
print("Metade do preco: \t\t{}".format(metade(p, True)))
print("Com {}% de aumento: \t{}".format(taxaa, aumentar(p, taxaa, True)))
print("Com {}% de reducao: \t{}".format(taxar, diminuir(p, taxar, True)))
print("-" * 30)
```
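A short usage sketch for the helpers above (importable as `utilidadescev.moeda`, per the file path); the price value is arbitrary.
```python
from utilidadescev import moeda

preco = 49.9
print(moeda.moeda(preco))                # -> "49,90MTs"
print(moeda.aumentar(preco, 10, True))   # price plus 10%, already formatted
print(moeda.metade(preco))               # -> 24.95 (unformatted float)
moeda.resumo(preco, taxaa=20, taxar=15)  # prints the full summary block
```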
#### File: Aulas-Python/Curso em Video - Exercicios/ex113.py
```python
def leiaInt(msg):
while True:
try:
n = int(input(msg))
break
except (ValueError, TypeError):
print("\033[31mERRO: por favor, digite um numero inteiro valido.\033[m")
continue
return n
def leiaFloat(msg):
while True:
try:
n = float(input(msg))
break
except (ValueError, TypeError):
print("\033[31mERRO: por favor, digite um numero real valido.\033[m")
continue
return n
n1 = leiaInt("Digite um numero inteiro: ")
n2 = leiaFloat("Digite um numero real: ")
print("O valor inteiro digitado foi {} e o real foi {}".format(n1, n2))
```
#### File: POO-Python/aulas/aula02.py
```python
class Televisão:
def __init__(self):
self.ligada = False
self.canal = 2
def muda_canal_para_baixo(self):
self.canal -=1
def muda_canal_para_cima(self):
self.canal +=1
tv = Televisão()
tv.muda_canal_para_cima()
tv.muda_canal_para_cima()
print(tv.canal)
tv.muda_canal_para_baixo()
print(tv.canal)
```
|
{
"source": "JefferMarcelino/Python",
"score": 3
}
|
#### File: Python/Agendamento/Agendamento.py
```python
from tkinter import *
import os
from time import sleep
from datetime import datetime
import sys
janela_principal = Tk()
arq = "dados.txt"
valor = 1
def bt_click1():
global valor
valor = 1
def bt_click2():
global valor
valor = 60
def bt_click3():
while True:
try:
t = int(ed1.get())
except:
lb2["text"] = "ERRO. Por favor, digite um numero valido!"
sleep(1.5)
sys.exit()
else:
break
tempo = valor * 60 * int(ed1.get())
lb2["text"] = "Seu computador sera desligado em {} segundos".format(tempo)
os.system("shutdown -s -f -t {}".format(tempo))
global temp
temp = tempo
def bt_click4():
os.system("shutdown -a")
lb2["text"] = "Agedando o Cancelamento, com Sucesso"
sleep(1.5)
sys.exit()
def bt_click5():
os.system("shutdown -s -f")
janela_principal.geometry("450x200")
janela_principal["bg"] = "sky blue"
janela_principal.title("Agendamento")
lb1 = Label(janela_principal, text="ESCOLHA UMA OPCAO: ", font="14")
lb1.place(x=0, y=0)
lb1["bg"] = "sky blue"
lb2 = Label(janela_principal, text="", font="18")
lb2.place(x=0, y=115)
lb2["bg"] = "sky blue"
bt1 = Button(janela_principal, text="MINUTOS", width=10, font=20, command=bt_click1)
bt1.place(x=0, y=30)
bt1["bg"] = "powder blue"
bt2 = Button(janela_principal, text="HORAS", width=10, font=20, command=bt_click2)
bt2.place(x=0, y=65)
bt2["bg"] = "powder blue"
ed1 = Entry(janela_principal, width=40)
ed1.place(x=150, y=75)
bt3 = Button(janela_principal, text="ENVIAR", width=10, font=20, command=bt_click3)
bt3.place(x=210, y=150)
bt3["bg"] = "powder blue"
bt4 = Button(janela_principal, text="CANCELAR", width=15, font=30, command=bt_click4)
bt4.place(x=50, y=150)
bt4["bg"] = "powder blue"
bt5 = Button(janela_principal, text="DESLIGAR", width=10, font=30, command=bt_click5)
bt5.place(x=325, y=150)
bt5["bg"] = "powder blue"
janela_principal.mainloop()
try:
dado = open(arq, "a")
except:
dado = open(arq, "w")
dado.write("---------> Dia = {} <---------\n".format(datetime.today().date()))
dado.write("--> Horas = {}\n".format(datetime.today().time()))
if valor == 1:
dado.write("--> Tempo agendando: {} Minutos\n".format(temp / 60 / valor))
elif valor == 60:
dado.write("--> Tempo agendando: {} Horas\n".format(temp / 60 / valor))
dado.write("-=" * 30)
dado.write("\n")
```
|
{
"source": "Jefferreira/Burger-FOX",
"score": 2
}
|
#### File: Burger-FOX/orders/views.py
```python
from django.shortcuts import redirect
from django.urls import reverse
from django.views.generic import CreateView
from cart.cart import Cart
from .forms import OrderCreateForm
from .models import Item, Order
class OrderCreateView(CreateView):
model = Order
form_class = OrderCreateForm
def form_valid(self, form):
cart = Cart(self.request)
if cart:
order = form.save()
for item in cart:
Item.objects.create(
order=order,
product=item["product"],
price=item["price"],
quantity=item["quantity"],
)
cart.clear()
self.request.session["order_id"] = order.id
return redirect(reverse("payments:process"))
return redirect(reverse("pages:home"))
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["cart"] = Cart(self.request)
return context
```
|
{
"source": "jeffersfp/blackbox",
"score": 3
}
|
#### File: handlers/databases/_base.py
```python
from abc import abstractmethod
from pathlib import Path
from blackbox.handlers._base import BlackboxHandler
class BlackboxDatabase(BlackboxHandler):
"""An abstract database handler."""
handler_type = "database"
backup_extension = ""
def __init__(self, **kwargs):
"""Set up database handler."""
super().__init__(**kwargs)
self.success = False # Was the backup successful?
self.output = "" # What did the backup output?
@abstractmethod
def backup(self, backup_path: Path):
"""
Back up a database to the provided backup Path.
All subclasses must implement this method.
"""
raise NotImplementedError
@property
def output(self):
"""Return sanitized output only."""
return self.__output
@output.setter
def output(self, sensitive_output: str):
"""Set sanitized output."""
self.__output = self.sanitize_output(sensitive_output)
def get_id_for_retention(self) -> str:
"""Used for deleting only this kind of old backups."""
return self.config.get("id")
```
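To make the abstract interface above concrete, here is a hypothetical minimal subclass. The `SQLiteExample` handler, its `database_path` config key, and the `required_fields` attribute (mirroring the storage handler shown next) are assumptions for illustration, not part of blackbox itself.
```python
import sqlite3
from pathlib import Path

from blackbox.handlers.databases._base import BlackboxDatabase


class SQLiteExample(BlackboxDatabase):
    """Hypothetical handler that snapshots a local SQLite database."""

    required_fields = ("database_path",)  # assumed; mirrors other handlers
    backup_extension = ".sqlite3"

    def backup(self, backup_path: Path) -> None:
        source = sqlite3.connect(self.config["database_path"])
        target = sqlite3.connect(str(backup_path))
        try:
            # sqlite3's online backup API copies the whole database safely.
            source.backup(target)
            self.success = True
            self.output = f"Backed up to {backup_path}"
        except sqlite3.Error as exc:
            self.success = False
            self.output = str(exc)
        finally:
            source.close()
            target.close()
```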
#### File: handlers/storage/dropbox.py
```python
import os
import re
from datetime import datetime
from pathlib import Path
from dropbox import Dropbox as DropboxClient
from dropbox.exceptions import ApiError
from dropbox.exceptions import AuthError
from dropbox.exceptions import HttpError
from dropbox.files import CommitInfo
from dropbox.files import FileMetadata
from dropbox.files import UploadSessionCursor
from dropbox.files import WriteMode
from blackbox.config import Blackbox
from blackbox.handlers.storage._base import BlackboxStorage
from blackbox.utils.logger import log
class Dropbox(BlackboxStorage):
"""Storage handler that uploads backups to Dropbox."""
required_fields = ("access_token",)
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.upload_base = self.config.get("upload_directory") or "/"
self.client = DropboxClient(self.config["access_token"])
self.valid = self._validate_token()
def _validate_token(self):
"""Check if dropbox token is valid."""
try:
return self.client.check_user("test").result == "test"
except AuthError:
return False
def sync(self, file_path: Path) -> None:
"""Sync a file to Dropbox."""
# Check if Dropbox token is valid.
if self.valid is False:
error = "Dropbox token is invalid!"
self.success = False
self.output = error
log.error(error)
return None
# This is size what can be uploaded as one chunk.
# When file is bigger than that, this will be uploaded
# in multiple parts.
chunk_size = 4 * 1024 * 1024
temp_file, recompressed = self.compress(file_path)
upload_path = f"{self.upload_base}{file_path.name}{'.gz' if recompressed else ''}"
try:
with temp_file as f:
file_size = os.stat(f.name).st_size
log.debug(file_size)
if file_size <= chunk_size:
self.client.files_upload(
f.read(), upload_path, WriteMode.overwrite
)
else:
session_start = self.client.files_upload_session_start(
f.read(chunk_size)
)
cursor = UploadSessionCursor(
session_start.session_id,
offset=f.tell()
)
# Commit contains path in Dropbox and write mode about file
commit = CommitInfo(upload_path, WriteMode.overwrite)
while f.tell() < file_size:
if (file_size - f.tell()) <= chunk_size:
self.client.files_upload_session_finish(
f.read(chunk_size),
cursor,
commit
)
else:
self.client.files_upload_session_append(
f.read(chunk_size),
cursor.session_id,
cursor.offset
)
cursor.offset = f.tell()
self.success = True
except (ApiError, HttpError) as e:
log.error(e)
self.success = False
self.output = str(e)
def rotate(self, database_id: str) -> None:
"""
Rotate the files in the Dropbox directory.
All files in base directory of backups will be deleted when they
are older than `retention_days`, and because of this,
it's better to have backups in isolated folder.
"""
# Check if Dropbox token is valid.
if self.valid is False:
log.error("Dropbox token is invalid - Can't delete old backups!")
return None
# Let's rotate only this type of database
db_type_regex = rf"{database_id}_blackbox_\d{{2}}_\d{{2}}_\d{{4}}.+"
# Receive first batch of files.
files_result = self.client.files_list_folder(
self.upload_base if self.upload_base != "/" else ""
)
entries = [entry for entry in files_result.entries if
self._is_backup_file(entry, db_type_regex)]
# If there is more files, receive all of them.
while files_result.has_more:
cursor = files_result.cursor
files_result = self.client.files_list_folder_continue(cursor)
entries += [entry for entry in files_result.entries if
self._is_backup_file(entry, db_type_regex)]
retention_days = 7
if Blackbox.retention_days:
retention_days = Blackbox.retention_days
# Find all old files and delete them.
for item in entries:
last_modified = item.server_modified
now = datetime.now(tz=last_modified.tzinfo)
delta = now - last_modified
if delta.days >= retention_days:
self.client.files_delete(item.path_lower)
@staticmethod
def _is_backup_file(entry, db_type_regex) -> bool:
"""Check if file is actually this kind of database backup."""
return isinstance(entry, FileMetadata) and re.match(db_type_regex, entry.name)
```
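Stripped of the handler plumbing, the chunked-upload logic in `sync()` reduces to the Dropbox upload-session pattern sketched below, using the same SDK calls as above; the token, local file, remote path, and chunk size are placeholders.
```python
import os

from dropbox import Dropbox
from dropbox.files import CommitInfo, UploadSessionCursor, WriteMode

CHUNK_SIZE = 4 * 1024 * 1024
client = Dropbox("ACCESS_TOKEN")

with open("backup.tar.gz", "rb") as f:
    file_size = os.stat(f.name).st_size
    if file_size <= CHUNK_SIZE:
        # Small files go up in a single request.
        client.files_upload(f.read(), "/backup.tar.gz", WriteMode.overwrite)
    else:
        # Large files are streamed through an upload session.
        session = client.files_upload_session_start(f.read(CHUNK_SIZE))
        cursor = UploadSessionCursor(session.session_id, offset=f.tell())
        commit = CommitInfo("/backup.tar.gz", WriteMode.overwrite)
        while f.tell() < file_size:
            if file_size - f.tell() <= CHUNK_SIZE:
                client.files_upload_session_finish(f.read(CHUNK_SIZE), cursor, commit)
            else:
                client.files_upload_session_append(
                    f.read(CHUNK_SIZE), cursor.session_id, cursor.offset)
                cursor.offset = f.tell()
```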
|
{
"source": "jefferson2z/books-hub",
"score": 2
}
|
#### File: books/tests/test_forms.py
```python
from django.test import TestCase
class GenreFormTest(TestCase):
def test_uses_genre_form_template(self):
response = self.client.get("/genres/new/")
self.assertTemplateUsed(response, "create_form.html")
```
#### File: books/tests/test_models.py
```python
from django.test import TestCase
from books.models import Book, Genre, Author
class BookAndGenreModelsTest(TestCase):
def test_saving_and_retrieving_genres(self):
first_genre = Genre()
first_genre.title = "Fantasy"
first_genre.save()
saved_genres = Genre.objects.all()
self.assertEqual(saved_genres.count(), 1)
self.assertEqual(saved_genres[0].title, "Fantasy")
def test_saving_and_retrieving_books(self):
first_book = Book()
first_book.title = "Lord of the Rings"
first_book.description = "A tale of friendship"
first_book.isbn10 = "10"
first_book.isbn13 = "13"
first_book.save()
saved_books = Book.objects.all()
self.assertEqual(saved_books.count(), 1)
self.assertEqual(saved_books[0].title, "Lord of the Rings")
self.assertEqual(saved_books[0].description, "A tale of friendship")
self.assertEqual(saved_books[0].isbn10, "10")
self.assertEqual(saved_books[0].isbn13, "13")
def test_saving_and_retrieving_authors(self):
author = Author()
author.name = "Tolkien"
author.save()
saved_authors = Author.objects.all()
self.assertEqual(saved_authors.count(), 1)
self.assertEqual(saved_authors[0].name, "Tolkien")
def test_book_genre_relationship(self):
first_book = Book()
first_book.title = "Lord of the Rings"
first_book.save()
fantasy_genre = Genre()
fantasy_genre.title = "Fantasy"
fantasy_genre.save()
first_book.genres.set([fantasy_genre])
saved_books = Book.objects.all()
self.assertEqual(saved_books[0].genres.all().count(), 1)
book_genre = saved_books[0].genres.get(pk=1)
self.assertEqual(book_genre.title, "Fantasy")
saved_genres = Genre.objects.all()
self.assertEqual(saved_genres[0].books.all().count(), 1)
book = saved_genres[0].books.get(pk=1)
self.assertEqual(book.title, "Lord of the Rings")
def test_author_book_relationship(self):
book = Book()
book.title = "Lord of the Rings"
book.save()
author = Author()
author.name = "Tolkien"
author.save()
book.authors.set([author])
saved_book = Book.objects.get(title="Lord of the Rings")
self.assertEqual(saved_book.authors.all().count(), 1)
```
|
{
"source": "Jefferson3038/Megamercado",
"score": 3
}
|
#### File: Megamercado/main/index_view.py
```python
import tkinter as tk
from tkinter import ttk
from tkinter.font import families
class main_bar():
def __init__(self, _frameLogo, _frameUser):
self._frameLogo = _frameLogo
self._frameUser = _frameUser
def main_bar_view(self):
# Creating title megamercado
self.main_title_reg_pro = tk.Label(self._frameLogo, text='MEGAMERCADO', font=('Roboto Mono Bold', 15), bg='#37648B', fg='white')
self.main_title_reg_pro.grid()
self.user_log = tk.Label(self._frameUser, text='Usuario ADM', font=('Roboto Mono Bold', 15), bg='#37648B', fg='white')
self.user_log.grid()
```
|
{
"source": "Jefferson472/apredendo-django",
"score": 3
}
|
#### File: alura_formulario/passagens/validation.py
```python
def origem_destino_iguais(origem, destino, lista_de_erros):
if origem == destino:
lista_de_erros['destino'] = 'Origem e destino não podem ser iguais'
def campo_tem_algum_numero(valor_campo, nome_campo, lista_de_erros):
if any(char.isdigit() for char in valor_campo):
lista_de_erros[nome_campo] = 'Não pode conter números'
```
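A small usage sketch for the two validators above; the field values are illustrative only.
```python
lista_de_erros = {}
origem_destino_iguais('Lisboa', 'Lisboa', lista_de_erros)
campo_tem_algum_numero('Maria 2', 'nome', lista_de_erros)

# Both checks failed, so both keys are present:
# {'destino': 'Origem e destino não podem ser iguais', 'nome': 'Não pode conter números'}
print(lista_de_erros)
```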
#### File: receitas/views/busca.py
```python
from django.shortcuts import render
from receitas.models import Receita
def buscar(request):
lista_receitas = Receita.objects.order_by(
'-data_receita').filter(publicada=True)
if 'buscar' in request.GET:
nome_a_buscar = request.GET['buscar']
        if nome_a_buscar:
lista_receitas = lista_receitas.filter(
nome_receita__icontains=nome_a_buscar)
dados = {
'receitas': lista_receitas
}
return render(request, 'receitas/buscar.html', dados)
```
#### File: animais/tests/test_urls.py
```python
from django.test import TestCase, RequestFactory
from django.urls import reverse
from animais.views import index
class AnimaisURLSTestCase(TestCase):
def setUp(self):
self.factory = RequestFactory()
def test_rota_url_index(self):
"""Testa se a rota index está funcionando corretamente"""
request = self.factory.get('/')
with self.assertTemplateUsed('index.html'):
response = index(request)
self.assertEqual(response.status_code, 200)
```
#### File: tdd_busca_animal/setup/tests.py
```python
from django.test import LiveServerTestCase
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from animais.models import Animal
class AnimaisTestCase(LiveServerTestCase):
def setUp(self):
chrome_options = Options()
chrome_options.add_argument('--headless')
self.browser = webdriver.Chrome(executable_path='chromedriver.exe', chrome_options=chrome_options)
self.animal = Animal.objects.create(
nome_animal='Leão',
predador='Sim',
venenoso='Não',
domestico='Não'
)
def tearDown(self) -> None:
self.browser.quit()
def test_busca_animal(self):
"""Teste se um usuário pode buscar um animal pelo nome"""
home_page = self.browser.get(self.live_server_url)
brand_element = self.browser.find_element_by_css_selector('.navbar')
self.assertEqual('Busca Animal', brand_element.text)
buscar_animal_input = self.browser.find_element_by_css_selector('input#buscar-animal')
self.assertEqual(buscar_animal_input.get_attribute('placeholder'), "Exemplo: leão, urso...")
buscar_animal_input.send_keys('leão')
self.browser.find_element_by_css_selector('form button').click()
caracteristicas = self.browser.find_elements_by_css_selector('.result-description')
self.assertGreater(len(caracteristicas), 3)
```
|
{
"source": "Jefferson472/django_rest_api",
"score": 2
}
|
#### File: api_escola/escola/models.py
```python
from django.db import models
class Aluno(models.Model):
nome = models.CharField(max_length=30)
rg = models.CharField(max_length=9)
cpf = models.CharField(max_length=11)
data_nascimento = models.DateField()
def __str__(self):
return self.nome
class Curso(models.Model):
NIVEL = (
('B', 'Básico'),
('I', 'Intermediário'),
('A', 'Avançado')
)
cod = models.CharField(max_length=10)
descricao = models.CharField(max_length=100)
nivel = models.CharField(
max_length=1,
choices=NIVEL,
blank=False,
null=False,
default='B'
)
def __str__(self):
return self.descricao
class Matricula(models.Model):
PERIODO = (
('M', 'Matutino'),
('V', 'Vespertino'),
('N', 'Noturno')
)
aluno = models.ForeignKey(Aluno, on_delete=models.CASCADE)
curso = models.ForeignKey(Curso, on_delete=models.CASCADE)
periodo = models.CharField(
max_length=1,
choices=PERIODO,
blank=False,
null=False,
default='M'
)
```
|
{
"source": "Jefferson472/PDF_rename",
"score": 3
}
|
#### File: Jefferson472/PDF_rename/PDF_rename.py
```python
import os
import re
import shutil
import pdfplumber
# ACESSAR DIRETÓRIO E LER SOMENTE ARQUIVOS .PDF
main_folder = os.getcwd()
print(main_folder)
# ACESSA O ARQUIVO PDF E LÊ AS PALAVRAS NAS POSIÇÕES [9], [11] E [12] E RETORNA ESTA INFORMAÇÃO
def ler_pdf(path_pdf):
path = main_folder + '\\' + path_pdf
with pdfplumber.open(path) as pdf:
page = pdf.pages[0]
text = page.extract_words()
try:
po_number = text[9]['text']
forn_name = text[11]['text'].replace('-', '') + ' ' + text[12]['text']
return f'{po_number} - {forn_name}'
        except:
            with open('log_event.txt', 'a') as log_event:
                log_event.write(f'Não foi possível ler o arquivo: {path} \n')
if __name__ == '__main__':
# VERIFICA ARQUIVO POR ARQUIVO DO DIRETÓRIO ATUAL, CASO SEJA UM .PDF INICIA A EXECUÇÃO DE LEITURA E RENOMEIA O ARQUIVO
for root, dirs, files in os.walk(main_folder):
for file in files:
if re.search(r'\.pdf$', file):
file_name, file_extension = os.path.splitext(file)
new_name = ler_pdf(file)
if new_name is not None:
new_name = 'PO ' + new_name + file_extension
old_file_full_path = os.path.join(root, file)
new_file_full_path = os.path.join(root, new_name)
print(f'Movendo arquivo "{file}" para "{new_name}"')
try:
shutil.move(old_file_full_path, new_file_full_path)
# CRIA UM LOG PARA OS ARQUIVOS QUE NÃO FORAM RENOMEADOS CORRETAMENTE
                    except:
                        with open('log_event.txt', 'a') as log_event:
                            log_event.write(f'Não foi possível renomear o arquivo: {file} \n')
```
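For reference, a tiny sketch of what `page.extract_words()` returns, which is what makes the `text[9]`/`text[11]`/`text[12]` indexing above work; `sample.pdf` is a placeholder file name.
```python
import pdfplumber

with pdfplumber.open("sample.pdf") as pdf:
    words = pdf.pages[0].extract_words()
    # Each entry is a dict holding the word text plus its bounding box, e.g.
    # {'text': 'PO', 'x0': 72.0, 'x1': 85.2, 'top': 40.1, 'bottom': 52.3, ...}
    for word in words[:5]:
        print(word["text"], word["x0"], word["top"])
```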
|
{
"source": "Jefferson472/small-projects-in-pyhton",
"score": 4
}
|
#### File: small-projects-in-pyhton/JogoForca/forca_v2.py
```python
import random
from utils import imprimeForca
class Hangman:
def __init__(self, word):
self.word = word
self.secret_word = []
self.wrong_letters = []
# Método para adivinhar a letra
def guess(self, letter):
for letra in self.word:
if letra == letter:
self.secret_word.append(letter)
break
if letter not in self.word:
self.wrong_letters.append(letter)
# Método para verificar se o jogo terminou
def hangman_over(self):
if len(self.wrong_letters) == 7:
return True
# Método para verificar se o jogador venceu
def hangman_won(self):
if len(self.word) == len(self.secret_word):
return True
# Método para não mostrar a letra no board
def hide_word(self):
for letra in self.word:
if letra in self.secret_word:
print(letra, end='')
else:
print('_', end='')
print('\n')
# Método para checar o status do game e imprimir o board na tela
def print_game_status(self):
print(imprimeForca.forca[len(self.wrong_letters)])
# Função para ler uma palavra de forma aleatória do banco de palavras
def rand_word():
with open('JogoForca\BancoPalavras.txt', "rt") as f:
bank = f.readlines()
    return bank[random.randint(0, len(bank) - 1)].strip()
# Função Main - Execução do Programa
def main():
game = Hangman(rand_word())
# Enquanto o jogo não tiver terminado, print do status, solicita uma letra e faz a leitura do caracter
while True:
game.print_game_status()
game.hide_word()
letter = input('Informe uma letra: ')
if letter == '9':
break
game.guess(letter)
# De acordo com o status, imprime mensagem na tela para o usuário
if game.hangman_won():
print ('\nParabéns! Você venceu!!')
break
if game.hangman_over():
print ('\nGame over! Você perdeu.')
print ('A palavra era ' + game.word)
break
print ('\nFoi bom jogar com você! Agora vá estudar!\n')
if __name__ == "__main__":
main()
```
|
{
"source": "Jefferson5286/PgTools",
"score": 4
}
|
#### File: PgTools/pgtools/screenmanager.py
```python
from abc import ABC, abstractmethod
import pygame
'''
This module does everything needed to create your screens; pygame must be installed
for it to work correctly.
Every screen must be a class that inherits from Screen().
The ScreenManager class must be instantiated only outside the project's main loop, and it is
mandatory that ScreenManager().update() is called inside the project's main loop, so that
everything works correctly.
In Screen(), the on_event() and update() methods are mandatory.
'''
class ScreenManager:
def __init__(self, surface):
"""
A screen manager, for all screens to work, this class must be called.
[OBS] the screen manager must be called outside the main project cycle,
where all your screens will be added.
:param surface: Use pygame.display.set_mode(), the main project surface.
"""
self.surface = surface
# where the screens will be stored.
self.__screens = {}
# what screen is being displayed.
self.current = ''
        # defines whether the event cycle should run inside the manager or externally,
        # in the project's main event cycle.
self.internal_cycle_events = True
# define whether to start with the first screen to be added
self.starts_first_screen = True
self.__no_screens = True
# call control for some screen methods.
self.__call_control = {
'on_enter': True,
'on_pre_enter': True
}
def update(self, events_get):
"""
Method responsible for keeping the updates of each screen, in addition to triggering
events if self.internal_cycle_events is True.
[OBS] This method should only be called within the main project cycle.
:param events_get:
"""
current: Screen = self.__screens[self.current]
# checks if events are internal.
if self.internal_cycle_events:
self.__internal_screen_events__(events_get)
# check if you are entering the current screen.
if self.__call_control['on_pre_enter']:
self.__call_control['on_pre_enter'] = False
current.on_pre_enter()
# here is calling the methods responsible for rendering and updating the current screen.
self.surface.blit(current.surface, (0, 0))
current.update(events_get)
# checks if the screen has started.
if self.__call_control['on_enter']:
self.__call_control['on_enter'] = False
current.on_enter()
def external_screen_events(self, _event):
"""
        The purpose of this method is to give each screen a private 'sandbox' of events,
        so that handling an event here does not affect other screens.
        The advantage of using this method, rather than the built-in 'sandbox' created by
        the screen manager itself, is that it is called directly in the main event cycle,
        which improves the project's performance.
[OBS] This method must be called within the main event cycle of the project.
:param _event: must be the events variable of the main event loop
'for event in pygame.event.get()'
↑ ↑ ↑
this variable
"""
self.__screens[self.current].on_event(_event)
def add_screen(self, target):
"""
        Method responsible for adding new screens. For the ScreenManager to work correctly,
        at least one screen must be added; by default, the first screen added is the one
        shown first in the project window.
        :param target: Target screen to be added.
        :type target: Must receive an instance of a class that inherits from Screen().
"""
# checks whether to start with the first screen added or start with the last one
if self.starts_first_screen:
if self.__no_screens:
self.__no_screens = False
self.current = target.name
else:
self.current = target.name
# add the class in question to one in a dictionary
self.__screens[target.name] = target
def change_current(self, target):
"""
Method responsible for making screen transitions, and executing some methods during the action.
:param target: target screen that will be changed to current.
"""
# reset call control.
self.__call_control['on_enter'] = True
self.__call_control['on_pre_enter'] = True
# calling procedures before the screen is closed.
self.__screens[self.current].on_pre_exit()
old_current = self.current
# changing screen.
self.current = target
# calling procedures after the current old one has closed.
self.__screens[old_current].on_exit()
def __internal_screen_events__(self, __events):
# internal private event loop
for _event in __events:
self.__screens[self.current].on_event(_event)
class Screen(ABC):
def __init__(self, surface):
""" An abstracted base class where all your screens are based. """
# default screen name, based on lowercase class name.
self.name = self.__class__.__name__.lower()
# defining her surface and her size.
self.surface = pygame.Surface(surface.get_size())
@abstractmethod
def update(self, _events):
"""
        All of your screen's updates go here; this method is called in the project's main
        cycle by ScreenManager().update() whenever this is the current screen.
        If you need to react to an event, you can use the _events parameter for that.
"""
...
@abstractmethod
def on_event(self, _event):
"""
        Similar to the update() method, this method is called in the main project cycle,
        either from the manager's private event sandbox or from the project's main event cycle.
        Events arrive through the _event parameter, so use it if you want to handle an event
        privately inside this screen's sandbox.
"""
...
def on_enter(self):
""" Put here everything you wanted to be executed after entering the screen in question. """
...
def on_pre_enter(self):
""" Put here everything you wanted to be executed before entering the screen in question. """
...
def on_exit(self):
""" Put here everything you wanted to be executed after leaving the screen in question. """
...
def on_pre_exit(self):
""" Put here everything you wanted to be executed before leaving the screen in question. """
...
if __name__ == '__main__':
""" example of how to create screens. """
pygame.init()
# create a main surface:
display = pygame.display.set_mode([500, 320])
pygame.display.set_caption('screen test')
# define a managed, and set the canvas as default surface:
sm = ScreenManager(display)
# create a class for the first screen:
class Screen1(Screen):
def __init__(self, surface):
super().__init__(surface)
# set the update method
def update(self, _events):
# the surface to be drawn is the screen surface, which is the value of self.surface
#
# surface
# ↓ ↓ ↓ ↓
pygame.draw.rect(self.surface, [255, 255, 255], [50, 90, 90, 90])
pygame.draw.rect(self.surface, [255, 255, 255], [50, 200, 90, 90])
# define the on_event method:
def on_event(self, _event):
if _event.type == pygame.KEYDOWN:
# once SPACE key is pressed change the current screen to the defined target.
if _event.key == pygame.K_SPACE:
sm.change_current('screen2')
# as soon as it exits it will print the content:
def on_exit(self):
print('it went out')
class Screen2(Screen):
def __init__(self, surface):
super().__init__(surface)
# set the update method
def update(self, _events):
# the surface to be drawn is the screen surface, which is the value of self.surface
#
# surface
# ↓ ↓ ↓ ↓
pygame.draw.rect(self.surface, [255, 255, 255], [200, 95, 100, 100])
# define the on_event method:
def on_event(self, _event):
if _event.type == pygame.KEYDOWN:
# once SPACE key is pressed change the current screen to the defined target.
if _event.key == pygame.K_SPACE:
sm.change_current('screen1')
# before entering will print entering
def on_pre_enter(self):
print('entering')
# add to the screens in the manager and it must be passed in.surface as a parameter:
# obs: as screen1 was added first, it will be the screen that will be initialized along with the display.
sm.add_screen(Screen1(sm.surface))
sm.add_screen(Screen2(sm.surface))
# disabling the manager's built-in event loops:
sm.internal_cycle_events = False
# create the main project cycle:
running = True
while running:
# create a variable for events:
events = pygame.event.get()
# create the event loop
for event in events:
if event.type == pygame.QUIT:
running = False
# as it was set sm.internal_cycle_events to False it will be necessary to add this method:
# event must be passed as parameter.
sm.external_screen_events(event)
# updating the manager:
sm.update(events)
pygame.display.update()
```
|
{
"source": "JeffersonBC/eshop-index-back",
"score": 2
}
|
#### File: classification/api/tag.py
```python
from django.db import IntegrityError
from django.db.models import CharField, Count, Q, F, OuterRef, Exists
from django.db.models.functions import Coalesce
from django.shortcuts import get_object_or_404
from rest_framework import status
from rest_framework.decorators import api_view, permission_classes
from rest_framework.permissions import IsAuthenticated, IsAdminUser
from rest_framework.response import Response
from classification.models import ConfirmedTag, TagGroup, Tag, SuggestedTag
from games.models import SwitchGame
from eshop_crawler.settings import VOTE_TAG_UPPERBOUND, VOTE_TAG_LOWERBOUND
from ..serializers import TagGroupSerializer, TagSerializer
# TAG GROUPS
# **********
@api_view(['GET'])
@permission_classes((IsAuthenticated, IsAdminUser))
def all_tag_groups(request):
response = map(
lambda x: {'id': x.id, 'name': x.name},
TagGroup.objects.all().order_by('name'))
return Response(response, status=status.HTTP_200_OK)
@api_view(['GET', 'PATCH', 'DELETE'])
@permission_classes((IsAuthenticated, IsAdminUser))
def tag_group(request, id):
if request.method == 'GET':
return tag_group_get(request, id)
elif request.method == 'PATCH':
return tag_group_update(request, id)
elif request.method == 'DELETE':
return tag_group_delete(request, id)
def tag_group_get(request, id):
instance = get_object_or_404(TagGroup, id=id)
serialized = TagGroupSerializer(instance=instance)
return Response(serialized.data, status=status.HTTP_200_OK)
def tag_group_update(request, id):
tag_group = get_object_or_404(TagGroup, id=id)
updated_tag_group = TagGroupSerializer(
data=request.data, instance=tag_group)
success = updated_tag_group.is_valid()
if success:
updated_tag_group.save()
return Response(status=status.HTTP_200_OK)
else:
return Response(status=status.HTTP_400_BAD_REQUEST)
def tag_group_delete(request, id):
tag_group = get_object_or_404(TagGroup, id=id)
tag_group.delete()
return Response(status=status.HTTP_200_OK)
@api_view(['POST'])
@permission_classes((IsAuthenticated, IsAdminUser))
def tag_group_post(request):
tag_group = TagGroupSerializer(data=request.data)
success = tag_group.is_valid()
if success:
tag_group.save()
return Response(status=status.HTTP_200_OK)
else:
return Response(status=status.HTTP_400_BAD_REQUEST)
# TAGS
# ****
@api_view(['GET'])
def all_tags_of_game(request, game_code):
game = get_object_or_404(SwitchGame, game_code_unique=game_code)
tag_groups = {}
for group in TagGroup.objects.all():
tag_groups[group.id] = group.name
tags = {}
tag_ids = map(
lambda x: x['tag__id'],
game.confirmedtag_set.distinct('tag').values('tag__id'))
conf_tag_query = Tag.objects \
.filter(id__in=tag_ids) \
.annotate(votes=Count('suggestedtag',
filter=Q(suggestedtag__game=game))) \
.order_by('-votes', 'name')
print(conf_tag_query.values('name', 'votes'))
for tag in conf_tag_query:
if tag_groups[tag.tag_group.id] not in tags:
tags[tag_groups[tag.tag_group.id]] = []
tags[tag_groups[tag.tag_group.id]].append({
'id': tag.id,
'name': tag.name,
})
return Response(tags, status=status.HTTP_200_OK)
@api_view(['GET'])
def all_tags_by_group(request):
tag_groups = {}
for group in TagGroup.objects.all():
tag_groups[group.id] = group.name
tags = {}
for tag in Tag.objects.all().order_by('name'):
if tag_groups[tag.tag_group.id] not in tags:
tags[tag_groups[tag.tag_group.id]] = []
tags[tag_groups[tag.tag_group.id]].append(
{'id': tag.id, 'name': tag.name}
)
return Response(tags, status=status.HTTP_200_OK)
@api_view(['GET'])
def all_tags_by_votable_group(request):
tag_groups = {}
for group in TagGroup.objects.filter(allow_vote=True):
tag_groups[group.id] = group.name
tags = {}
for tag in Tag.objects.filter(tag_group__allow_vote=True).order_by('name'):
if tag_groups[tag.tag_group.id] not in tags:
tags[tag_groups[tag.tag_group.id]] = []
tags[tag_groups[tag.tag_group.id]].append(
{'id': tag.id, 'name': tag.name}
)
return Response(tags, status=status.HTTP_200_OK)
@api_view(['GET'])
def all_tags_by_searcheable_group(request):
tag_groups = {}
for group in TagGroup.objects.all():
tag_groups[group.id] = group.name
tags = {}
for tag in Tag.objects \
.annotate(games=Count('confirmedtag__game',
filter=Q(confirmedtag__game__hide=False),
distinct=True)) \
.filter(games__gte=F('tag_group__min_games_for_search')) \
.order_by('name'):
if tag_groups[tag.tag_group.id] not in tags:
tags[tag_groups[tag.tag_group.id]] = []
tags[tag_groups[tag.tag_group.id]].append(
{'id': tag.id, 'name': tag.name}
)
return Response(tags, status=status.HTTP_200_OK)
@api_view(['GET'])
@permission_classes((IsAuthenticated, IsAdminUser))
def all_tags(request):
response = map(
lambda x: {'id': x.id, 'name': x.name, 'group': x.tag_group.name},
Tag.objects.all().order_by('tag_group__name','name'))
return Response(response, status=status.HTTP_200_OK)
@api_view(['GET', 'PATCH', 'DELETE'])
@permission_classes((IsAuthenticated, IsAdminUser))
def tag(request, id):
if request.method == 'GET':
return tag_get(request, id)
elif request.method == 'PATCH':
return tag_update(request, id)
elif request.method == 'DELETE':
return tag_delete(request, id)
def tag_get(request, id):
tag = get_object_or_404(Tag, id=id)
response = {
'id': tag.id,
'name': tag.name,
'group': {'id': tag.tag_group.id, 'name': tag.tag_group.name}
}
return Response(response, status=status.HTTP_200_OK)
def tag_update(request, id):
tag = get_object_or_404(Tag, id=id)
tag_group = get_object_or_404(TagGroup, id=request.data['group_id'])
updated_tag = TagSerializer(
data=request.data,
instance=tag,
context={'tag_group': tag_group})
success = updated_tag.is_valid()
if success:
updated_tag.save()
return Response(status=status.HTTP_200_OK)
else:
return Response(status=status.HTTP_400_BAD_REQUEST)
def tag_delete(request, id):
tag = get_object_or_404(Tag, id=id)
tag.delete()
return Response(status=status.HTTP_200_OK)
@api_view(['POST'])
@permission_classes((IsAuthenticated, IsAdminUser))
def tag_post(request):
tag_group = get_object_or_404(TagGroup, id=request.data['group_id'])
tag = TagSerializer(
data=request.data,
context={'tag_group': tag_group})
success = tag.is_valid()
if success:
tag.save()
return Response(status=status.HTTP_200_OK)
else:
return Response(status=status.HTTP_400_BAD_REQUEST)
@api_view(['POST'])
@permission_classes((IsAuthenticated, IsAdminUser))
def tag_merge(request, tag1_id, tag2_id):
tag1 = get_object_or_404(Tag, id=tag1_id)
tag2 = get_object_or_404(Tag, id=tag2_id)
confirmed_tag2 = ConfirmedTag.objects.filter(tag_id=tag2_id)
suggested_tag2 = SuggestedTag.objects.filter(tag_id=tag2_id)
for query in [confirmed_tag2, suggested_tag2]:
for item in query:
try:
item.tag_id = tag1_id
item.save()
except IntegrityError:
item.delete()
tag2.delete()
return Response(status=status.HTTP_200_OK)
# CONFIRMED TAGS
# ****
@api_view(['GET'])
@permission_classes((IsAuthenticated, IsAdminUser))
def confirmed_tags_of_game(request, game_id):
game = get_object_or_404(SwitchGame, id=game_id)
confirmed_by_nintendo = game.confirmedtag_set.filter(confirmed_by='NTD')
confirmed_by_staff = game.confirmedtag_set.filter(confirmed_by='STF')
response = {
'nintendo': dict(zip(
map(lambda x: x.tag.id, confirmed_by_nintendo),
[True] * confirmed_by_nintendo.count(),
)),
'staff': dict(zip(
map(lambda x: x.tag.id, confirmed_by_staff),
[True] * confirmed_by_staff.count(),
))
}
return Response(response, status=status.HTTP_200_OK)
@api_view(['POST', 'DELETE'])
@permission_classes((IsAuthenticated, IsAdminUser))
def confirm_tag_staff(request, tag_id, game_id):
if request.method == 'POST':
return confirmed_tag_staff_add(request, tag_id, game_id)
elif request.method == 'DELETE':
return confirmed_tag_staff_remove(request, tag_id, game_id)
def confirmed_tag_staff_add(request, tag_id, game_id):
game = get_object_or_404(SwitchGame, id=game_id)
tag = get_object_or_404(Tag, id=tag_id)
confirmation = ConfirmedTag(tag=tag, game=game, confirmed_by='STF')
confirmation.save()
return Response(status=status.HTTP_200_OK)
def confirmed_tag_staff_remove(request, tag_id, game_id):
game = get_object_or_404(SwitchGame, id=game_id)
tag = get_object_or_404(Tag, id=tag_id)
confirmation = get_object_or_404(
ConfirmedTag,
tag=tag, game=game, confirmed_by='STF')
confirmation.delete()
return Response(status=status.HTTP_200_OK)
@api_view(['DELETE'])
@permission_classes((IsAuthenticated, IsAdminUser))
def unconfirm_tag_nintendo(request, tag_id, game_id):
instance = get_object_or_404(
ConfirmedTag, tag_id=tag_id, game_id=game_id, confirmed_by='NTD')
try:
instance.delete()
return Response(status=status.HTTP_200_OK)
except Exception as e:
print('Error unconfirming tag {} for game {}'
.format(tag_id, game_id))
return Response(status=status.HTTP_500_INTERNAL_SERVER_ERROR)
# VOTE FOR TAG
# ****
@api_view(['GET'])
@permission_classes((IsAuthenticated,))
def voted_tags_of_game(request, game_code):
game = get_object_or_404(SwitchGame, game_code_unique=game_code)
votes = game.suggestedtag_set.filter(user=request.user)
response = dict(zip(
map(lambda x: x.tag.id, votes),
[True] * votes.count()))
return Response(response, status=status.HTTP_200_OK)
@api_view(['POST', 'DELETE'])
@permission_classes((IsAuthenticated,))
def vote_tag(request, tag_id, game_code):
if request.method == 'POST':
return vote_tag_post(request, tag_id, game_code)
if request.method == 'DELETE':
return vote_tag_delete(request, tag_id, game_code)
def vote_tag_post(request, tag_id, game_code):
game = get_object_or_404(SwitchGame, game_code_unique=game_code)
tag = get_object_or_404(Tag, id=tag_id)
# If tag from unvotable group, raise an error
if not tag.tag_group.allow_vote:
return Response(status=status.HTTP_400_BAD_REQUEST)
    # If the vote already exists, return an error
try:
instance = SuggestedTag.objects.get(
game=game, tag=tag, user=request.user)
return Response(status=status.HTTP_400_BAD_REQUEST)
except SuggestedTag.DoesNotExist:
pass
vote_tag = SuggestedTag(game=game, tag=tag, user=request.user)
try:
vote_tag.save()
confirm_tag_by_vote(game, tag)
return Response(status=status.HTTP_200_OK)
except Exception as e:
return Response(status=status.HTTP_500_INTERNAL_SERVER_ERROR)
def vote_tag_delete(request, tag_id, game_code):
game = get_object_or_404(SwitchGame, game_code_unique=game_code)
tag = get_object_or_404(Tag, id=tag_id)
vote_tag = get_object_or_404(
SuggestedTag, game=game, tag=tag, user=request.user)
try:
vote_tag.delete()
unconfirm_tag_by_vote(game, tag)
return Response(status=status.HTTP_200_OK)
except Exception as e:
        print('Error deleting tag vote for tag {} and game {}'
              .format(tag_id, game_code))
return Response(status=status.HTTP_500_INTERNAL_SERVER_ERROR)
def confirm_tag_by_vote(game, tag):
votes_count = SuggestedTag.objects.filter(game=game, tag=tag).count()
if votes_count >= VOTE_TAG_UPPERBOUND:
if ConfirmedTag.objects \
.filter(game=game, tag=tag, confirmed_by='VOT') \
.count() == 0:
confirmed = ConfirmedTag(game=game, tag=tag, confirmed_by='VOT')
confirmed.save()
def unconfirm_tag_by_vote(game, tag):
votes_count = SuggestedTag.objects.filter(game=game, tag=tag).count()
if votes_count <= VOTE_TAG_LOWERBOUND:
if ConfirmedTag.objects \
.filter(game=game, tag=tag, confirmed_by='VOT') \
.count():
confirmed = ConfirmedTag.objects.get(
game=game, tag=tag, confirmed_by='VOT')
confirmed.delete()
# SUGGESTED TAGS
# ****
@api_view(['GET'])
@permission_classes((IsAuthenticated, IsAdminUser))
def all_unconfirmed_suggested_tags(request):
confirmed_subquery = ConfirmedTag.objects \
.filter(game=OuterRef('game'), tag=OuterRef('tag'))
unconf_sugg_tags = SuggestedTag.objects \
.annotate(already_confirmed=Exists(confirmed_subquery)) \
.filter(already_confirmed=False) \
.annotate(game_title=Coalesce(
'game__game_eu__title', 'game__game_us__title')) \
.annotate(game_image=Coalesce(
'game__game_eu__image_sq_h2_url', 'game__game_us__front_box_art',
output_field=CharField())) \
.values(
'game__id', 'game__game_code_unique', 'game_title', 'game_image',
'tag__id', 'tag__name') \
.distinct()
response = {}
for suggestion in unconf_sugg_tags:
if suggestion['game__id'] not in response:
response[suggestion['game__id']] = {
'title': suggestion['game_title'],
'game_code': suggestion['game__game_code_unique'],
'game_image': suggestion['game_image'],
'tags': {},
}
response[suggestion['game__id']]['tags'][suggestion['tag__id']] = suggestion['tag__name']
return Response(response, status=status.HTTP_200_OK)
```
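A minimal standalone sketch of the vote-threshold behaviour implemented by `confirm_tag_by_vote` and `unconfirm_tag_by_vote` above, with plain counters and a set in place of the Django querysets. The `VOTE_TAG_UPPERBOUND` and `VOTE_TAG_LOWERBOUND` constants are imported elsewhere in the real module, so the values used here are assumptions.

```python
# Standalone sketch of the vote-based confirmation thresholds.
# The real code counts SuggestedTag rows and creates/deletes a
# ConfirmedTag with confirmed_by='VOT'; a set stands in for it here.
VOTE_TAG_UPPERBOUND = 5   # assumed value; the real constant lives elsewhere
VOTE_TAG_LOWERBOUND = 3   # assumed value

confirmed_by_vote = set()  # (game, tag) pairs confirmed through votes

def on_vote_added(game, tag, votes_count):
    """Confirm the tag once the vote count reaches the upper bound."""
    if votes_count >= VOTE_TAG_UPPERBOUND:
        confirmed_by_vote.add((game, tag))

def on_vote_removed(game, tag, votes_count):
    """Drop the vote-based confirmation once votes fall to the lower bound."""
    if votes_count <= VOTE_TAG_LOWERBOUND:
        confirmed_by_vote.discard((game, tag))

on_vote_added('ABCDE', 'co-op', votes_count=5)
assert ('ABCDE', 'co-op') in confirmed_by_vote
on_vote_removed('ABCDE', 'co-op', votes_count=3)
assert ('ABCDE', 'co-op') not in confirmed_by_vote
```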
#### File: games/models/switch_game.py
```python
from django.db import models
from . import SwitchGameUS, SwitchGameEU
class SwitchGame(models.Model):
game_us = models.OneToOneField(
SwitchGameUS,
on_delete=models.SET_NULL,
null=True,
blank=True,
)
game_eu = models.OneToOneField(
SwitchGameEU,
on_delete=models.SET_NULL,
null=True,
blank=True,
)
game_code_unique = models.CharField(max_length=5, unique=True)
hide = models.BooleanField(default=False)
@property
def title(self):
return self.game_eu.title if self.game_eu else self.game_us.title
def __str__(self):
return self.game_code_unique
```
#### File: games/serializers/price.py
```python
from rest_framework import serializers
from ..models import SwitchGamePrice, SwitchGameSale
class SwitchGamePriceSerializer(serializers.Serializer):
amount = serializers.CharField(max_length=12)
currency = serializers.CharField(max_length=8)
raw_value = serializers.CharField(max_length=10)
def create(self, validated_data):
price = SwitchGamePrice(
game=self.context['game'],
country=self.context['country'],
amount=validated_data['amount'],
currency=validated_data['currency'],
raw_value=float(validated_data['raw_value']),
)
price.save()
return price
def update(self, instance, validated_data):
instance.amount = validated_data['amount']
instance.currency = validated_data['currency']
instance.raw_value = float(validated_data['raw_value'])
instance.save()
return instance
class SwitchGameSaleSerializer(serializers.Serializer):
amount = serializers.CharField(max_length=12)
currency = serializers.CharField(max_length=8)
raw_value = serializers.CharField(max_length=10)
start_datetime = serializers.DateTimeField(required=False)
end_datetime = serializers.DateTimeField(required=False)
def create(self, validated_data):
price = SwitchGameSale(
game=self.context['game'],
country=self.context['country'],
amount=validated_data['amount'],
currency=validated_data['currency'],
raw_value=float(validated_data['raw_value']),
start_datetime=validated_data['start_datetime'],
end_datetime=validated_data['end_datetime'],
)
price.save()
return price
def update(self, instance, validated_data):
instance.amount = validated_data['amount']
instance.currency = validated_data['currency']
instance.raw_value = float(validated_data['raw_value'])
        instance.start_datetime = validated_data.get(
            'start_datetime', instance.start_datetime)
        instance.end_datetime = validated_data.get(
            'end_datetime', instance.end_datetime)
instance.save()
return instance
```
#### File: games/serializers/switch_game_us.py
```python
from django.contrib.auth import get_user_model
from rest_framework import serializers
from datetime import datetime
import re
from ..models import SwitchGameUS, SwitchGame
class SwitchGameUSSerializer(serializers.Serializer):
title = serializers.CharField(max_length=128)
slug = serializers.SlugField(max_length=128)
release_date = serializers.CharField(max_length=12)
nsuid = serializers.CharField(max_length=14, required=False)
game_code = serializers.CharField(max_length=18)
front_box_art = serializers.URLField()
video_link = serializers.CharField(max_length=32, required=False)
def create(self, validated_data):
switch_game_us = self.validated_data_to_new(validated_data)
try:
switch_game_us.save()
except Exception as e:
            print('Error while saving US game {} ({})'
.format(switch_game_us, e))
return switch_game_us
clean_game_code = re.sub(
r'[\-\. ]+', '',
validated_data.get('game_code')
)
# If Game already in DB, update it with US Game
if (
SwitchGame.objects.filter(game_code_unique=clean_game_code[4:9])
.exists()
):
switch_game = SwitchGame.objects.get(
game_code_unique=clean_game_code[4:9])
switch_game.game_us = switch_game_us
# If Game not yet in DB, add one and assign US Game to it
else:
switch_game = SwitchGame(
game_us=switch_game_us,
game_code_unique=clean_game_code[4:9]
)
try:
switch_game.save()
except Exception as e:
print('Error while saving game {} ({})'.format(switch_game, e))
return switch_game_us
def update(self, instance, validated_data):
release_datetime = datetime.strptime(
validated_data.get('release_date'), '%b %d, %Y')
clean_game_code = re.sub(
            r'[\-\. ]+', '',
validated_data.get('game_code', instance.game_code)
)
instance.title = validated_data.get('title', instance.title)
instance.slug = validated_data.get('slug', instance.slug)
instance.release_date = release_datetime
instance.nsuid = validated_data.get('nsuid', instance.nsuid)
instance.game_code_system = clean_game_code[0:3]
instance.game_code_region = clean_game_code[3:4]
instance.game_code_unique = clean_game_code[4:9]
instance.front_box_art = validated_data.get(
'front_box_art', instance.front_box_art)
instance.video_link = validated_data.get(
'video_link', instance.video_link)
instance.save()
return instance
def validated_data_to_new(self, validated_data):
release_datetime = datetime.strptime(
validated_data.get('release_date'), '%b %d, %Y')
clean_game_code = re.sub(
r'[\-\. ]+', '', validated_data.get('game_code'))
switch_game_us = SwitchGameUS(
title=validated_data.get('title'),
slug=validated_data.get('slug')[0:50],
release_date=release_datetime,
nsuid=validated_data.get('nsuid'),
game_code_system=clean_game_code[0:3],
game_code_region=clean_game_code[3:4],
game_code_unique=clean_game_code[4:9],
front_box_art=validated_data.get('front_box_art'),
video_link=validated_data.get('video_link'),
)
return switch_game_us
```
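The serializer above derives `game_code_system`, `game_code_region` and `game_code_unique` by stripping separators from the raw `game_code` and slicing fixed positions. A small standalone sketch of that split; the code string used is made up purely for illustration.

```python
import re

def split_game_code(raw_code):
    """Strip '-', '.' and spaces, then slice the system/region/unique
    parts the way SwitchGameUSSerializer does (positions 0:3, 3:4, 4:9)."""
    clean = re.sub(r'[\-\. ]+', '', raw_code)
    return {
        'system': clean[0:3],
        'region': clean[3:4],
        'unique': clean[4:9],
    }

# Hypothetical code string, only to show the slicing.
print(split_game_code('HAC-P-ABCDE'))
# {'system': 'HAC', 'region': 'P', 'unique': 'ABCDE'}
```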
#### File: games/tasks/update_switch_price.py
```python
from celery import shared_task
from games.models import (
SwitchGame,
SwitchGameUS,
SwitchGameEU,
SwitchGamePrice,
SwitchGameSale,
)
from games.tasks.update_utils import treated_request
from games.serializers import (
SwitchGamePriceSerializer,
SwitchGameSaleSerializer,
)
@shared_task()
def update_switch_price():
print('Updating Switch games\' prices...')
# Prices in the America region
for country in ['US', 'CA', 'MX']: # , 'AR', 'BR', 'CL']:
update_country(country, SwitchGameUS)
# Prices in the Europe region
for country in ['GB', 'DE', 'FR', 'ZA', 'RU']:
update_country(country, SwitchGameEU)
print('Finished updating Switch games\' prices.')
def update_country(country, model):
url = 'https://api.ec.nintendo.com/v1/price'
count = model.objects.count()
found_price = 0
found_sales = 0
for offset in range(0, count, 50):
print('Updating {}\'s price offset {}'.format(country, offset))
games = model.objects.all()[offset:offset+50].values('nsuid')
games = list(map(lambda game: game['nsuid'], games))
        games = ','.join([nsuid for nsuid in games if nsuid is not None])
params = {'lang': 'en', 'country': country, 'ids': games}
        req = treated_request(url, params, '{} Switch price'.format(country))
data = req.json()['prices']
for price_info in data:
if model.objects.filter(nsuid=price_info['title_id']).count() > 1:
print('Multiple games found for nsuid {}'.format(price_info['title_id']))
game = model.objects.filter(nsuid=price_info['title_id'])[0]
if 'regular_price' in price_info:
found_price = found_price + 1
if SwitchGamePrice.objects.filter(game=game.switchgame,
country=country).exists():
price = SwitchGamePrice.objects.get(
game=game.switchgame, country=country)
serialized = SwitchGamePriceSerializer(
data=price_info['regular_price'],
context={'game': game.switchgame, 'country': country},
instance=price)
else:
serialized = SwitchGamePriceSerializer(
data=price_info['regular_price'],
context={'game': game.switchgame, 'country': country})
if serialized.is_valid():
serialized.save()
if 'discount_price' in price_info:
found_sales = found_sales + 1
if SwitchGameSale.objects.filter(game=game.switchgame,
country=country).exists():
price = SwitchGameSale.objects.get(
game=game.switchgame, country=country)
serialized = SwitchGameSaleSerializer(
data=price_info['discount_price'],
instance=price,
context={'game': game.switchgame, 'country': country})
else:
serialized = SwitchGameSaleSerializer(
data=price_info['discount_price'],
context={'game': game.switchgame, 'country': country})
if serialized.is_valid():
serialized.save()
else:
SwitchGameSale.objects \
.filter(game=game.switchgame, country=country).delete()
print('Found {} prices and {} sales for country {}'
.format(found_price, found_sales, country))
```
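`update_country` above pages through games fifty at a time and sends their `nsuid`s as a comma-separated `ids` parameter. A standalone sketch of that batching; the endpoint URL is taken from the task and no request is actually performed here.

```python
def build_price_requests(nsuids, country, chunk_size=50):
    """Yield (url, params) pairs for the Nintendo price endpoint,
    batching nsuids the same way update_country does."""
    url = 'https://api.ec.nintendo.com/v1/price'
    for offset in range(0, len(nsuids), chunk_size):
        chunk = [n for n in nsuids[offset:offset + chunk_size] if n is not None]
        params = {'lang': 'en', 'country': country, 'ids': ','.join(chunk)}
        yield url, params

# Hypothetical nsuids, only to show the chunking.
sample = ['70010000000001', None, '70010000000002']
for url, params in build_price_requests(sample, 'US'):
    print(url, params['ids'])
```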
#### File: users/serializers/user_profile.py
```python
from django.contrib.auth.models import AnonymousUser
from users.models import Following
def user_to_profile_json(user, request):
profile_json = {
'username': user.username,
}
if user.id == request.user.id:
profile_json['is_following'] = None
elif type(request.user) == AnonymousUser:
profile_json['is_following'] = False
else:
profile_json['is_following'] = Following.objects.filter(
follower=request.user, followed=user).exists()
return profile_json
def user_to_card_json(user):
json = {}
json['username'] = user.username
json['likes'] = user.recomendation_set.filter(recomends=True).count()
json['dislikes'] = user.recomendation_set.filter(recomends=False).count()
json['reviews'] = user.review_set.count()
return json
```
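`user_to_profile_json` returns a tri-state `is_following` flag: `None` when a user views their own profile, `False` for anonymous visitors, and a real lookup otherwise. A framework-free sketch of that decision:

```python
def is_following_flag(viewer_id, profile_id, viewer_is_anonymous, follows):
    """Mirror the tri-state logic of user_to_profile_json without Django.

    `follows` is any callable answering "does viewer follow profile?"."""
    if viewer_id == profile_id:
        return None            # own profile: the flag is meaningless
    if viewer_is_anonymous:
        return False           # anonymous visitors never follow anyone
    return follows(viewer_id, profile_id)

print(is_following_flag(1, 1, False, lambda a, b: True))    # None
print(is_following_flag(None, 2, True, lambda a, b: True))  # False
print(is_following_flag(1, 2, False, lambda a, b: True))    # True
```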
|
{
"source": "JeffersonBledsoe/core",
"score": 2
}
|
#### File: components/freedompro/__init__.py
```python
from datetime import timedelta
import logging
from pyfreedompro import get_list, get_states
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_API_KEY
from homeassistant.core import HomeAssistant
from homeassistant.helpers import aiohttp_client
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
PLATFORMS = [
"binary_sensor",
"climate",
"cover",
"fan",
"light",
"lock",
"sensor",
"switch",
]
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up Freedompro from a config entry."""
hass.data.setdefault(DOMAIN, {})
api_key = entry.data[CONF_API_KEY]
coordinator = FreedomproDataUpdateCoordinator(hass, api_key)
await coordinator.async_config_entry_first_refresh()
entry.async_on_unload(entry.add_update_listener(update_listener))
hass.data[DOMAIN][entry.entry_id] = coordinator
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
async def update_listener(hass, config_entry):
"""Update listener."""
await hass.config_entries.async_reload(config_entry.entry_id)
class FreedomproDataUpdateCoordinator(DataUpdateCoordinator):
"""Class to manage fetching Freedompro data API."""
def __init__(self, hass, api_key):
"""Initialize."""
self._hass = hass
self._api_key = api_key
self._devices = None
update_interval = timedelta(minutes=1)
super().__init__(hass, _LOGGER, name=DOMAIN, update_interval=update_interval)
async def _async_update_data(self):
if self._devices is None:
result = await get_list(
aiohttp_client.async_get_clientsession(self._hass), self._api_key
)
if result["state"]:
self._devices = result["devices"]
else:
raise UpdateFailed()
result = await get_states(
aiohttp_client.async_get_clientsession(self._hass), self._api_key
)
for device in self._devices:
dev = next(
(dev for dev in result if dev["uid"] == device["uid"]),
None,
)
if dev is not None and "state" in dev:
device["state"] = dev["state"]
return self._devices
```
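The coordinator above fetches the device list once, then on every refresh merges the latest states into it by matching `uid`. A standalone sketch of that merge step, using plain dicts instead of the API payloads:

```python
def merge_states(devices, states):
    """Copy the 'state' field from the states payload onto the cached
    device list, matching entries by 'uid' (same pattern as
    FreedomproDataUpdateCoordinator._async_update_data)."""
    for device in devices:
        match = next((s for s in states if s["uid"] == device["uid"]), None)
        if match is not None and "state" in match:
            device["state"] = match["state"]
    return devices

devices = [{"uid": "a1", "type": "light"}, {"uid": "b2", "type": "switch"}]
states = [{"uid": "a1", "state": {"on": True}}]
print(merge_states(devices, states))
```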
#### File: components/fronius/sensor.py
```python
from __future__ import annotations
import copy
from datetime import timedelta
import logging
from typing import Any
from pyfronius import Fronius, FroniusError
import voluptuous as vol
from homeassistant.components.sensor import (
PLATFORM_SCHEMA,
STATE_CLASS_MEASUREMENT,
STATE_CLASS_TOTAL_INCREASING,
SensorEntity,
)
from homeassistant.const import (
CONF_DEVICE,
CONF_MONITORED_CONDITIONS,
CONF_RESOURCE,
CONF_SCAN_INTERVAL,
CONF_SENSOR_TYPE,
DEVICE_CLASS_BATTERY,
DEVICE_CLASS_CURRENT,
DEVICE_CLASS_ENERGY,
DEVICE_CLASS_POWER,
DEVICE_CLASS_POWER_FACTOR,
DEVICE_CLASS_TEMPERATURE,
DEVICE_CLASS_TIMESTAMP,
DEVICE_CLASS_VOLTAGE,
)
from homeassistant.core import callback
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.event import async_track_time_interval
_LOGGER = logging.getLogger(__name__)
CONF_SCOPE = "scope"
TYPE_INVERTER = "inverter"
TYPE_STORAGE = "storage"
TYPE_METER = "meter"
TYPE_POWER_FLOW = "power_flow"
TYPE_LOGGER_INFO = "logger_info"
SCOPE_DEVICE = "device"
SCOPE_SYSTEM = "system"
DEFAULT_SCOPE = SCOPE_DEVICE
DEFAULT_DEVICE = 0
DEFAULT_INVERTER = 1
DEFAULT_SCAN_INTERVAL = timedelta(seconds=60)
SENSOR_TYPES = [
TYPE_INVERTER,
TYPE_STORAGE,
TYPE_METER,
TYPE_POWER_FLOW,
TYPE_LOGGER_INFO,
]
SCOPE_TYPES = [SCOPE_DEVICE, SCOPE_SYSTEM]
PREFIX_DEVICE_CLASS_MAPPING = [
("state_of_charge", DEVICE_CLASS_BATTERY),
("temperature", DEVICE_CLASS_TEMPERATURE),
("power_factor", DEVICE_CLASS_POWER_FACTOR),
("power", DEVICE_CLASS_POWER),
("energy", DEVICE_CLASS_ENERGY),
("current", DEVICE_CLASS_CURRENT),
("timestamp", DEVICE_CLASS_TIMESTAMP),
("voltage", DEVICE_CLASS_VOLTAGE),
]
PREFIX_STATE_CLASS_MAPPING = [
("state_of_charge", STATE_CLASS_MEASUREMENT),
("temperature", STATE_CLASS_MEASUREMENT),
("power_factor", STATE_CLASS_MEASUREMENT),
("power", STATE_CLASS_MEASUREMENT),
("energy", STATE_CLASS_TOTAL_INCREASING),
("current", STATE_CLASS_MEASUREMENT),
("timestamp", STATE_CLASS_MEASUREMENT),
("voltage", STATE_CLASS_MEASUREMENT),
]
def _device_id_validator(config):
"""Ensure that inverters have default id 1 and other devices 0."""
config = copy.deepcopy(config)
for cond in config[CONF_MONITORED_CONDITIONS]:
if CONF_DEVICE not in cond:
if cond[CONF_SENSOR_TYPE] == TYPE_INVERTER:
cond[CONF_DEVICE] = DEFAULT_INVERTER
else:
cond[CONF_DEVICE] = DEFAULT_DEVICE
return config
PLATFORM_SCHEMA = vol.Schema(
vol.All(
PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_RESOURCE): cv.url,
vol.Required(CONF_MONITORED_CONDITIONS): vol.All(
cv.ensure_list,
[
{
vol.Required(CONF_SENSOR_TYPE): vol.In(SENSOR_TYPES),
vol.Optional(CONF_SCOPE, default=DEFAULT_SCOPE): vol.In(
SCOPE_TYPES
),
vol.Optional(CONF_DEVICE): cv.positive_int,
}
],
),
}
),
_device_id_validator,
)
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up of Fronius platform."""
session = async_get_clientsession(hass)
fronius = Fronius(session, config[CONF_RESOURCE])
scan_interval = config.get(CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL)
adapters = []
# Creates all adapters for monitored conditions
for condition in config[CONF_MONITORED_CONDITIONS]:
device = condition[CONF_DEVICE]
sensor_type = condition[CONF_SENSOR_TYPE]
scope = condition[CONF_SCOPE]
name = f"Fronius {condition[CONF_SENSOR_TYPE].replace('_', ' ').capitalize()} {device if scope == SCOPE_DEVICE else SCOPE_SYSTEM} {config[CONF_RESOURCE]}"
if sensor_type == TYPE_INVERTER:
if scope == SCOPE_SYSTEM:
adapter_cls = FroniusInverterSystem
else:
adapter_cls = FroniusInverterDevice
elif sensor_type == TYPE_METER:
if scope == SCOPE_SYSTEM:
adapter_cls = FroniusMeterSystem
else:
adapter_cls = FroniusMeterDevice
elif sensor_type == TYPE_POWER_FLOW:
adapter_cls = FroniusPowerFlow
elif sensor_type == TYPE_LOGGER_INFO:
adapter_cls = FroniusLoggerInfo
else:
adapter_cls = FroniusStorage
adapters.append(adapter_cls(fronius, name, device, async_add_entities))
    # Create a lambda that fetches an update when called
def adapter_data_fetcher(data_adapter):
async def fetch_data(*_):
await data_adapter.async_update()
return fetch_data
# Set up the fetching in a fixed interval for each adapter
for adapter in adapters:
fetch = adapter_data_fetcher(adapter)
# fetch data once at set-up
await fetch()
async_track_time_interval(hass, fetch, scan_interval)
class FroniusAdapter:
"""The Fronius sensor fetching component."""
def __init__(
self, bridge: Fronius, name: str, device: int, add_entities: AddEntitiesCallback
) -> None:
"""Initialize the sensor."""
self.bridge = bridge
self._name = name
self._device = device
self._fetched: dict[str, Any] = {}
self._available = True
self.sensors: set[str] = set()
self._registered_sensors: set[SensorEntity] = set()
self._add_entities = add_entities
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def data(self):
"""Return the state attributes."""
return self._fetched
@property
def available(self):
"""Whether the fronius device is active."""
return self._available
async def async_update(self):
"""Retrieve and update latest state."""
try:
values = await self._update()
except FroniusError as err:
# fronius devices are often powered by self-produced solar energy
            # and hence turned off at night.
# Therefore we will not print multiple errors when connection fails
if self._available:
self._available = False
_LOGGER.error("Failed to update: %s", err)
return
self._available = True # reset connection failure
attributes = self._fetched
# Copy data of current fronius device
for key, entry in values.items():
# If the data is directly a sensor
if "value" in entry:
attributes[key] = entry
self._fetched = attributes
# Add discovered value fields as sensors
# because some fields are only sent temporarily
new_sensors = []
for key in attributes:
if key not in self.sensors:
self.sensors.add(key)
_LOGGER.info("Discovered %s, adding as sensor", key)
new_sensors.append(FroniusTemplateSensor(self, key))
self._add_entities(new_sensors, True)
# Schedule an update for all included sensors
for sensor in self._registered_sensors:
sensor.async_schedule_update_ha_state(True)
async def _update(self) -> dict:
"""Return values of interest."""
@callback
def register(self, sensor):
"""Register child sensor for update subscriptions."""
self._registered_sensors.add(sensor)
return lambda: self._registered_sensors.remove(sensor)
class FroniusInverterSystem(FroniusAdapter):
"""Adapter for the fronius inverter with system scope."""
async def _update(self):
"""Get the values for the current state."""
return await self.bridge.current_system_inverter_data()
class FroniusInverterDevice(FroniusAdapter):
"""Adapter for the fronius inverter with device scope."""
async def _update(self):
"""Get the values for the current state."""
return await self.bridge.current_inverter_data(self._device)
class FroniusStorage(FroniusAdapter):
"""Adapter for the fronius battery storage."""
async def _update(self):
"""Get the values for the current state."""
return await self.bridge.current_storage_data(self._device)
class FroniusMeterSystem(FroniusAdapter):
"""Adapter for the fronius meter with system scope."""
async def _update(self):
"""Get the values for the current state."""
return await self.bridge.current_system_meter_data()
class FroniusMeterDevice(FroniusAdapter):
"""Adapter for the fronius meter with device scope."""
async def _update(self):
"""Get the values for the current state."""
return await self.bridge.current_meter_data(self._device)
class FroniusPowerFlow(FroniusAdapter):
"""Adapter for the fronius power flow."""
async def _update(self):
"""Get the values for the current state."""
return await self.bridge.current_power_flow()
class FroniusLoggerInfo(FroniusAdapter):
"""Adapter for the fronius power flow."""
async def _update(self):
"""Get the values for the current state."""
return await self.bridge.current_logger_info()
class FroniusTemplateSensor(SensorEntity):
"""Sensor for the single values (e.g. pv power, ac power)."""
def __init__(self, parent: FroniusAdapter, key: str) -> None:
"""Initialize a singular value sensor."""
self._key = key
self._attr_name = f"{key.replace('_', ' ').capitalize()} {parent.name}"
self._parent = parent
for prefix, device_class in PREFIX_DEVICE_CLASS_MAPPING:
if self._key.startswith(prefix):
self._attr_device_class = device_class
break
for prefix, state_class in PREFIX_STATE_CLASS_MAPPING:
if self._key.startswith(prefix):
self._attr_state_class = state_class
break
@property
def should_poll(self):
"""Device should not be polled, returns False."""
return False
@property
def available(self):
"""Whether the fronius device is active."""
return self._parent.available
async def async_update(self):
"""Update the internal state."""
state = self._parent.data.get(self._key)
self._attr_native_value = state.get("value")
if isinstance(self._attr_native_value, float):
self._attr_native_value = round(self._attr_native_value, 2)
self._attr_native_unit_of_measurement = state.get("unit")
async def async_added_to_hass(self):
"""Register at parent component for updates."""
self.async_on_remove(self._parent.register(self))
def __hash__(self):
"""Hash sensor by hashing its name."""
return hash(self.name)
```
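`FroniusTemplateSensor.__init__` picks a device class and state class by checking key prefixes in order and stopping at the first match. A standalone sketch of that lookup over a plain prefix table; the mapped values here are simple strings rather than the Home Assistant constants.

```python
# Prefix table analogous to PREFIX_DEVICE_CLASS_MAPPING above; order
# matters, e.g. "power_factor" must be tested before "power".
PREFIX_DEVICE_CLASS = [
    ("state_of_charge", "battery"),
    ("power_factor", "power_factor"),
    ("power", "power"),
    ("energy", "energy"),
]

def device_class_for(key):
    """Return the class of the first matching prefix, else None."""
    for prefix, device_class in PREFIX_DEVICE_CLASS:
        if key.startswith(prefix):
            return device_class
    return None

print(device_class_for("power_factor_phase_1"))  # power_factor
print(device_class_for("power_ac"))              # power
print(device_class_for("frequency_ac"))          # None
```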
#### File: components/icloud/__init__.py
```python
import voluptuous as vol
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from homeassistant.core import HomeAssistant
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.typing import ConfigType, ServiceDataType
from homeassistant.util import slugify
from .account import IcloudAccount
from .const import (
CONF_GPS_ACCURACY_THRESHOLD,
CONF_MAX_INTERVAL,
CONF_WITH_FAMILY,
DEFAULT_GPS_ACCURACY_THRESHOLD,
DEFAULT_MAX_INTERVAL,
DEFAULT_WITH_FAMILY,
DOMAIN,
PLATFORMS,
STORAGE_KEY,
STORAGE_VERSION,
)
ATTRIBUTION = "Data provided by Apple iCloud"
# entity attributes
ATTR_ACCOUNT_FETCH_INTERVAL = "account_fetch_interval"
ATTR_BATTERY = "battery"
ATTR_BATTERY_STATUS = "battery_status"
ATTR_DEVICE_NAME = "device_name"
ATTR_DEVICE_STATUS = "device_status"
ATTR_LOW_POWER_MODE = "low_power_mode"
ATTR_OWNER_NAME = "owner_fullname"
# services
SERVICE_ICLOUD_PLAY_SOUND = "play_sound"
SERVICE_ICLOUD_DISPLAY_MESSAGE = "display_message"
SERVICE_ICLOUD_LOST_DEVICE = "lost_device"
SERVICE_ICLOUD_UPDATE = "update"
ATTR_ACCOUNT = "account"
ATTR_LOST_DEVICE_MESSAGE = "message"
ATTR_LOST_DEVICE_NUMBER = "number"
ATTR_LOST_DEVICE_SOUND = "sound"
SERVICE_SCHEMA = vol.Schema({vol.Optional(ATTR_ACCOUNT): cv.string})
SERVICE_SCHEMA_PLAY_SOUND = vol.Schema(
{vol.Required(ATTR_ACCOUNT): cv.string, vol.Required(ATTR_DEVICE_NAME): cv.string}
)
SERVICE_SCHEMA_DISPLAY_MESSAGE = vol.Schema(
{
vol.Required(ATTR_ACCOUNT): cv.string,
vol.Required(ATTR_DEVICE_NAME): cv.string,
vol.Required(ATTR_LOST_DEVICE_MESSAGE): cv.string,
vol.Optional(ATTR_LOST_DEVICE_SOUND): cv.boolean,
}
)
SERVICE_SCHEMA_LOST_DEVICE = vol.Schema(
{
vol.Required(ATTR_ACCOUNT): cv.string,
vol.Required(ATTR_DEVICE_NAME): cv.string,
vol.Required(ATTR_LOST_DEVICE_NUMBER): cv.string,
vol.Required(ATTR_LOST_DEVICE_MESSAGE): cv.string,
}
)
ACCOUNT_SCHEMA = vol.Schema(
{
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_WITH_FAMILY, default=DEFAULT_WITH_FAMILY): cv.boolean,
vol.Optional(CONF_MAX_INTERVAL, default=DEFAULT_MAX_INTERVAL): cv.positive_int,
vol.Optional(
CONF_GPS_ACCURACY_THRESHOLD, default=DEFAULT_GPS_ACCURACY_THRESHOLD
): cv.positive_int,
}
)
CONFIG_SCHEMA = vol.Schema(
{DOMAIN: vol.Schema(vol.All(cv.ensure_list, [ACCOUNT_SCHEMA]))},
extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Set up iCloud from legacy config file."""
conf = config.get(DOMAIN)
if conf is None:
return True
for account_conf in conf:
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data=account_conf
)
)
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up an iCloud account from a config entry."""
hass.data.setdefault(DOMAIN, {})
username = entry.data[CONF_USERNAME]
password = entry.data[CONF_PASSWORD]
with_family = entry.data[CONF_WITH_FAMILY]
max_interval = entry.data[CONF_MAX_INTERVAL]
gps_accuracy_threshold = entry.data[CONF_GPS_ACCURACY_THRESHOLD]
# For backwards compat
if entry.unique_id is None:
hass.config_entries.async_update_entry(entry, unique_id=username)
icloud_dir = hass.helpers.storage.Store(STORAGE_VERSION, STORAGE_KEY)
account = IcloudAccount(
hass,
username,
password,
icloud_dir,
with_family,
max_interval,
gps_accuracy_threshold,
entry,
)
await hass.async_add_executor_job(account.setup)
hass.data[DOMAIN][entry.unique_id] = account
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
def play_sound(service: ServiceDataType) -> None:
"""Play sound on the device."""
account = service.data[ATTR_ACCOUNT]
device_name = service.data.get(ATTR_DEVICE_NAME)
device_name = slugify(device_name.replace(" ", "", 99))
for device in _get_account(account).get_devices_with_name(device_name):
device.play_sound()
def display_message(service: ServiceDataType) -> None:
"""Display a message on the device."""
account = service.data[ATTR_ACCOUNT]
device_name = service.data.get(ATTR_DEVICE_NAME)
device_name = slugify(device_name.replace(" ", "", 99))
message = service.data.get(ATTR_LOST_DEVICE_MESSAGE)
sound = service.data.get(ATTR_LOST_DEVICE_SOUND, False)
for device in _get_account(account).get_devices_with_name(device_name):
device.display_message(message, sound)
def lost_device(service: ServiceDataType) -> None:
"""Make the device in lost state."""
account = service.data[ATTR_ACCOUNT]
device_name = service.data.get(ATTR_DEVICE_NAME)
device_name = slugify(device_name.replace(" ", "", 99))
number = service.data.get(ATTR_LOST_DEVICE_NUMBER)
message = service.data.get(ATTR_LOST_DEVICE_MESSAGE)
for device in _get_account(account).get_devices_with_name(device_name):
device.lost_device(number, message)
def update_account(service: ServiceDataType) -> None:
"""Call the update function of an iCloud account."""
account = service.data.get(ATTR_ACCOUNT)
if account is None:
for account in hass.data[DOMAIN].values():
account.keep_alive()
else:
_get_account(account).keep_alive()
    def _get_account(account_identifier: str) -> IcloudAccount:
if account_identifier is None:
return None
icloud_account = hass.data[DOMAIN].get(account_identifier)
if icloud_account is None:
for account in hass.data[DOMAIN].values():
if account.username == account_identifier:
icloud_account = account
if icloud_account is None:
raise Exception(
f"No iCloud account with username or name {account_identifier}"
)
return icloud_account
hass.services.async_register(
DOMAIN, SERVICE_ICLOUD_PLAY_SOUND, play_sound, schema=SERVICE_SCHEMA_PLAY_SOUND
)
hass.services.async_register(
DOMAIN,
SERVICE_ICLOUD_DISPLAY_MESSAGE,
display_message,
schema=SERVICE_SCHEMA_DISPLAY_MESSAGE,
)
hass.services.async_register(
DOMAIN,
SERVICE_ICLOUD_LOST_DEVICE,
lost_device,
schema=SERVICE_SCHEMA_LOST_DEVICE,
)
hass.services.async_register(
DOMAIN, SERVICE_ICLOUD_UPDATE, update_account, schema=SERVICE_SCHEMA
)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
if unload_ok:
hass.data[DOMAIN].pop(entry.data[CONF_USERNAME])
return unload_ok
```
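`_get_account` resolves a service call's `account` value either as the key used in `hass.data` or, failing that, by scanning the stored accounts for a matching username. A framework-free sketch of that fallback lookup:

```python
class _Account:
    """Stand-in for IcloudAccount, only carrying a username."""
    def __init__(self, username):
        self.username = username

def resolve_account(accounts, identifier):
    """Look up by key first, then fall back to matching .username,
    mirroring the _get_account helper above."""
    if identifier is None:
        return None
    found = accounts.get(identifier)
    if found is None:
        for account in accounts.values():
            if account.username == identifier:
                found = account
    if found is None:
        raise LookupError(f"No iCloud account with username or name {identifier}")
    return found

accounts = {"entry-123": _Account("user@example.com")}
print(resolve_account(accounts, "entry-123").username)
print(resolve_account(accounts, "user@example.com").username)
```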
#### File: components/srp_energy/__init__.py
```python
import logging
from srpenergy.client import SrpEnergyClient
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_ID, CONF_PASSWORD, CONF_USERNAME
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
from .const import SRP_ENERGY_DOMAIN
_LOGGER = logging.getLogger(__name__)
PLATFORMS = ["sensor"]
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up the SRP Energy component from a config entry."""
# Store an SrpEnergyClient object for your srp_energy to access
try:
srp_energy_client = SrpEnergyClient(
entry.data.get(CONF_ID),
entry.data.get(CONF_USERNAME),
entry.data.get(CONF_PASSWORD),
)
hass.data[SRP_ENERGY_DOMAIN] = srp_energy_client
    except Exception as ex:
_LOGGER.error("Unable to connect to Srp Energy: %s", str(ex))
raise ConfigEntryNotReady from ex
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
# unload srp client
hass.data[SRP_ENERGY_DOMAIN] = None
    # Unload platforms for the config entry
return await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
```
#### File: components/xiaomi_miio/vacuum.py
```python
from __future__ import annotations
from functools import partial
import logging
from miio import DeviceException
import voluptuous as vol
from homeassistant.components.vacuum import (
STATE_CLEANING,
STATE_DOCKED,
STATE_ERROR,
STATE_IDLE,
STATE_PAUSED,
STATE_RETURNING,
SUPPORT_BATTERY,
SUPPORT_CLEAN_SPOT,
SUPPORT_FAN_SPEED,
SUPPORT_LOCATE,
SUPPORT_PAUSE,
SUPPORT_RETURN_HOME,
SUPPORT_SEND_COMMAND,
SUPPORT_START,
SUPPORT_STATE,
SUPPORT_STOP,
StateVacuumEntity,
)
from homeassistant.core import callback
from homeassistant.helpers import config_validation as cv, entity_platform
from homeassistant.util.dt import as_utc
from . import VacuumCoordinatorData
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from .const import (
CONF_DEVICE,
CONF_FLOW_TYPE,
DOMAIN,
KEY_COORDINATOR,
KEY_DEVICE,
SERVICE_CLEAN_SEGMENT,
SERVICE_CLEAN_ZONE,
SERVICE_GOTO,
SERVICE_MOVE_REMOTE_CONTROL,
SERVICE_MOVE_REMOTE_CONTROL_STEP,
SERVICE_START_REMOTE_CONTROL,
SERVICE_STOP_REMOTE_CONTROL,
)
from .device import XiaomiCoordinatedMiioEntity
_LOGGER = logging.getLogger(__name__)
ATTR_ERROR = "error"
ATTR_RC_DURATION = "duration"
ATTR_RC_ROTATION = "rotation"
ATTR_RC_VELOCITY = "velocity"
ATTR_STATUS = "status"
ATTR_ZONE_ARRAY = "zone"
ATTR_ZONE_REPEATER = "repeats"
ATTR_TIMERS = "timers"
SUPPORT_XIAOMI = (
SUPPORT_STATE
| SUPPORT_PAUSE
| SUPPORT_STOP
| SUPPORT_RETURN_HOME
| SUPPORT_FAN_SPEED
| SUPPORT_SEND_COMMAND
| SUPPORT_LOCATE
| SUPPORT_BATTERY
| SUPPORT_CLEAN_SPOT
| SUPPORT_START
)
STATE_CODE_TO_STATE = {
1: STATE_IDLE, # "Starting"
2: STATE_IDLE, # "Charger disconnected"
3: STATE_IDLE, # "Idle"
4: STATE_CLEANING, # "Remote control active"
5: STATE_CLEANING, # "Cleaning"
6: STATE_RETURNING, # "Returning home"
7: STATE_CLEANING, # "Manual mode"
8: STATE_DOCKED, # "Charging"
9: STATE_ERROR, # "Charging problem"
10: STATE_PAUSED, # "Paused"
11: STATE_CLEANING, # "Spot cleaning"
12: STATE_ERROR, # "Error"
13: STATE_IDLE, # "Shutting down"
14: STATE_DOCKED, # "Updating"
15: STATE_RETURNING, # "Docking"
16: STATE_CLEANING, # "Going to target"
17: STATE_CLEANING, # "Zoned cleaning"
18: STATE_CLEANING, # "Segment cleaning"
100: STATE_DOCKED, # "Charging complete"
101: STATE_ERROR, # "Device offline"
}
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Xiaomi vacuum cleaner robot from a config entry."""
entities = []
if config_entry.data[CONF_FLOW_TYPE] == CONF_DEVICE:
name = config_entry.title
unique_id = config_entry.unique_id
mirobo = MiroboVacuum(
name,
hass.data[DOMAIN][config_entry.entry_id][KEY_DEVICE],
config_entry,
unique_id,
hass.data[DOMAIN][config_entry.entry_id][KEY_COORDINATOR],
)
entities.append(mirobo)
platform = entity_platform.async_get_current_platform()
platform.async_register_entity_service(
SERVICE_START_REMOTE_CONTROL,
{},
MiroboVacuum.async_remote_control_start.__name__,
)
platform.async_register_entity_service(
SERVICE_STOP_REMOTE_CONTROL,
{},
MiroboVacuum.async_remote_control_stop.__name__,
)
platform.async_register_entity_service(
SERVICE_MOVE_REMOTE_CONTROL,
{
vol.Optional(ATTR_RC_VELOCITY): vol.All(
vol.Coerce(float), vol.Clamp(min=-0.29, max=0.29)
),
vol.Optional(ATTR_RC_ROTATION): vol.All(
vol.Coerce(int), vol.Clamp(min=-179, max=179)
),
vol.Optional(ATTR_RC_DURATION): cv.positive_int,
},
MiroboVacuum.async_remote_control_move.__name__,
)
platform.async_register_entity_service(
SERVICE_MOVE_REMOTE_CONTROL_STEP,
{
vol.Optional(ATTR_RC_VELOCITY): vol.All(
vol.Coerce(float), vol.Clamp(min=-0.29, max=0.29)
),
vol.Optional(ATTR_RC_ROTATION): vol.All(
vol.Coerce(int), vol.Clamp(min=-179, max=179)
),
vol.Optional(ATTR_RC_DURATION): cv.positive_int,
},
MiroboVacuum.async_remote_control_move_step.__name__,
)
platform.async_register_entity_service(
SERVICE_CLEAN_ZONE,
{
vol.Required(ATTR_ZONE_ARRAY): vol.All(
list,
[
vol.ExactSequence(
[
vol.Coerce(int),
vol.Coerce(int),
vol.Coerce(int),
vol.Coerce(int),
]
)
],
),
vol.Required(ATTR_ZONE_REPEATER): vol.All(
vol.Coerce(int), vol.Clamp(min=1, max=3)
),
},
MiroboVacuum.async_clean_zone.__name__,
)
platform.async_register_entity_service(
SERVICE_GOTO,
{
vol.Required("x_coord"): vol.Coerce(int),
vol.Required("y_coord"): vol.Coerce(int),
},
MiroboVacuum.async_goto.__name__,
)
platform.async_register_entity_service(
SERVICE_CLEAN_SEGMENT,
{vol.Required("segments"): vol.Any(vol.Coerce(int), [vol.Coerce(int)])},
MiroboVacuum.async_clean_segment.__name__,
)
async_add_entities(entities, update_before_add=True)
class MiroboVacuum(XiaomiCoordinatedMiioEntity, StateVacuumEntity):
"""Representation of a Xiaomi Vacuum cleaner robot."""
coordinator: DataUpdateCoordinator[VacuumCoordinatorData]
def __init__(
self, name, device, entry, unique_id, coordinator: DataUpdateCoordinator
):
"""Initialize the Xiaomi vacuum cleaner robot handler."""
super().__init__(name, device, entry, unique_id, coordinator)
self._state: str | None = None
async def async_added_to_hass(self) -> None:
"""Run when entity is about to be added to hass."""
await super().async_added_to_hass()
self._handle_coordinator_update()
@property
def state(self):
"""Return the status of the vacuum cleaner."""
# The vacuum reverts back to an idle state after erroring out.
# We want to keep returning an error until it has been cleared.
if self.coordinator.data.status.got_error:
return STATE_ERROR
return self._state
@property
def battery_level(self):
"""Return the battery level of the vacuum cleaner."""
return self.coordinator.data.status.battery
@property
def fan_speed(self):
"""Return the fan speed of the vacuum cleaner."""
speed = self.coordinator.data.status.fanspeed
if speed in self.coordinator.data.fan_speeds_reverse:
return self.coordinator.data.fan_speeds_reverse[speed]
_LOGGER.debug("Unable to find reverse for %s", speed)
return speed
@property
def fan_speed_list(self):
"""Get the list of available fan speed steps of the vacuum cleaner."""
return (
list(self.coordinator.data.fan_speeds)
if self.coordinator.data.fan_speeds
else []
)
@property
def timers(self):
"""Get the list of added timers of the vacuum cleaner."""
return [
{
"enabled": timer.enabled,
"cron": timer.cron,
"next_schedule": as_utc(timer.next_schedule),
}
for timer in self.coordinator.data.timers
]
@property
def extra_state_attributes(self):
"""Return the specific state attributes of this vacuum cleaner."""
attrs = {}
attrs[ATTR_STATUS] = str(self.coordinator.data.status.state)
if self.coordinator.data.status.got_error:
attrs[ATTR_ERROR] = self.coordinator.data.status.error
if self.timers:
attrs[ATTR_TIMERS] = self.timers
return attrs
@property
def supported_features(self):
"""Flag vacuum cleaner robot features that are supported."""
return SUPPORT_XIAOMI
async def _try_command(self, mask_error, func, *args, **kwargs):
"""Call a vacuum command handling error messages."""
try:
await self.hass.async_add_executor_job(partial(func, *args, **kwargs))
await self.coordinator.async_refresh()
return True
except DeviceException as exc:
_LOGGER.error(mask_error, exc)
return False
async def async_start(self):
"""Start or resume the cleaning task."""
await self._try_command(
"Unable to start the vacuum: %s", self._device.resume_or_start
)
async def async_pause(self):
"""Pause the cleaning task."""
await self._try_command("Unable to set start/pause: %s", self._device.pause)
async def async_stop(self, **kwargs):
"""Stop the vacuum cleaner."""
await self._try_command("Unable to stop: %s", self._device.stop)
async def async_set_fan_speed(self, fan_speed, **kwargs):
"""Set fan speed."""
if fan_speed in self.coordinator.data.fan_speeds:
fan_speed = self.coordinator.data.fan_speeds[fan_speed]
else:
try:
fan_speed = int(fan_speed)
except ValueError as exc:
_LOGGER.error(
"Fan speed step not recognized (%s). Valid speeds are: %s",
exc,
self.fan_speed_list,
)
return
await self._try_command(
"Unable to set fan speed: %s", self._device.set_fan_speed, fan_speed
)
async def async_return_to_base(self, **kwargs):
"""Set the vacuum cleaner to return to the dock."""
await self._try_command("Unable to return home: %s", self._device.home)
async def async_clean_spot(self, **kwargs):
"""Perform a spot clean-up."""
await self._try_command(
"Unable to start the vacuum for a spot clean-up: %s", self._device.spot
)
async def async_locate(self, **kwargs):
"""Locate the vacuum cleaner."""
await self._try_command("Unable to locate the botvac: %s", self._device.find)
async def async_send_command(self, command, params=None, **kwargs):
"""Send raw command."""
await self._try_command(
"Unable to send command to the vacuum: %s",
self._device.raw_command,
command,
params,
)
async def async_remote_control_start(self):
"""Start remote control mode."""
await self._try_command(
"Unable to start remote control the vacuum: %s", self._device.manual_start
)
async def async_remote_control_stop(self):
"""Stop remote control mode."""
await self._try_command(
"Unable to stop remote control the vacuum: %s", self._device.manual_stop
)
async def async_remote_control_move(
self, rotation: int = 0, velocity: float = 0.3, duration: int = 1500
):
"""Move vacuum with remote control mode."""
await self._try_command(
"Unable to move with remote control the vacuum: %s",
self._device.manual_control,
velocity=velocity,
rotation=rotation,
duration=duration,
)
async def async_remote_control_move_step(
self, rotation: int = 0, velocity: float = 0.2, duration: int = 1500
):
"""Move vacuum one step with remote control mode."""
await self._try_command(
"Unable to remote control the vacuum: %s",
self._device.manual_control_once,
velocity=velocity,
rotation=rotation,
duration=duration,
)
async def async_goto(self, x_coord: int, y_coord: int):
"""Goto the specified coordinates."""
await self._try_command(
"Unable to send the vacuum cleaner to the specified coordinates: %s",
self._device.goto,
x_coord=x_coord,
y_coord=y_coord,
)
async def async_clean_segment(self, segments):
"""Clean the specified segments(s)."""
if isinstance(segments, int):
segments = [segments]
await self._try_command(
"Unable to start cleaning of the specified segments: %s",
self._device.segment_clean,
segments=segments,
)
async def async_clean_zone(self, zone, repeats=1):
"""Clean selected area for the number of repeats indicated."""
for _zone in zone:
_zone.append(repeats)
_LOGGER.debug("Zone with repeats: %s", zone)
try:
await self.hass.async_add_executor_job(self._device.zoned_clean, zone)
await self.coordinator.async_refresh()
except (OSError, DeviceException) as exc:
_LOGGER.error("Unable to send zoned_clean command to the vacuum: %s", exc)
@callback
def _handle_coordinator_update(self) -> None:
state_code = int(self.coordinator.data.status.state_code)
if state_code not in STATE_CODE_TO_STATE:
_LOGGER.error(
"STATE not supported: %s, state_code: %s",
self.coordinator.data.status.state,
self.coordinator.data.status.state_code,
)
self._state = None
else:
self._state = STATE_CODE_TO_STATE[state_code]
super()._handle_coordinator_update()
```
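`_handle_coordinator_update` translates the vacuum's numeric state code through `STATE_CODE_TO_STATE`, falling back to `None` (plus an error log) for unknown codes, while the `state` property keeps reporting an error as long as one is active. A standalone sketch of that translation, with plain strings in place of the Home Assistant state constants:

```python
# Same idea as STATE_CODE_TO_STATE above, with plain strings instead of
# the homeassistant.components.vacuum state constants.
STATE_CODES = {5: "cleaning", 8: "docked", 10: "paused", 12: "error"}

def translate_state(state_code, got_error=False):
    """Return the mapped state; unknown codes become None, and an
    active error always wins (mirrors MiroboVacuum.state)."""
    if got_error:
        return "error"
    return STATE_CODES.get(int(state_code))

print(translate_state(5))                  # cleaning
print(translate_state(99))                 # None (unsupported code)
print(translate_state(5, got_error=True))  # error
```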
#### File: components/bosch_shc/test_config_flow.py
```python
from unittest.mock import PropertyMock, mock_open, patch
from boschshcpy.exceptions import (
SHCAuthenticationError,
SHCConnectionError,
SHCRegistrationError,
SHCSessionError,
)
from boschshcpy.information import SHCInformation
from homeassistant import config_entries
from homeassistant.components.bosch_shc.config_flow import write_tls_asset
from homeassistant.components.bosch_shc.const import CONF_SHC_CERT, CONF_SHC_KEY, DOMAIN
from tests.common import MockConfigEntry
MOCK_SETTINGS = {
"name": "<NAME>",
"device": {"mac": "test-mac", "hostname": "test-host"},
}
DISCOVERY_INFO = {
"host": ["172.16.17.32", "1.1.1.1"],
"port": 0,
"hostname": "shc012345.local.",
"type": "_http._tcp.local.",
"name": "Bosch SHC [test-mac]._http._tcp.local.",
}
async def test_form_user(hass, mock_zeroconf):
"""Test we get the form."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert result["errors"] == {}
with patch(
"boschshcpy.session.SHCSession.mdns_info",
return_value=SHCInformation,
), patch(
"boschshcpy.information.SHCInformation.name",
new_callable=PropertyMock,
return_value="shc012345",
), patch(
"boschshcpy.information.SHCInformation.unique_id",
new_callable=PropertyMock,
return_value="test-mac",
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"host": "1.1.1.1"},
)
assert result2["type"] == "form"
assert result2["step_id"] == "credentials"
assert result2["errors"] == {}
with patch(
"boschshcpy.register_client.SHCRegisterClient.register",
return_value={
"token": "abc:123",
"cert": b"content_cert",
"key": b"content_key",
},
), patch("os.mkdir"), patch("builtins.open"), patch(
"boschshcpy.session.SHCSession.authenticate"
) as mock_authenticate, patch(
"homeassistant.components.bosch_shc.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result3 = await hass.config_entries.flow.async_configure(
result2["flow_id"],
{"password": "<PASSWORD>"},
)
await hass.async_block_till_done()
assert result3["type"] == "create_entry"
assert result3["title"] == "shc012345"
assert result3["data"] == {
"host": "1.1.1.1",
"ssl_certificate": hass.config.path(DOMAIN, CONF_SHC_CERT),
"ssl_key": hass.config.path(DOMAIN, CONF_SHC_KEY),
"token": "abc:123",
"hostname": "123",
}
assert len(mock_authenticate.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_get_info_connection_error(hass, mock_zeroconf):
"""Test we handle connection error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"boschshcpy.session.SHCSession.mdns_info",
side_effect=SHCConnectionError,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
"host": "1.1.1.1",
},
)
assert result2["type"] == "form"
assert result2["step_id"] == "user"
assert result2["errors"] == {"base": "cannot_connect"}
async def test_form_get_info_exception(hass):
"""Test we handle exceptions."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"boschshcpy.session.SHCSession.mdns_info",
side_effect=Exception,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
"host": "1.1.1.1",
},
)
assert result2["type"] == "form"
assert result2["step_id"] == "user"
assert result2["errors"] == {"base": "unknown"}
async def test_form_pairing_error(hass, mock_zeroconf):
"""Test we handle pairing error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"boschshcpy.session.SHCSession.mdns_info",
return_value=SHCInformation,
), patch(
"boschshcpy.information.SHCInformation.name",
new_callable=PropertyMock,
return_value="shc012345",
), patch(
"boschshcpy.information.SHCInformation.unique_id",
new_callable=PropertyMock,
return_value="test-mac",
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"host": "1.1.1.1"},
)
assert result2["type"] == "form"
assert result2["step_id"] == "credentials"
assert result2["errors"] == {}
with patch(
"boschshcpy.register_client.SHCRegisterClient.register",
side_effect=SHCRegistrationError(""),
):
result3 = await hass.config_entries.flow.async_configure(
result2["flow_id"],
{"password": "test"},
)
await hass.async_block_till_done()
assert result3["type"] == "form"
assert result3["step_id"] == "credentials"
assert result3["errors"] == {"base": "pairing_failed"}
async def test_form_user_invalid_auth(hass, mock_zeroconf):
"""Test we handle invalid auth."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"boschshcpy.session.SHCSession.mdns_info",
return_value=SHCInformation,
), patch(
"boschshcpy.information.SHCInformation.name",
new_callable=PropertyMock,
return_value="shc012345",
), patch(
"boschshcpy.information.SHCInformation.unique_id",
new_callable=PropertyMock,
return_value="test-mac",
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"host": "1.1.1.1"},
)
assert result2["type"] == "form"
assert result2["step_id"] == "credentials"
assert result2["errors"] == {}
with patch(
"boschshcpy.register_client.SHCRegisterClient.register",
return_value={
"token": "<PASSWORD>",
"cert": b"content_cert",
"key": b"content_key",
},
), patch("os.mkdir"), patch("builtins.open"), patch(
"boschshcpy.session.SHCSession.authenticate",
side_effect=SHCAuthenticationError,
):
result3 = await hass.config_entries.flow.async_configure(
result2["flow_id"],
{"password": "<PASSWORD>"},
)
await hass.async_block_till_done()
assert result3["type"] == "form"
assert result3["step_id"] == "credentials"
assert result3["errors"] == {"base": "invalid_auth"}
async def test_form_validate_connection_error(hass, mock_zeroconf):
"""Test we handle connection error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"boschshcpy.session.SHCSession.mdns_info",
return_value=SHCInformation,
), patch(
"boschshcpy.information.SHCInformation.name",
new_callable=PropertyMock,
return_value="shc012345",
), patch(
"boschshcpy.information.SHCInformation.unique_id",
new_callable=PropertyMock,
return_value="test-mac",
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"host": "1.1.1.1"},
)
assert result2["type"] == "form"
assert result2["step_id"] == "credentials"
assert result2["errors"] == {}
with patch(
"boschshcpy.register_client.SHCRegisterClient.register",
return_value={
"token": "abc:<PASSWORD>",
"cert": b"content_cert",
"key": b"content_key",
},
), patch("os.mkdir"), patch("builtins.open"), patch(
"boschshcpy.session.SHCSession.authenticate",
side_effect=SHCConnectionError,
):
result3 = await hass.config_entries.flow.async_configure(
result2["flow_id"],
{"password": "<PASSWORD>"},
)
await hass.async_block_till_done()
assert result3["type"] == "form"
assert result3["step_id"] == "credentials"
assert result3["errors"] == {"base": "cannot_connect"}
async def test_form_validate_session_error(hass, mock_zeroconf):
"""Test we handle session error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"boschshcpy.session.SHCSession.mdns_info",
return_value=SHCInformation,
), patch(
"boschshcpy.information.SHCInformation.name",
new_callable=PropertyMock,
return_value="shc012345",
), patch(
"boschshcpy.information.SHCInformation.unique_id",
new_callable=PropertyMock,
return_value="test-mac",
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"host": "1.1.1.1"},
)
assert result2["type"] == "form"
assert result2["step_id"] == "credentials"
assert result2["errors"] == {}
with patch(
"boschshcpy.register_client.SHCRegisterClient.register",
return_value={
"token": "abc:123",
"cert": b"content_cert",
"key": b"content_key",
},
), patch("os.mkdir"), patch("builtins.open"), patch(
"boschshcpy.session.SHCSession.authenticate",
side_effect=SHCSessionError(""),
):
result3 = await hass.config_entries.flow.async_configure(
result2["flow_id"],
{"password": "<PASSWORD>"},
)
await hass.async_block_till_done()
assert result3["type"] == "form"
assert result3["step_id"] == "credentials"
assert result3["errors"] == {"base": "session_error"}
async def test_form_validate_exception(hass, mock_zeroconf):
"""Test we handle exception."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"boschshcpy.session.SHCSession.mdns_info",
return_value=SHCInformation,
), patch(
"boschshcpy.information.SHCInformation.name",
new_callable=PropertyMock,
return_value="shc012345",
), patch(
"boschshcpy.information.SHCInformation.unique_id",
new_callable=PropertyMock,
return_value="test-mac",
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"host": "1.1.1.1"},
)
assert result2["type"] == "form"
assert result2["step_id"] == "credentials"
assert result2["errors"] == {}
with patch(
"boschshcpy.register_client.SHCRegisterClient.register",
return_value={
"token": "abc:123",
"cert": b"content_cert",
"key": b"content_key",
},
), patch("os.mkdir"), patch("builtins.open"), patch(
"boschshcpy.session.SHCSession.authenticate",
side_effect=Exception,
):
result3 = await hass.config_entries.flow.async_configure(
result2["flow_id"],
{"password": "<PASSWORD>"},
)
await hass.async_block_till_done()
assert result3["type"] == "form"
assert result3["step_id"] == "credentials"
assert result3["errors"] == {"base": "unknown"}
async def test_form_already_configured(hass, mock_zeroconf):
"""Test we get the form."""
entry = MockConfigEntry(
domain="bosch_shc", unique_id="test-mac", data={"host": "0.0.0.0"}
)
entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"boschshcpy.session.SHCSession.mdns_info",
return_value=SHCInformation,
), patch(
"boschshcpy.information.SHCInformation.name",
new_callable=PropertyMock,
return_value="shc012345",
), patch(
"boschshcpy.information.SHCInformation.unique_id",
new_callable=PropertyMock,
return_value="test-mac",
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"host": "1.1.1.1"},
)
assert result2["type"] == "abort"
assert result2["reason"] == "already_configured"
# Test config entry got updated with latest IP
assert entry.data["host"] == "1.1.1.1"
async def test_zeroconf(hass, mock_zeroconf):
"""Test we get the form."""
with patch(
"boschshcpy.session.SHCSession.mdns_info",
return_value=SHCInformation,
), patch(
"boschshcpy.information.SHCInformation.name",
new_callable=PropertyMock,
return_value="shc012345",
), patch(
"boschshcpy.information.SHCInformation.unique_id",
new_callable=PropertyMock,
return_value="test-mac",
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
data=DISCOVERY_INFO,
context={"source": config_entries.SOURCE_ZEROCONF},
)
assert result["type"] == "form"
assert result["step_id"] == "confirm_discovery"
assert result["errors"] == {}
context = next(
flow["context"]
for flow in hass.config_entries.flow.async_progress()
if flow["flow_id"] == result["flow_id"]
)
assert context["title_placeholders"]["name"] == "shc012345"
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{},
)
assert result2["type"] == "form"
assert result2["step_id"] == "credentials"
with patch(
"boschshcpy.register_client.SHCRegisterClient.register",
return_value={
"token": "abc:123",
"cert": b"content_cert",
"key": b"content_key",
},
), patch("os.mkdir"), patch("builtins.open"), patch(
"boschshcpy.session.SHCSession.authenticate",
), patch(
"homeassistant.components.bosch_shc.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result3 = await hass.config_entries.flow.async_configure(
result2["flow_id"],
{"password": "<PASSWORD>"},
)
await hass.async_block_till_done()
assert result3["type"] == "create_entry"
assert result3["title"] == "shc012345"
assert result3["data"] == {
"host": "1.1.1.1",
"ssl_certificate": hass.config.path(DOMAIN, CONF_SHC_CERT),
"ssl_key": hass.config.path(DOMAIN, CONF_SHC_KEY),
"token": "<PASSWORD>",
"hostname": "123",
}
assert len(mock_setup_entry.mock_calls) == 1
async def test_zeroconf_already_configured(hass, mock_zeroconf):
"""Test we get the form."""
entry = MockConfigEntry(
domain="bosch_shc", unique_id="test-mac", data={"host": "0.0.0.0"}
)
entry.add_to_hass(hass)
with patch(
"boschshcpy.session.SHCSession.mdns_info",
return_value=SHCInformation,
), patch(
"boschshcpy.information.SHCInformation.name",
new_callable=PropertyMock,
return_value="shc012345",
), patch(
"boschshcpy.information.SHCInformation.unique_id",
new_callable=PropertyMock,
return_value="test-mac",
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
data=DISCOVERY_INFO,
context={"source": config_entries.SOURCE_ZEROCONF},
)
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
# Test config entry got updated with latest IP
assert entry.data["host"] == "1.1.1.1"
async def test_zeroconf_cannot_connect(hass, mock_zeroconf):
"""Test we get the form."""
with patch(
"boschshcpy.session.SHCSession.mdns_info", side_effect=SHCConnectionError
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
data=DISCOVERY_INFO,
context={"source": config_entries.SOURCE_ZEROCONF},
)
assert result["type"] == "abort"
assert result["reason"] == "cannot_connect"
async def test_zeroconf_link_local(hass, mock_zeroconf):
"""Test we get the form."""
DISCOVERY_INFO_LINK_LOCAL = {
"host": ["172.16.17.32"],
"port": 0,
"hostname": "shc012345.local.",
"type": "_http._tcp.local.",
"name": "Bosch SHC [test-mac]._http._tcp.local.",
}
with patch(
"boschshcpy.session.SHCSession.mdns_info", side_effect=SHCConnectionError
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
data=DISCOVERY_INFO_LINK_LOCAL,
context={"source": config_entries.SOURCE_ZEROCONF},
)
assert result["type"] == "abort"
assert result["reason"] == "cannot_connect"
async def test_zeroconf_not_bosch_shc(hass, mock_zeroconf):
"""Test we filter out non-bosch_shc devices."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
data={"host": "1.1.1.1", "name": "notboschshc"},
context={"source": config_entries.SOURCE_ZEROCONF},
)
assert result["type"] == "abort"
assert result["reason"] == "not_bosch_shc"
async def test_reauth(hass, mock_zeroconf):
"""Test we get the form."""
mock_config = MockConfigEntry(
domain=DOMAIN,
unique_id="test-mac",
data={
"host": "1.1.1.1",
"hostname": "test-mac",
"ssl_certificate": "test-cert.pem",
"ssl_key": "test-key.pem",
},
title="shc012345",
)
mock_config.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_REAUTH},
data=mock_config.data,
)
assert result["type"] == "form"
assert result["step_id"] == "reauth_confirm"
with patch(
"boschshcpy.session.SHCSession.mdns_info",
return_value=SHCInformation,
), patch(
"boschshcpy.information.SHCInformation.name",
new_callable=PropertyMock,
return_value="shc012345",
), patch(
"boschshcpy.information.SHCInformation.unique_id",
new_callable=PropertyMock,
return_value="test-mac",
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"host": "2.2.2.2"},
)
assert result2["type"] == "form"
assert result2["step_id"] == "credentials"
assert result2["errors"] == {}
with patch(
"boschshcpy.register_client.SHCRegisterClient.register",
return_value={
"token": "abc:123",
"cert": b"content_cert",
"key": b"content_key",
},
), patch("os.mkdir"), patch("builtins.open"), patch(
"boschshcpy.session.SHCSession.authenticate"
), patch(
"homeassistant.components.bosch_shc.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result3 = await hass.config_entries.flow.async_configure(
result2["flow_id"],
{"password": "<PASSWORD>"},
)
await hass.async_block_till_done()
assert result3["type"] == "abort"
assert result3["reason"] == "reauth_successful"
assert mock_config.data["host"] == "2.2.2.2"
assert len(mock_setup_entry.mock_calls) == 1
async def test_tls_assets_writer(hass):
"""Test we write tls assets to correct location."""
assets = {
"token": "<PASSWORD>",
"cert": b"content_cert",
"key": b"content_key",
}
with patch("os.mkdir"), patch("builtins.open", mock_open()) as mocked_file:
write_tls_asset(hass, CONF_SHC_CERT, assets["cert"])
mocked_file.assert_called_with(
hass.config.path(DOMAIN, CONF_SHC_CERT), "w", encoding="utf8"
)
mocked_file().write.assert_called_with("content_cert")
write_tls_asset(hass, CONF_SHC_KEY, assets["key"])
mocked_file.assert_called_with(
hass.config.path(DOMAIN, CONF_SHC_KEY), "w", encoding="utf8"
)
mocked_file().write.assert_called_with("content_key")
```
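
The `test_tls_assets_writer` test above leans entirely on `unittest.mock.mock_open` to observe the file writes without touching the filesystem. A minimal, self-contained sketch of that verification pattern is shown below; the `save_asset` helper and the `assets/cert.pem` path are illustrative assumptions, not part of the integration under test.

```python
from unittest.mock import mock_open, patch


def save_asset(path: str, payload: bytes) -> None:
    # Hypothetical helper: decode the payload and persist it as text.
    with open(path, "w", encoding="utf8") as file:
        file.write(payload.decode("utf-8"))


def test_save_asset_writes_decoded_payload():
    with patch("builtins.open", mock_open()) as mocked_file:
        save_asset("assets/cert.pem", b"content_cert")
        # mock_open records both the open() call and the write() call.
        mocked_file.assert_called_once_with("assets/cert.pem", "w", encoding="utf8")
        mocked_file().write.assert_called_once_with("content_cert")
```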
#### File: components/broadlink/test_config_flow.py
```python
import errno
import socket
from unittest.mock import call, patch
import broadlink.exceptions as blke
import pytest
from homeassistant import config_entries
from homeassistant.components.broadlink.const import DOMAIN
from homeassistant.components.dhcp import HOSTNAME, IP_ADDRESS, MAC_ADDRESS
from homeassistant.helpers import device_registry
from . import get_device
DEVICE_HELLO = "homeassistant.components.broadlink.config_flow.blk.hello"
DEVICE_FACTORY = "homeassistant.components.broadlink.config_flow.blk.gendevice"
@pytest.fixture(autouse=True)
def broadlink_setup_fixture():
"""Mock broadlink entry setup."""
with patch(
"homeassistant.components.broadlink.async_setup", return_value=True
), patch("homeassistant.components.broadlink.async_setup_entry", return_value=True):
yield
async def test_flow_user_works(hass):
"""Test a config flow initiated by the user.
Best case scenario with no errors or locks.
"""
device = get_device("Living Room")
mock_api = device.get_mock_api()
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert result["errors"] == {}
with patch(DEVICE_HELLO, return_value=mock_api) as mock_hello:
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"host": device.host, "timeout": device.timeout},
)
assert result["type"] == "form"
assert result["step_id"] == "finish"
assert result["errors"] == {}
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"name": device.name},
)
assert result["type"] == "create_entry"
assert result["title"] == device.name
assert result["data"] == device.get_entry_data()
assert mock_hello.call_count == 1
assert mock_api.auth.call_count == 1
async def test_flow_user_already_in_progress(hass):
"""Test we do not accept more than one config flow per device."""
device = get_device("Living Room")
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(DEVICE_HELLO, return_value=device.get_mock_api()):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"host": device.host, "timeout": device.timeout},
)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(DEVICE_HELLO, return_value=device.get_mock_api()):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"host": device.host, "timeout": device.timeout},
)
assert result["type"] == "abort"
assert result["reason"] == "already_in_progress"
async def test_flow_user_mac_already_configured(hass):
"""Test we do not accept more than one config entry per device.
We need to abort the flow and update the existing entry.
"""
device = get_device("Living Room")
mock_entry = device.get_mock_entry()
mock_entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
device.host = "192.168.1.64"
device.timeout = 20
mock_api = device.get_mock_api()
with patch(DEVICE_HELLO, return_value=mock_api):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"host": device.host, "timeout": device.timeout},
)
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
assert dict(mock_entry.data) == device.get_entry_data()
assert mock_api.auth.call_count == 0
async def test_flow_user_invalid_ip_address(hass):
"""Test we handle an invalid IP address in the user step."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(DEVICE_HELLO, side_effect=OSError(errno.EINVAL, None)):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"host": "0.0.0.1"},
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert result["errors"] == {"base": "invalid_host"}
async def test_flow_user_invalid_hostname(hass):
"""Test we handle an invalid hostname in the user step."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(DEVICE_HELLO, side_effect=OSError(socket.EAI_NONAME, None)):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"host": "pancakemaster.local"},
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert result["errors"] == {"base": "invalid_host"}
async def test_flow_user_device_not_found(hass):
"""Test we handle a device not found in the user step."""
device = get_device("Living Room")
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(DEVICE_HELLO, side_effect=blke.NetworkTimeoutError()):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"host": device.host},
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert result["errors"] == {"base": "cannot_connect"}
async def test_flow_user_device_not_supported(hass):
"""Test we handle a device not supported in the user step."""
device = get_device("Kitchen")
mock_api = device.get_mock_api()
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(DEVICE_HELLO, return_value=mock_api):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"host": device.host},
)
assert result["type"] == "abort"
assert result["reason"] == "not_supported"
async def test_flow_user_network_unreachable(hass):
"""Test we handle a network unreachable in the user step."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(DEVICE_HELLO, side_effect=OSError(errno.ENETUNREACH, None)):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"host": "192.168.1.32"},
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert result["errors"] == {"base": "cannot_connect"}
async def test_flow_user_os_error(hass):
"""Test we handle an OS error in the user step."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(DEVICE_HELLO, side_effect=OSError()):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"host": "192.168.1.32"},
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert result["errors"] == {"base": "unknown"}
async def test_flow_auth_authentication_error(hass):
"""Test we handle an authentication error in the auth step."""
device = get_device("Living Room")
mock_api = device.get_mock_api()
mock_api.auth.side_effect = blke.AuthenticationError()
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(DEVICE_HELLO, return_value=mock_api):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"host": device.host, "timeout": device.timeout},
)
assert result["type"] == "form"
assert result["step_id"] == "reset"
assert result["errors"] == {"base": "invalid_auth"}
async def test_flow_auth_network_timeout(hass):
"""Test we handle a network timeout in the auth step."""
device = get_device("Living Room")
mock_api = device.get_mock_api()
mock_api.auth.side_effect = blke.NetworkTimeoutError()
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(DEVICE_HELLO, return_value=mock_api):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"host": device.host},
)
assert result["type"] == "form"
assert result["step_id"] == "auth"
assert result["errors"] == {"base": "cannot_connect"}
async def test_flow_auth_firmware_error(hass):
"""Test we handle a firmware error in the auth step."""
device = get_device("Living Room")
mock_api = device.get_mock_api()
mock_api.auth.side_effect = blke.BroadlinkException()
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(DEVICE_HELLO, return_value=mock_api):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"host": device.host},
)
assert result["type"] == "form"
assert result["step_id"] == "auth"
assert result["errors"] == {"base": "unknown"}
async def test_flow_auth_network_unreachable(hass):
"""Test we handle a network unreachable in the auth step."""
device = get_device("Living Room")
mock_api = device.get_mock_api()
mock_api.auth.side_effect = OSError(errno.ENETUNREACH, None)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(DEVICE_HELLO, return_value=mock_api):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"host": device.host},
)
assert result["type"] == "form"
assert result["step_id"] == "auth"
assert result["errors"] == {"base": "cannot_connect"}
async def test_flow_auth_os_error(hass):
"""Test we handle an OS error in the auth step."""
device = get_device("Living Room")
mock_api = device.get_mock_api()
mock_api.auth.side_effect = OSError()
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(DEVICE_HELLO, return_value=mock_api):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"host": device.host},
)
assert result["type"] == "form"
assert result["step_id"] == "auth"
assert result["errors"] == {"base": "unknown"}
async def test_flow_reset_works(hass):
"""Test we finish a config flow after a manual unlock."""
device = get_device("Living Room")
mock_api = device.get_mock_api()
mock_api.auth.side_effect = blke.AuthenticationError()
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(DEVICE_HELLO, return_value=mock_api):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"host": device.host, "timeout": device.timeout},
)
with patch(DEVICE_HELLO, return_value=device.get_mock_api()):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"host": device.host, "timeout": device.timeout},
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"name": device.name},
)
assert result["type"] == "create_entry"
assert result["title"] == device.name
assert result["data"] == device.get_entry_data()
async def test_flow_unlock_works(hass):
"""Test we finish a config flow with an unlock request."""
device = get_device("Living Room")
mock_api = device.get_mock_api()
mock_api.is_locked = True
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(DEVICE_HELLO, return_value=mock_api):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"host": device.host, "timeout": device.timeout},
)
assert result["type"] == "form"
assert result["step_id"] == "unlock"
assert result["errors"] == {}
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"unlock": True},
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"name": device.name},
)
assert result["type"] == "create_entry"
assert result["title"] == device.name
assert result["data"] == device.get_entry_data()
assert mock_api.set_lock.call_args == call(False)
assert mock_api.set_lock.call_count == 1
async def test_flow_unlock_network_timeout(hass):
"""Test we handle a network timeout in the unlock step."""
device = get_device("Living Room")
mock_api = device.get_mock_api()
mock_api.is_locked = True
mock_api.set_lock.side_effect = blke.NetworkTimeoutError()
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(DEVICE_HELLO, return_value=mock_api):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"host": device.host, "timeout": device.timeout},
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"unlock": True},
)
assert result["type"] == "form"
assert result["step_id"] == "unlock"
assert result["errors"] == {"base": "cannot_connect"}
async def test_flow_unlock_firmware_error(hass):
"""Test we handle a firmware error in the unlock step."""
device = get_device("Living Room")
mock_api = device.get_mock_api()
mock_api.is_locked = True
mock_api.set_lock.side_effect = blke.BroadlinkException
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(DEVICE_HELLO, return_value=mock_api):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"host": device.host, "timeout": device.timeout},
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"unlock": True},
)
assert result["type"] == "form"
assert result["step_id"] == "unlock"
assert result["errors"] == {"base": "unknown"}
async def test_flow_unlock_network_unreachable(hass):
"""Test we handle a network unreachable in the unlock step."""
device = get_device("Living Room")
mock_api = device.get_mock_api()
mock_api.is_locked = True
mock_api.set_lock.side_effect = OSError(errno.ENETUNREACH, None)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(DEVICE_HELLO, return_value=mock_api):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"host": device.host, "timeout": device.timeout},
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"unlock": True},
)
assert result["type"] == "form"
assert result["step_id"] == "unlock"
assert result["errors"] == {"base": "cannot_connect"}
async def test_flow_unlock_os_error(hass):
"""Test we handle an OS error in the unlock step."""
device = get_device("Living Room")
mock_api = device.get_mock_api()
mock_api.is_locked = True
mock_api.set_lock.side_effect = OSError()
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(DEVICE_HELLO, return_value=mock_api):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"host": device.host, "timeout": device.timeout},
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"unlock": True},
)
assert result["type"] == "form"
assert result["step_id"] == "unlock"
assert result["errors"] == {"base": "unknown"}
async def test_flow_do_not_unlock(hass):
"""Test we do not unlock the device if the user does not want to."""
device = get_device("Living Room")
mock_api = device.get_mock_api()
mock_api.is_locked = True
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(DEVICE_HELLO, return_value=mock_api):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"host": device.host, "timeout": device.timeout},
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"unlock": False},
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"name": device.name},
)
assert result["type"] == "create_entry"
assert result["title"] == device.name
assert result["data"] == device.get_entry_data()
assert mock_api.set_lock.call_count == 0
async def test_flow_import_works(hass):
"""Test an import flow."""
device = get_device("Living Room")
mock_api = device.get_mock_api()
with patch(DEVICE_HELLO, return_value=mock_api) as mock_hello:
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data={"host": device.host},
)
assert result["type"] == "form"
assert result["step_id"] == "finish"
assert result["errors"] == {}
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"name": device.name},
)
assert result["type"] == "create_entry"
assert result["title"] == device.name
assert result["data"]["host"] == device.host
assert result["data"]["mac"] == device.mac
assert result["data"]["type"] == device.devtype
assert mock_api.auth.call_count == 1
assert mock_hello.call_count == 1
async def test_flow_import_already_in_progress(hass):
"""Test we do not import more than one flow per device."""
device = get_device("Living Room")
data = {"host": device.host}
with patch(DEVICE_HELLO, return_value=device.get_mock_api()):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_IMPORT}, data=data
)
with patch(DEVICE_HELLO, return_value=device.get_mock_api()):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_IMPORT}, data=data
)
assert result["type"] == "abort"
assert result["reason"] == "already_in_progress"
async def test_flow_import_host_already_configured(hass):
"""Test we do not import a host that is already configured."""
device = get_device("Living Room")
mock_entry = device.get_mock_entry()
mock_entry.add_to_hass(hass)
mock_api = device.get_mock_api()
with patch(DEVICE_HELLO, return_value=mock_api):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data={"host": device.host},
)
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
async def test_flow_import_mac_already_configured(hass):
"""Test we do not import more than one config entry per device.
We need to abort the flow and update the existing entry.
"""
device = get_device("Living Room")
mock_entry = device.get_mock_entry()
mock_entry.add_to_hass(hass)
device.host = "192.168.1.16"
mock_api = device.get_mock_api()
with patch(DEVICE_HELLO, return_value=mock_api):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data={"host": device.host},
)
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
assert mock_entry.data["host"] == device.host
assert mock_entry.data["mac"] == device.mac
assert mock_entry.data["type"] == device.devtype
assert mock_api.auth.call_count == 0
async def test_flow_import_device_not_found(hass):
"""Test we handle a device not found in the import step."""
with patch(DEVICE_HELLO, side_effect=blke.NetworkTimeoutError()):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data={"host": "192.168.1.32"},
)
assert result["type"] == "abort"
assert result["reason"] == "cannot_connect"
async def test_flow_import_device_not_supported(hass):
"""Test we handle a device not supported in the import step."""
device = get_device("Kitchen")
mock_api = device.get_mock_api()
with patch(DEVICE_HELLO, return_value=mock_api):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data={"host": device.host},
)
assert result["type"] == "abort"
assert result["reason"] == "not_supported"
async def test_flow_import_invalid_ip_address(hass):
"""Test we handle an invalid IP address in the import step."""
with patch(DEVICE_HELLO, side_effect=OSError(errno.EINVAL, None)):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data={"host": "0.0.0.1"},
)
assert result["type"] == "abort"
assert result["reason"] == "invalid_host"
async def test_flow_import_invalid_hostname(hass):
"""Test we handle an invalid hostname in the import step."""
with patch(DEVICE_HELLO, side_effect=OSError(socket.EAI_NONAME, None)):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data={"host": "hotdog.local"},
)
assert result["type"] == "abort"
assert result["reason"] == "invalid_host"
async def test_flow_import_network_unreachable(hass):
"""Test we handle a network unreachable in the import step."""
with patch(DEVICE_HELLO, side_effect=OSError(errno.ENETUNREACH, None)):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data={"host": "192.168.1.64"},
)
assert result["type"] == "abort"
assert result["reason"] == "cannot_connect"
async def test_flow_import_os_error(hass):
"""Test we handle an OS error in the import step."""
with patch(DEVICE_HELLO, side_effect=OSError()):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data={"host": "192.168.1.64"},
)
assert result["type"] == "abort"
assert result["reason"] == "unknown"
async def test_flow_reauth_works(hass):
"""Test a reauthentication flow."""
device = get_device("Living Room")
mock_entry = device.get_mock_entry()
mock_entry.add_to_hass(hass)
mock_api = device.get_mock_api()
mock_api.auth.side_effect = blke.AuthenticationError()
data = {"name": device.name, **device.get_entry_data()}
with patch(DEVICE_FACTORY, return_value=mock_api):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_REAUTH}, data=data
)
assert result["type"] == "form"
assert result["step_id"] == "reset"
mock_api = device.get_mock_api()
with patch(DEVICE_HELLO, return_value=mock_api) as mock_hello:
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"host": device.host, "timeout": device.timeout},
)
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
assert dict(mock_entry.data) == device.get_entry_data()
assert mock_api.auth.call_count == 1
assert mock_hello.call_count == 1
async def test_flow_reauth_invalid_host(hass):
"""Test we do not accept an invalid host for reauthentication.
The MAC address cannot change.
"""
device = get_device("Living Room")
mock_entry = device.get_mock_entry()
mock_entry.add_to_hass(hass)
mock_api = device.get_mock_api()
mock_api.auth.side_effect = blke.AuthenticationError()
data = {"name": device.name, **device.get_entry_data()}
with patch(DEVICE_FACTORY, return_value=mock_api):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_REAUTH}, data=data
)
device.mac = get_device("Office").mac
mock_api = device.get_mock_api()
with patch(DEVICE_HELLO, return_value=mock_api) as mock_hello:
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"host": device.host, "timeout": device.timeout},
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert result["errors"] == {"base": "invalid_host"}
assert mock_hello.call_count == 1
assert mock_api.auth.call_count == 0
async def test_flow_reauth_valid_host(hass):
"""Test we accept a valid host for reauthentication.
The hostname/IP address may change. We need to update the entry.
"""
device = get_device("Living Room")
mock_entry = device.get_mock_entry()
mock_entry.add_to_hass(hass)
mock_api = device.get_mock_api()
mock_api.auth.side_effect = blke.AuthenticationError()
data = {"name": device.name, **device.get_entry_data()}
with patch(DEVICE_FACTORY, return_value=mock_api):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_REAUTH}, data=data
)
device.host = "192.168.1.128"
mock_api = device.get_mock_api()
with patch(DEVICE_HELLO, return_value=mock_api) as mock_hello:
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"host": device.host, "timeout": device.timeout},
)
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
assert mock_entry.data["host"] == device.host
assert mock_hello.call_count == 1
assert mock_api.auth.call_count == 1
async def test_dhcp_can_finish(hass):
"""Test DHCP discovery flow can finish right away."""
device = get_device("Living Room")
device.host = "1.2.3.4"
mock_api = device.get_mock_api()
with patch(DEVICE_HELLO, return_value=mock_api):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_DHCP},
data={
HOSTNAME: "broadlink",
IP_ADDRESS: "1.2.3.4",
MAC_ADDRESS: device_registry.format_mac(device.mac),
},
)
await hass.async_block_till_done()
assert result["type"] == "form"
assert result["step_id"] == "finish"
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{},
)
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["title"] == "Living Room"
assert result2["data"] == {
"host": "1.2.3.4",
"mac": "34ea34b43b5a",
"timeout": 10,
"type": 24374,
}
async def test_dhcp_fails_to_connect(hass):
"""Test DHCP discovery flow that fails to connect."""
with patch(DEVICE_HELLO, side_effect=blke.NetworkTimeoutError()):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_DHCP},
data={
HOSTNAME: "broadlink",
IP_ADDRESS: "1.2.3.4",
MAC_ADDRESS: "34:ea:34:b4:3b:5a",
},
)
await hass.async_block_till_done()
assert result["type"] == "abort"
assert result["reason"] == "cannot_connect"
async def test_dhcp_unreachable(hass):
"""Test DHCP discovery flow that fails to connect."""
with patch(DEVICE_HELLO, side_effect=OSError(errno.ENETUNREACH, None)):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_DHCP},
data={
HOSTNAME: "broadlink",
IP_ADDRESS: "172.16.31.10",
MAC_ADDRESS: "34:ea:34:b4:3b:5a",
},
)
await hass.async_block_till_done()
assert result["type"] == "abort"
assert result["reason"] == "cannot_connect"
async def test_dhcp_connect_unknown_error(hass):
"""Test DHCP discovery flow that fails to connect with an OSError."""
with patch(DEVICE_HELLO, side_effect=OSError()):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_DHCP},
data={
HOSTNAME: "broadlink",
IP_ADDRESS: "172.16.31.10",
MAC_ADDRESS: "34:ea:34:b4:3b:5a",
},
)
await hass.async_block_till_done()
assert result["type"] == "abort"
assert result["reason"] == "unknown"
async def test_dhcp_device_not_supported(hass):
"""Test DHCP discovery flow that fails because the device is not supported."""
device = get_device("Kitchen")
mock_api = device.get_mock_api()
with patch(DEVICE_HELLO, return_value=mock_api):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_DHCP},
data={
HOSTNAME: "broadlink",
IP_ADDRESS: device.host,
MAC_ADDRESS: device_registry.format_mac(device.mac),
},
)
assert result["type"] == "abort"
assert result["reason"] == "not_supported"
async def test_dhcp_already_exists(hass):
"""Test DHCP discovery flow that fails to connect."""
device = get_device("Living Room")
mock_entry = device.get_mock_entry()
mock_entry.add_to_hass(hass)
device.host = "1.2.3.4"
mock_api = device.get_mock_api()
with patch(DEVICE_HELLO, return_value=mock_api):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_DHCP},
data={
HOSTNAME: "broadlink",
IP_ADDRESS: "1.2.3.4",
MAC_ADDRESS: "34:ea:34:b4:3b:5a",
},
)
await hass.async_block_till_done()
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
async def test_dhcp_updates_host(hass):
"""Test DHCP updates host."""
device = get_device("Living Room")
device.host = "1.2.3.4"
mock_entry = device.get_mock_entry()
mock_entry.add_to_hass(hass)
mock_api = device.get_mock_api()
with patch(DEVICE_HELLO, return_value=mock_api):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_DHCP},
data={
HOSTNAME: "broadlink",
IP_ADDRESS: "4.5.6.7",
MAC_ADDRESS: "34:ea:34:b4:3b:5a",
},
)
await hass.async_block_till_done()
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
assert mock_entry.data["host"] == "4.5.6.7"
```
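
Several of the flow tests above feed the Broadlink config flow `OSError` instances carrying specific `errno` values and assert which error key the flow reports. The sketch below shows that kind of errno-to-error-key classification in isolation; `classify_os_error` is a hypothetical illustration, not the integration's actual implementation.

```python
import errno
import socket


def classify_os_error(err: OSError) -> str:
    # Hypothetical mapping from OS-level errors to config-flow error keys.
    if err.errno in (errno.EINVAL, socket.EAI_NONAME):
        return "invalid_host"  # malformed IP address or unresolvable hostname
    if err.errno == errno.ENETUNREACH:
        return "cannot_connect"  # network unreachable
    return "unknown"  # any other OS error


assert classify_os_error(OSError(errno.EINVAL, None)) == "invalid_host"
assert classify_os_error(OSError(socket.EAI_NONAME, None)) == "invalid_host"
assert classify_os_error(OSError(errno.ENETUNREACH, None)) == "cannot_connect"
assert classify_os_error(OSError()) == "unknown"
```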
#### File: components/efergy/test_sensor.py
```python
import asyncio
from datetime import timedelta
from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN
from homeassistant.const import STATE_UNAVAILABLE
from homeassistant.core import HomeAssistant
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.common import async_fire_time_changed, load_fixture
from tests.test_util.aiohttp import AiohttpClientMocker
token = "<KEY>"
multi_sensor_token = "<KEY>"
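# Legacy YAML-style platform configurations exercised by the tests below:
# a single-sensor setup with several monitored variables, and a multi-sensor setup.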
ONE_SENSOR_CONFIG = {
"platform": "efergy",
"app_token": token,
"utc_offset": "300",
"monitored_variables": [
{"type": "amount", "period": "day"},
{"type": "instant_readings"},
{"type": "budget"},
{"type": "cost", "period": "day", "currency": "$"},
{"type": "current_values"},
],
}
MULTI_SENSOR_CONFIG = {
"platform": "efergy",
"app_token": multi_sensor_token,
"utc_offset": "300",
"monitored_variables": [{"type": "current_values"}],
}
def mock_responses(aioclient_mock: AiohttpClientMocker, error: bool = False):
"""Mock responses for Efergy."""
base_url = "https://engage.efergy.com/mobile_proxy/"
if error:
aioclient_mock.get(
f"{base_url}getCurrentValuesSummary?token={token}", exc=asyncio.TimeoutError
)
return
aioclient_mock.get(
f"{base_url}getInstant?token={token}",
text=load_fixture("efergy/efergy_instant.json"),
)
aioclient_mock.get(
f"{base_url}getEnergy?token={token}&offset=300&period=day",
text=load_fixture("efergy/efergy_energy.json"),
)
aioclient_mock.get(
f"{base_url}getBudget?token={token}",
text=load_fixture("efergy/efergy_budget.json"),
)
aioclient_mock.get(
f"{base_url}getCost?token={token}&offset=300&period=day",
text=load_fixture("efergy/efergy_cost.json"),
)
aioclient_mock.get(
f"{base_url}getCurrentValuesSummary?token={token}",
text=load_fixture("efergy/efergy_current_values_single.json"),
)
aioclient_mock.get(
f"{base_url}getCurrentValuesSummary?token={multi_sensor_token}",
text=load_fixture("efergy/efergy_current_values_multi.json"),
)
async def test_single_sensor_readings(
hass: HomeAssistant, aioclient_mock: AiohttpClientMocker
):
"""Test for successfully setting up the Efergy platform."""
mock_responses(aioclient_mock)
assert await async_setup_component(
hass, SENSOR_DOMAIN, {SENSOR_DOMAIN: ONE_SENSOR_CONFIG}
)
await hass.async_block_till_done()
assert hass.states.get("sensor.energy_consumed").state == "38.21"
assert hass.states.get("sensor.energy_usage").state == "1580"
assert hass.states.get("sensor.energy_budget").state == "ok"
assert hass.states.get("sensor.energy_cost").state == "5.27"
assert hass.states.get("sensor.efergy_728386").state == "1628"
async def test_multi_sensor_readings(
hass: HomeAssistant, aioclient_mock: AiohttpClientMocker
):
"""Test for multiple sensors in one household."""
mock_responses(aioclient_mock)
assert await async_setup_component(
hass, SENSOR_DOMAIN, {SENSOR_DOMAIN: MULTI_SENSOR_CONFIG}
)
await hass.async_block_till_done()
assert hass.states.get("sensor.efergy_728386").state == "218"
assert hass.states.get("sensor.efergy_0").state == "1808"
assert hass.states.get("sensor.efergy_728387").state == "312"
async def test_failed_getting_sids(
hass: HomeAssistant, aioclient_mock: AiohttpClientMocker
):
"""Test failed gettings sids."""
mock_responses(aioclient_mock, error=True)
assert await async_setup_component(
hass, SENSOR_DOMAIN, {SENSOR_DOMAIN: ONE_SENSOR_CONFIG}
)
assert not hass.states.async_all("sensor")
async def test_failed_update_and_reconnection(
hass: HomeAssistant, aioclient_mock: AiohttpClientMocker
):
"""Test failed update and reconnection."""
mock_responses(aioclient_mock)
assert await async_setup_component(
hass, SENSOR_DOMAIN, {SENSOR_DOMAIN: ONE_SENSOR_CONFIG}
)
aioclient_mock.clear_requests()
mock_responses(aioclient_mock, error=True)
next_update = dt_util.utcnow() + timedelta(seconds=3)
async_fire_time_changed(hass, next_update)
await hass.async_block_till_done()
assert hass.states.get("sensor.efergy_728386").state == STATE_UNAVAILABLE
aioclient_mock.clear_requests()
mock_responses(aioclient_mock)
next_update = dt_util.utcnow() + timedelta(seconds=30)
async_fire_time_changed(hass, next_update)
await hass.async_block_till_done()
assert hass.states.get("sensor.efergy_728386").state == "1628"
```
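
The reconnection test above encodes a common polling pattern: a failed update marks the sensor unavailable, and the next successful poll restores the reading. A minimal, framework-free sketch of that behaviour follows; the `PollingSensor` class is purely illustrative and not part of the Efergy platform.

```python
class PollingSensor:
    """Illustrative sensor that goes unavailable on a failed poll."""

    def __init__(self, fetch):
        self._fetch = fetch  # callable returning the latest reading
        self.state = "unavailable"

    def update(self):
        try:
            self.state = self._fetch()
        except TimeoutError:
            self.state = "unavailable"


readings = iter([1628, TimeoutError(), 1628])


def fetch():
    value = next(readings)
    if isinstance(value, Exception):
        raise value
    return value


sensor = PollingSensor(fetch)
sensor.update()
assert sensor.state == 1628  # initial poll succeeds
sensor.update()
assert sensor.state == "unavailable"  # timeout marks the sensor unavailable
sensor.update()
assert sensor.state == 1628  # reconnection restores the reading
```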
#### File: components/onewire/test_sensor.py
```python
from unittest.mock import patch
from pyownet.protocol import Error as ProtocolError
import pytest
from homeassistant.components.onewire.const import (
DEFAULT_SYSBUS_MOUNT_DIR,
DOMAIN,
PLATFORMS,
)
from homeassistant.components.sensor import ATTR_STATE_CLASS, DOMAIN as SENSOR_DOMAIN
from homeassistant.const import (
ATTR_DEVICE_CLASS,
ATTR_MANUFACTURER,
ATTR_MODEL,
ATTR_NAME,
ATTR_UNIT_OF_MEASUREMENT,
)
from homeassistant.setup import async_setup_component
from . import (
setup_onewire_patched_owserver_integration,
setup_onewire_sysbus_integration,
setup_owproxy_mock_devices,
setup_sysbus_mock_devices,
)
from .const import MOCK_OWPROXY_DEVICES, MOCK_SYSBUS_DEVICES
from tests.common import assert_setup_component, mock_device_registry, mock_registry
MOCK_COUPLERS = {
key: value for (key, value) in MOCK_OWPROXY_DEVICES.items() if "branches" in value
}
async def test_setup_minimum(hass):
"""Test old platform setup with minimum configuration."""
config = {"sensor": {"platform": "onewire"}}
with assert_setup_component(1, "sensor"):
assert await async_setup_component(hass, SENSOR_DOMAIN, config)
await hass.async_block_till_done()
async def test_setup_sysbus(hass):
"""Test old platform setup with SysBus configuration."""
config = {
"sensor": {
"platform": "onewire",
"mount_dir": DEFAULT_SYSBUS_MOUNT_DIR,
}
}
with assert_setup_component(1, "sensor"):
assert await async_setup_component(hass, SENSOR_DOMAIN, config)
await hass.async_block_till_done()
async def test_setup_owserver(hass):
"""Test old platform setup with OWServer configuration."""
config = {"sensor": {"platform": "onewire", "host": "localhost"}}
with assert_setup_component(1, "sensor"):
assert await async_setup_component(hass, SENSOR_DOMAIN, config)
await hass.async_block_till_done()
async def test_setup_owserver_with_port(hass):
"""Test old platform setup with OWServer configuration."""
config = {"sensor": {"platform": "onewire", "host": "localhost", "port": "1234"}}
with assert_setup_component(1, "sensor"):
assert await async_setup_component(hass, SENSOR_DOMAIN, config)
await hass.async_block_till_done()
@pytest.mark.parametrize("device_id", ["1F.111111111111"])
@patch("homeassistant.components.onewire.onewirehub.protocol.proxy")
async def test_sensors_on_owserver_coupler(owproxy, hass, device_id):
"""Test for 1-Wire sensors connected to DS2409 coupler."""
entity_registry = mock_registry(hass)
mock_coupler = MOCK_COUPLERS[device_id]
dir_side_effect = [] # List of lists of strings
read_side_effect = [] # List of byte arrays
dir_side_effect.append([f"/{device_id}/"]) # dir on root
read_side_effect.append(device_id[0:2].encode()) # read family on root
if "inject_reads" in mock_coupler:
read_side_effect += mock_coupler["inject_reads"]
expected_sensors = []
for branch, branch_details in mock_coupler["branches"].items():
dir_side_effect.append(
[ # dir on branch
f"/{device_id}/{branch}/{sub_device_id}/"
for sub_device_id in branch_details
]
)
for sub_device_id, sub_device in branch_details.items():
read_side_effect.append(sub_device_id[0:2].encode())
if "inject_reads" in sub_device:
read_side_effect.extend(sub_device["inject_reads"])
expected_sensors += sub_device[SENSOR_DOMAIN]
for expected_sensor in sub_device[SENSOR_DOMAIN]:
read_side_effect.append(expected_sensor["injected_value"])
# Ensure there are enough read side effects
read_side_effect.extend([ProtocolError("Missing injected value")] * 10)
owproxy.return_value.dir.side_effect = dir_side_effect
owproxy.return_value.read.side_effect = read_side_effect
with patch("homeassistant.components.onewire.PLATFORMS", [SENSOR_DOMAIN]):
await setup_onewire_patched_owserver_integration(hass)
await hass.async_block_till_done()
assert len(entity_registry.entities) == len(expected_sensors)
for expected_sensor in expected_sensors:
entity_id = expected_sensor["entity_id"]
registry_entry = entity_registry.entities.get(entity_id)
assert registry_entry is not None
assert registry_entry.unique_id == expected_sensor["unique_id"]
assert registry_entry.disabled == expected_sensor.get("disabled", False)
state = hass.states.get(entity_id)
assert state.state == expected_sensor["result"]
for attr in (ATTR_DEVICE_CLASS, ATTR_STATE_CLASS, ATTR_UNIT_OF_MEASUREMENT):
assert state.attributes.get(attr) == expected_sensor[attr]
assert state.attributes["device_file"] == expected_sensor["device_file"]
@pytest.mark.parametrize("device_id", MOCK_OWPROXY_DEVICES.keys())
@pytest.mark.parametrize("platform", PLATFORMS)
@patch("homeassistant.components.onewire.onewirehub.protocol.proxy")
async def test_owserver_setup_valid_device(owproxy, hass, device_id, platform):
"""Test for 1-Wire device.
As they would be on a clean setup: all binary-sensors and switches disabled.
"""
entity_registry = mock_registry(hass)
device_registry = mock_device_registry(hass)
setup_owproxy_mock_devices(owproxy, platform, [device_id])
mock_device = MOCK_OWPROXY_DEVICES[device_id]
expected_entities = mock_device.get(platform, [])
with patch("homeassistant.components.onewire.PLATFORMS", [platform]):
await setup_onewire_patched_owserver_integration(hass)
await hass.async_block_till_done()
assert len(entity_registry.entities) == len(expected_entities)
if len(expected_entities) > 0:
device_info = mock_device["device_info"]
assert len(device_registry.devices) == 1
registry_entry = device_registry.async_get_device({(DOMAIN, device_id)})
assert registry_entry is not None
assert registry_entry.identifiers == {(DOMAIN, device_id)}
assert registry_entry.manufacturer == device_info[ATTR_MANUFACTURER]
assert registry_entry.name == device_info[ATTR_NAME]
assert registry_entry.model == device_info[ATTR_MODEL]
for expected_entity in expected_entities:
entity_id = expected_entity["entity_id"]
registry_entry = entity_registry.entities.get(entity_id)
assert registry_entry is not None
assert registry_entry.unique_id == expected_entity["unique_id"]
assert registry_entry.disabled == expected_entity.get("disabled", False)
state = hass.states.get(entity_id)
if registry_entry.disabled:
assert state is None
else:
assert state.state == expected_entity["result"]
for attr in (ATTR_DEVICE_CLASS, ATTR_STATE_CLASS, ATTR_UNIT_OF_MEASUREMENT):
assert state.attributes.get(attr) == expected_entity[attr]
assert state.attributes["device_file"] == expected_entity.get(
"device_file", registry_entry.unique_id
)
@pytest.mark.parametrize("device_id", MOCK_SYSBUS_DEVICES.keys())
async def test_onewiredirect_setup_valid_device(hass, device_id):
"""Test that sysbus config entry works correctly."""
entity_registry = mock_registry(hass)
device_registry = mock_device_registry(hass)
glob_result, read_side_effect = setup_sysbus_mock_devices(
SENSOR_DOMAIN, [device_id]
)
mock_device = MOCK_SYSBUS_DEVICES[device_id]
expected_entities = mock_device.get(SENSOR_DOMAIN, [])
with patch("pi1wire._finder.glob.glob", return_value=glob_result,), patch(
"pi1wire.OneWire.get_temperature",
side_effect=read_side_effect,
):
assert await setup_onewire_sysbus_integration(hass)
await hass.async_block_till_done()
assert len(entity_registry.entities) == len(expected_entities)
if len(expected_entities) > 0:
device_info = mock_device["device_info"]
assert len(device_registry.devices) == 1
registry_entry = device_registry.async_get_device({(DOMAIN, device_id)})
assert registry_entry is not None
assert registry_entry.identifiers == {(DOMAIN, device_id)}
assert registry_entry.manufacturer == device_info[ATTR_MANUFACTURER]
assert registry_entry.name == device_info[ATTR_NAME]
assert registry_entry.model == device_info[ATTR_MODEL]
for expected_sensor in expected_entities:
entity_id = expected_sensor["entity_id"]
registry_entry = entity_registry.entities.get(entity_id)
assert registry_entry is not None
assert registry_entry.unique_id == expected_sensor["unique_id"]
state = hass.states.get(entity_id)
assert state.state == expected_sensor["result"]
for attr in (ATTR_DEVICE_CLASS, ATTR_STATE_CLASS, ATTR_UNIT_OF_MEASUREMENT):
assert state.attributes.get(attr) == expected_sensor[attr]
```
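
The coupler test above builds ordered `side_effect` lists so that every successive `dir()`/`read()` call on the mocked owserver proxy returns (or raises) the next prepared value. The short sketch below shows that mock behaviour in isolation; the values are illustrative only.

```python
from unittest.mock import MagicMock

proxy = MagicMock()
# Each call consumes the next item in the list; exception instances are raised
# instead of returned.
proxy.read.side_effect = [b"1F", b"28", OSError("missing injected value")]

assert proxy.read() == b"1F"
assert proxy.read() == b"28"
try:
    proxy.read()
except OSError:
    pass  # the third prepared value is raised rather than returned
```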
#### File: components/template/test_select.py
```python
import pytest
from homeassistant import setup
from homeassistant.components.input_select import (
ATTR_OPTION as INPUT_SELECT_ATTR_OPTION,
ATTR_OPTIONS as INPUT_SELECT_ATTR_OPTIONS,
DOMAIN as INPUT_SELECT_DOMAIN,
SERVICE_SELECT_OPTION as INPUT_SELECT_SERVICE_SELECT_OPTION,
SERVICE_SET_OPTIONS,
)
from homeassistant.components.select.const import (
ATTR_OPTION as SELECT_ATTR_OPTION,
ATTR_OPTIONS as SELECT_ATTR_OPTIONS,
DOMAIN as SELECT_DOMAIN,
SERVICE_SELECT_OPTION as SELECT_SERVICE_SELECT_OPTION,
)
from homeassistant.const import ATTR_ICON, CONF_ENTITY_ID, STATE_UNKNOWN
from homeassistant.core import Context
from homeassistant.helpers.entity_registry import async_get
from tests.common import (
assert_setup_component,
async_capture_events,
async_mock_service,
)
_TEST_SELECT = "select.template_select"
# Input select entity that backs the template select's current option
_OPTION_INPUT_SELECT = "input_select.option"
@pytest.fixture
def calls(hass):
"""Track calls to a mock service."""
return async_mock_service(hass, "test", "automation")
async def test_missing_optional_config(hass, calls):
"""Test: missing optional template is ok."""
with assert_setup_component(1, "template"):
assert await setup.async_setup_component(
hass,
"template",
{
"template": {
"select": {
"state": "{{ 'a' }}",
"select_option": {"service": "script.select_option"},
"options": "{{ ['a', 'b'] }}",
}
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
_verify(hass, "a", ["a", "b"])
async def test_multiple_configs(hass, calls):
"""Test: multiple select entities get created."""
with assert_setup_component(1, "template"):
assert await setup.async_setup_component(
hass,
"template",
{
"template": {
"select": [
{
"state": "{{ 'a' }}",
"select_option": {"service": "script.select_option"},
"options": "{{ ['a', 'b'] }}",
},
{
"state": "{{ 'a' }}",
"select_option": {"service": "script.select_option"},
"options": "{{ ['a', 'b'] }}",
},
]
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
_verify(hass, "a", ["a", "b"])
_verify(hass, "a", ["a", "b"], f"{_TEST_SELECT}_2")
async def test_missing_required_keys(hass, calls):
"""Test: missing required fields will fail."""
with assert_setup_component(0, "template"):
assert await setup.async_setup_component(
hass,
"template",
{
"template": {
"select": {
"select_option": {"service": "script.select_option"},
"options": "{{ ['a', 'b'] }}",
}
}
},
)
with assert_setup_component(0, "select"):
assert await setup.async_setup_component(
hass,
"select",
{
"template": {
"select": {
"state": "{{ 'a' }}",
"select_option": {"service": "script.select_option"},
}
}
},
)
with assert_setup_component(0, "select"):
assert await setup.async_setup_component(
hass,
"select",
{
"template": {
"select": {
"state": "{{ 'a' }}",
"options": "{{ ['a', 'b'] }}",
}
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
assert hass.states.async_all("select") == []
async def test_templates_with_entities(hass, calls):
"""Test templates with values from other entities."""
with assert_setup_component(1, "input_select"):
assert await setup.async_setup_component(
hass,
"input_select",
{
"input_select": {
"option": {
"options": ["a", "b"],
"initial": "a",
"name": "Option",
},
}
},
)
with assert_setup_component(1, "template"):
assert await setup.async_setup_component(
hass,
"template",
{
"template": {
"unique_id": "b",
"select": {
"state": f"{{{{ states('{_OPTION_INPUT_SELECT}') }}}}",
"options": f"{{{{ state_attr('{_OPTION_INPUT_SELECT}', '{INPUT_SELECT_ATTR_OPTIONS}') }}}}",
"select_option": {
"service": "input_select.select_option",
"data_template": {
"entity_id": _OPTION_INPUT_SELECT,
"option": "{{ option }}",
},
},
"optimistic": True,
"unique_id": "a",
},
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
ent_reg = async_get(hass)
entry = ent_reg.async_get(_TEST_SELECT)
assert entry
assert entry.unique_id == "b-a"
_verify(hass, "a", ["a", "b"])
await hass.services.async_call(
INPUT_SELECT_DOMAIN,
INPUT_SELECT_SERVICE_SELECT_OPTION,
{CONF_ENTITY_ID: _OPTION_INPUT_SELECT, INPUT_SELECT_ATTR_OPTION: "b"},
blocking=True,
)
await hass.async_block_till_done()
_verify(hass, "b", ["a", "b"])
await hass.services.async_call(
INPUT_SELECT_DOMAIN,
SERVICE_SET_OPTIONS,
{
CONF_ENTITY_ID: _OPTION_INPUT_SELECT,
INPUT_SELECT_ATTR_OPTIONS: ["a", "b", "c"],
},
blocking=True,
)
await hass.async_block_till_done()
_verify(hass, "a", ["a", "b", "c"])
await hass.services.async_call(
SELECT_DOMAIN,
SELECT_SERVICE_SELECT_OPTION,
{CONF_ENTITY_ID: _TEST_SELECT, SELECT_ATTR_OPTION: "c"},
blocking=True,
)
_verify(hass, "c", ["a", "b", "c"])
async def test_trigger_select(hass):
"""Test trigger based template select."""
events = async_capture_events(hass, "test_number_event")
assert await setup.async_setup_component(
hass,
"template",
{
"template": [
{"invalid": "config"},
# Config after invalid should still be set up
{
"unique_id": "listening-test-event",
"trigger": {"platform": "event", "event_type": "test_event"},
"select": [
{
"name": "<NAME>",
"unique_id": "hello_name-id",
"state": "{{ trigger.event.data.beer }}",
"options": "{{ trigger.event.data.beers }}",
"select_option": {"event": "test_number_event"},
"optimistic": True,
},
],
},
],
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
state = hass.states.get("select.hello_name")
assert state is not None
assert state.state == STATE_UNKNOWN
context = Context()
hass.bus.async_fire(
"test_event", {"beer": "duff", "beers": ["duff", "alamo"]}, context=context
)
await hass.async_block_till_done()
state = hass.states.get("select.hello_name")
assert state is not None
assert state.state == "duff"
assert state.attributes["options"] == ["duff", "alamo"]
await hass.services.async_call(
SELECT_DOMAIN,
SELECT_SERVICE_SELECT_OPTION,
{CONF_ENTITY_ID: "select.hello_name", SELECT_ATTR_OPTION: "alamo"},
blocking=True,
)
assert len(events) == 1
assert events[0].event_type == "test_number_event"
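# Shared assertion helper used by the tests in this module: checks the select
# entity's current option (state) and its "options" attribute.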
def _verify(hass, expected_current_option, expected_options, entity_name=_TEST_SELECT):
"""Verify select's state."""
state = hass.states.get(entity_name)
attributes = state.attributes
assert state.state == str(expected_current_option)
assert attributes.get(SELECT_ATTR_OPTIONS) == expected_options
async def test_template_icon_with_entities(hass, calls):
"""Test templates with values from other entities."""
with assert_setup_component(1, "input_select"):
assert await setup.async_setup_component(
hass,
"input_select",
{
"input_select": {
"option": {
"options": ["a", "b"],
"initial": "a",
"name": "Option",
},
}
},
)
with assert_setup_component(1, "template"):
assert await setup.async_setup_component(
hass,
"template",
{
"template": {
"unique_id": "b",
"select": {
"state": f"{{{{ states('{_OPTION_INPUT_SELECT}') }}}}",
"options": f"{{{{ state_attr('{_OPTION_INPUT_SELECT}', '{INPUT_SELECT_ATTR_OPTIONS}') }}}}",
"select_option": {
"service": "input_select.select_option",
"data": {
"entity_id": _OPTION_INPUT_SELECT,
"option": "{{ option }}",
},
},
"optimistic": True,
"unique_id": "a",
"icon": f"{{% if (states('{_OPTION_INPUT_SELECT}') == 'a') %}}mdi:greater{{% else %}}mdi:less{{% endif %}}",
},
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
state = hass.states.get(_TEST_SELECT)
assert state.state == "a"
assert state.attributes[ATTR_ICON] == "mdi:greater"
await hass.services.async_call(
INPUT_SELECT_DOMAIN,
INPUT_SELECT_SERVICE_SELECT_OPTION,
{CONF_ENTITY_ID: _OPTION_INPUT_SELECT, INPUT_SELECT_ATTR_OPTION: "b"},
blocking=True,
)
await hass.async_block_till_done()
state = hass.states.get(_TEST_SELECT)
assert state.state == "b"
assert state.attributes[ATTR_ICON] == "mdi:less"
async def test_template_icon_with_trigger(hass):
"""Test trigger based template select."""
with assert_setup_component(1, "input_select"):
assert await setup.async_setup_component(
hass,
"input_select",
{
"input_select": {
"option": {
"options": ["a", "b"],
"initial": "a",
"name": "Option",
},
}
},
)
assert await setup.async_setup_component(
hass,
"template",
{
"template": {
"trigger": {"platform": "state", "entity_id": _OPTION_INPUT_SELECT},
"select": {
"unique_id": "b",
"state": "{{ trigger.to_state.state }}",
"options": f"{{{{ state_attr('{_OPTION_INPUT_SELECT}', '{INPUT_SELECT_ATTR_OPTIONS}') }}}}",
"select_option": {
"service": "input_select.select_option",
"data": {
"entity_id": _OPTION_INPUT_SELECT,
"option": "{{ option }}",
},
},
"optimistic": True,
"icon": "{% if (trigger.to_state.state or '') == 'a' %}mdi:greater{% else %}mdi:less{% endif %}",
},
},
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
await hass.services.async_call(
INPUT_SELECT_DOMAIN,
INPUT_SELECT_SERVICE_SELECT_OPTION,
{CONF_ENTITY_ID: _OPTION_INPUT_SELECT, INPUT_SELECT_ATTR_OPTION: "b"},
blocking=True,
)
await hass.async_block_till_done()
state = hass.states.get(_TEST_SELECT)
assert state is not None
assert state.state == "b"
assert state.attributes[ATTR_ICON] == "mdi:less"
await hass.services.async_call(
INPUT_SELECT_DOMAIN,
INPUT_SELECT_SERVICE_SELECT_OPTION,
{CONF_ENTITY_ID: _OPTION_INPUT_SELECT, INPUT_SELECT_ATTR_OPTION: "a"},
blocking=True,
)
await hass.async_block_till_done()
state = hass.states.get(_TEST_SELECT)
assert state.state == "a"
assert state.attributes[ATTR_ICON] == "mdi:greater"
```
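
The template configurations above embed Jinja2 expressions inside Python f-strings, which is why the braces are quadrupled: each doubled brace in an f-string emits a literal brace, so `{{{{ ... }}}}` renders as the `{{ ... }}` delimiters the template engine expects. A quick standalone illustration, using an assumed entity id:

```python
entity = "input_select.option"
attr = "options"
template = f"{{{{ state_attr('{entity}', '{attr}') }}}}"
# Doubled braces collapse to literal braces, leaving a plain Jinja2 expression.
assert template == "{{ state_attr('input_select.option', 'options') }}"
```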
#### File: components/yeelight/test_light.py
```python
import asyncio
from datetime import timedelta
import logging
import socket
from unittest.mock import ANY, AsyncMock, MagicMock, call, patch
import pytest
from yeelight import (
BulbException,
BulbType,
HSVTransition,
LightType,
PowerMode,
RGBTransition,
SceneClass,
SleepTransition,
TemperatureTransition,
transitions,
)
from yeelight.flow import Action, Flow
from yeelight.main import _MODEL_SPECS
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_BRIGHTNESS_PCT,
ATTR_COLOR_TEMP,
ATTR_EFFECT,
ATTR_FLASH,
ATTR_HS_COLOR,
ATTR_KELVIN,
ATTR_RGB_COLOR,
ATTR_TRANSITION,
FLASH_LONG,
FLASH_SHORT,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
)
from homeassistant.components.yeelight import (
ATTR_COUNT,
ATTR_MODE_MUSIC,
ATTR_TRANSITIONS,
CONF_CUSTOM_EFFECTS,
CONF_FLOW_PARAMS,
CONF_MODE_MUSIC,
CONF_NIGHTLIGHT_SWITCH,
CONF_SAVE_ON_CHANGE,
CONF_TRANSITION,
DEFAULT_MODE_MUSIC,
DEFAULT_NIGHTLIGHT_SWITCH,
DEFAULT_SAVE_ON_CHANGE,
DEFAULT_TRANSITION,
DOMAIN,
YEELIGHT_HSV_TRANSACTION,
YEELIGHT_RGB_TRANSITION,
YEELIGHT_SLEEP_TRANSACTION,
YEELIGHT_TEMPERATURE_TRANSACTION,
)
from homeassistant.components.yeelight.light import (
ATTR_MINUTES,
ATTR_MODE,
EFFECT_CANDLE_FLICKER,
EFFECT_DATE_NIGHT,
EFFECT_DISCO,
EFFECT_FACEBOOK,
EFFECT_FAST_RANDOM_LOOP,
EFFECT_HAPPY_BIRTHDAY,
EFFECT_HOME,
EFFECT_MOVIE,
EFFECT_NIGHT_MODE,
EFFECT_ROMANCE,
EFFECT_STOP,
EFFECT_SUNRISE,
EFFECT_SUNSET,
EFFECT_TWITTER,
EFFECT_WHATSAPP,
SERVICE_SET_AUTO_DELAY_OFF_SCENE,
SERVICE_SET_COLOR_FLOW_SCENE,
SERVICE_SET_COLOR_SCENE,
SERVICE_SET_COLOR_TEMP_SCENE,
SERVICE_SET_HSV_SCENE,
SERVICE_SET_MODE,
SERVICE_SET_MUSIC_MODE,
SERVICE_START_FLOW,
SUPPORT_YEELIGHT,
YEELIGHT_COLOR_EFFECT_LIST,
YEELIGHT_MONO_EFFECT_LIST,
YEELIGHT_TEMP_ONLY_EFFECT_LIST,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_HOST,
CONF_NAME,
STATE_OFF,
STATE_ON,
STATE_UNAVAILABLE,
)
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import entity_registry as er
from homeassistant.setup import async_setup_component
from homeassistant.util import dt as dt_util
from homeassistant.util.color import (
color_hs_to_RGB,
color_hs_to_xy,
color_RGB_to_hs,
color_RGB_to_xy,
color_temperature_kelvin_to_mired,
color_temperature_mired_to_kelvin,
)
from . import (
CAPABILITIES,
ENTITY_LIGHT,
ENTITY_NIGHTLIGHT,
IP_ADDRESS,
MODULE,
NAME,
PROPERTIES,
UNIQUE_FRIENDLY_NAME,
_mocked_bulb,
_patch_discovery,
_patch_discovery_interval,
)
from tests.common import MockConfigEntry, async_fire_time_changed
CONFIG_ENTRY_DATA = {
CONF_HOST: IP_ADDRESS,
CONF_TRANSITION: DEFAULT_TRANSITION,
CONF_MODE_MUSIC: DEFAULT_MODE_MUSIC,
CONF_SAVE_ON_CHANGE: DEFAULT_SAVE_ON_CHANGE,
CONF_NIGHTLIGHT_SWITCH: DEFAULT_NIGHTLIGHT_SWITCH,
}
async def test_services(hass: HomeAssistant, caplog):
"""Test Yeelight services."""
assert await async_setup_component(hass, "homeassistant", {})
config_entry = MockConfigEntry(
domain=DOMAIN,
data={
**CONFIG_ENTRY_DATA,
CONF_MODE_MUSIC: True,
CONF_SAVE_ON_CHANGE: True,
CONF_NIGHTLIGHT_SWITCH: True,
},
)
config_entry.add_to_hass(hass)
mocked_bulb = _mocked_bulb()
with _patch_discovery(), _patch_discovery_interval(), patch(
f"{MODULE}.AsyncBulb", return_value=mocked_bulb
):
assert await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
assert hass.states.get(ENTITY_LIGHT).state == STATE_ON
assert hass.states.get(ENTITY_NIGHTLIGHT).state == STATE_OFF
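# Nested helper (closes over hass, mocked_bulb and caplog): calls the given
# Yeelight service, asserts the mocked bulb method was invoked with the expected
# payload, then optionally repeats the call with a failing side effect.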
async def _async_test_service(
service,
data,
method,
payload=None,
domain=DOMAIN,
failure_side_effect=HomeAssistantError,
):
err_count = len([x for x in caplog.records if x.levelno == logging.ERROR])
# success
if method.startswith("async_"):
mocked_method = AsyncMock()
else:
mocked_method = MagicMock()
setattr(mocked_bulb, method, mocked_method)
await hass.services.async_call(domain, service, data, blocking=True)
if payload is None:
mocked_method.assert_called_once()
elif isinstance(payload, list):
mocked_method.assert_called_once_with(*payload)
else:
mocked_method.assert_called_once_with(**payload)
assert (
len([x for x in caplog.records if x.levelno == logging.ERROR]) == err_count
)
# failure
if failure_side_effect:
if method.startswith("async_"):
mocked_method = AsyncMock(side_effect=failure_side_effect)
else:
mocked_method = MagicMock(side_effect=failure_side_effect)
setattr(mocked_bulb, method, mocked_method)
with pytest.raises(failure_side_effect):
await hass.services.async_call(domain, service, data, blocking=True)
# turn_on rgb_color
brightness = 100
rgb_color = (0, 128, 255)
transition = 2
mocked_bulb.last_properties["power"] = "off"
await hass.services.async_call(
"light",
SERVICE_TURN_ON,
{
ATTR_ENTITY_ID: ENTITY_LIGHT,
ATTR_BRIGHTNESS: brightness,
ATTR_RGB_COLOR: rgb_color,
ATTR_FLASH: FLASH_LONG,
ATTR_EFFECT: EFFECT_STOP,
ATTR_TRANSITION: transition,
},
blocking=True,
)
mocked_bulb.async_turn_on.assert_called_once_with(
duration=transition * 1000,
light_type=LightType.Main,
power_mode=PowerMode.NORMAL,
)
mocked_bulb.async_turn_on.reset_mock()
mocked_bulb.start_music.assert_called_once()
mocked_bulb.start_music.reset_mock()
mocked_bulb.async_set_brightness.assert_called_once_with(
brightness / 255 * 100, duration=transition * 1000, light_type=LightType.Main
)
mocked_bulb.async_set_brightness.reset_mock()
mocked_bulb.async_set_color_temp.assert_not_called()
mocked_bulb.async_set_color_temp.reset_mock()
mocked_bulb.async_set_hsv.assert_not_called()
mocked_bulb.async_set_hsv.reset_mock()
mocked_bulb.async_set_rgb.assert_called_once_with(
*rgb_color, duration=transition * 1000, light_type=LightType.Main
)
mocked_bulb.async_set_rgb.reset_mock()
mocked_bulb.async_start_flow.assert_called_once() # flash
mocked_bulb.async_start_flow.reset_mock()
mocked_bulb.async_stop_flow.assert_called_once_with(light_type=LightType.Main)
mocked_bulb.async_stop_flow.reset_mock()
# turn_on hs_color
brightness = 100
hs_color = (180, 100)
transition = 2
await hass.services.async_call(
"light",
SERVICE_TURN_ON,
{
ATTR_ENTITY_ID: ENTITY_LIGHT,
ATTR_BRIGHTNESS: brightness,
ATTR_HS_COLOR: hs_color,
ATTR_FLASH: FLASH_LONG,
ATTR_EFFECT: EFFECT_STOP,
ATTR_TRANSITION: transition,
},
blocking=True,
)
mocked_bulb.async_turn_on.assert_called_once_with(
duration=transition * 1000,
light_type=LightType.Main,
power_mode=PowerMode.NORMAL,
)
mocked_bulb.async_turn_on.reset_mock()
mocked_bulb.start_music.assert_called_once()
mocked_bulb.start_music.reset_mock()
mocked_bulb.async_set_brightness.assert_called_once_with(
brightness / 255 * 100, duration=transition * 1000, light_type=LightType.Main
)
mocked_bulb.async_set_brightness.reset_mock()
mocked_bulb.async_set_color_temp.assert_not_called()
mocked_bulb.async_set_color_temp.reset_mock()
mocked_bulb.async_set_hsv.assert_called_once_with(
*hs_color, duration=transition * 1000, light_type=LightType.Main
)
mocked_bulb.async_set_hsv.reset_mock()
mocked_bulb.async_set_rgb.assert_not_called()
mocked_bulb.async_set_rgb.reset_mock()
mocked_bulb.async_start_flow.assert_called_once() # flash
mocked_bulb.async_start_flow.reset_mock()
mocked_bulb.async_stop_flow.assert_called_once_with(light_type=LightType.Main)
mocked_bulb.async_stop_flow.reset_mock()
# turn_on color_temp
brightness = 100
color_temp = 200
transition = 1
mocked_bulb.last_properties["power"] = "off"
await hass.services.async_call(
"light",
SERVICE_TURN_ON,
{
ATTR_ENTITY_ID: ENTITY_LIGHT,
ATTR_BRIGHTNESS: brightness,
ATTR_COLOR_TEMP: color_temp,
ATTR_FLASH: FLASH_LONG,
ATTR_EFFECT: EFFECT_STOP,
ATTR_TRANSITION: transition,
},
blocking=True,
)
mocked_bulb.async_turn_on.assert_called_once_with(
duration=transition * 1000,
light_type=LightType.Main,
power_mode=PowerMode.NORMAL,
)
mocked_bulb.async_turn_on.reset_mock()
mocked_bulb.start_music.assert_called_once()
mocked_bulb.async_set_brightness.assert_called_once_with(
brightness / 255 * 100, duration=transition * 1000, light_type=LightType.Main
)
mocked_bulb.async_set_color_temp.assert_called_once_with(
color_temperature_mired_to_kelvin(color_temp),
duration=transition * 1000,
light_type=LightType.Main,
)
mocked_bulb.async_set_hsv.assert_not_called()
mocked_bulb.async_set_rgb.assert_not_called()
mocked_bulb.async_start_flow.assert_called_once() # flash
mocked_bulb.async_stop_flow.assert_called_once_with(light_type=LightType.Main)
# turn_on color_temp - flash short
brightness = 100
color_temp = 200
transition = 1
mocked_bulb.start_music.reset_mock()
mocked_bulb.async_set_brightness.reset_mock()
mocked_bulb.async_set_color_temp.reset_mock()
mocked_bulb.async_start_flow.reset_mock()
mocked_bulb.async_stop_flow.reset_mock()
mocked_bulb.last_properties["power"] = "off"
await hass.services.async_call(
"light",
SERVICE_TURN_ON,
{
ATTR_ENTITY_ID: ENTITY_LIGHT,
ATTR_BRIGHTNESS: brightness,
ATTR_COLOR_TEMP: color_temp,
ATTR_FLASH: FLASH_SHORT,
ATTR_EFFECT: EFFECT_STOP,
ATTR_TRANSITION: transition,
},
blocking=True,
)
mocked_bulb.async_turn_on.assert_called_once_with(
duration=transition * 1000,
light_type=LightType.Main,
power_mode=PowerMode.NORMAL,
)
mocked_bulb.async_turn_on.reset_mock()
mocked_bulb.start_music.assert_called_once()
mocked_bulb.async_set_brightness.assert_called_once_with(
brightness / 255 * 100, duration=transition * 1000, light_type=LightType.Main
)
mocked_bulb.async_set_color_temp.assert_called_once_with(
color_temperature_mired_to_kelvin(color_temp),
duration=transition * 1000,
light_type=LightType.Main,
)
mocked_bulb.async_set_hsv.assert_not_called()
mocked_bulb.async_set_rgb.assert_not_called()
mocked_bulb.async_start_flow.assert_called_once() # flash
mocked_bulb.async_stop_flow.assert_called_once_with(light_type=LightType.Main)
# turn_on nightlight
await _async_test_service(
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: ENTITY_NIGHTLIGHT},
"async_turn_on",
payload={
"duration": DEFAULT_TRANSITION,
"light_type": LightType.Main,
"power_mode": PowerMode.MOONLIGHT,
},
domain="light",
)
mocked_bulb.last_properties["power"] = "on"
assert hass.states.get(ENTITY_LIGHT).state != STATE_UNAVAILABLE
# turn_off
await _async_test_service(
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: ENTITY_LIGHT, ATTR_TRANSITION: transition},
"async_turn_off",
domain="light",
payload={"duration": transition * 1000, "light_type": LightType.Main},
)
# set_mode
mode = "rgb"
await _async_test_service(
SERVICE_SET_MODE,
{ATTR_ENTITY_ID: ENTITY_LIGHT, ATTR_MODE: "rgb"},
"async_set_power_mode",
[PowerMode[mode.upper()]],
)
# start_flow
await _async_test_service(
SERVICE_START_FLOW,
{
ATTR_ENTITY_ID: ENTITY_LIGHT,
ATTR_TRANSITIONS: [{YEELIGHT_TEMPERATURE_TRANSACTION: [1900, 2000, 60]}],
},
"async_start_flow",
)
# set_color_scene
await _async_test_service(
SERVICE_SET_COLOR_SCENE,
{
ATTR_ENTITY_ID: ENTITY_LIGHT,
ATTR_RGB_COLOR: [10, 20, 30],
ATTR_BRIGHTNESS: 50,
},
"async_set_scene",
[SceneClass.COLOR, 10, 20, 30, 50],
)
# set_hsv_scene
await _async_test_service(
SERVICE_SET_HSV_SCENE,
{ATTR_ENTITY_ID: ENTITY_LIGHT, ATTR_HS_COLOR: [180, 50], ATTR_BRIGHTNESS: 50},
"async_set_scene",
[SceneClass.HSV, 180, 50, 50],
)
# set_color_temp_scene
await _async_test_service(
SERVICE_SET_COLOR_TEMP_SCENE,
{ATTR_ENTITY_ID: ENTITY_LIGHT, ATTR_KELVIN: 4000, ATTR_BRIGHTNESS: 50},
"async_set_scene",
[SceneClass.CT, 4000, 50],
)
# set_color_flow_scene
await _async_test_service(
SERVICE_SET_COLOR_FLOW_SCENE,
{
ATTR_ENTITY_ID: ENTITY_LIGHT,
ATTR_TRANSITIONS: [{YEELIGHT_TEMPERATURE_TRANSACTION: [1900, 2000, 60]}],
},
"async_set_scene",
)
# set_auto_delay_off_scene
await _async_test_service(
SERVICE_SET_AUTO_DELAY_OFF_SCENE,
{ATTR_ENTITY_ID: ENTITY_LIGHT, ATTR_MINUTES: 1, ATTR_BRIGHTNESS: 50},
"async_set_scene",
[SceneClass.AUTO_DELAY_OFF, 50, 1],
)
# set_music_mode failure enable
mocked_bulb.start_music = MagicMock(side_effect=AssertionError)
assert "Unable to turn on music mode, consider disabling it" not in caplog.text
await hass.services.async_call(
DOMAIN,
SERVICE_SET_MUSIC_MODE,
{ATTR_ENTITY_ID: ENTITY_LIGHT, ATTR_MODE_MUSIC: "true"},
blocking=True,
)
assert mocked_bulb.start_music.mock_calls == [call()]
assert "Unable to turn on music mode, consider disabling it" in caplog.text
# set_music_mode disable
await _async_test_service(
SERVICE_SET_MUSIC_MODE,
{ATTR_ENTITY_ID: ENTITY_LIGHT, ATTR_MODE_MUSIC: "false"},
"stop_music",
failure_side_effect=None,
)
# set_music_mode success enable
await _async_test_service(
SERVICE_SET_MUSIC_MODE,
{ATTR_ENTITY_ID: ENTITY_LIGHT, ATTR_MODE_MUSIC: "true"},
"start_music",
failure_side_effect=None,
)
# test _cmd wrapper error handler
mocked_bulb.last_properties["power"] = "off"
mocked_bulb.available = True
await hass.services.async_call(
"homeassistant",
"update_entity",
{ATTR_ENTITY_ID: ENTITY_LIGHT},
blocking=True,
)
assert hass.states.get(ENTITY_LIGHT).state == STATE_OFF
mocked_bulb.async_turn_on = AsyncMock()
mocked_bulb.async_set_brightness = AsyncMock(side_effect=BulbException)
with pytest.raises(HomeAssistantError):
await hass.services.async_call(
"light",
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: ENTITY_LIGHT, ATTR_BRIGHTNESS: 50},
blocking=True,
)
assert hass.states.get(ENTITY_LIGHT).state == STATE_OFF
mocked_bulb.async_set_brightness = AsyncMock(side_effect=asyncio.TimeoutError)
with pytest.raises(HomeAssistantError):
await hass.services.async_call(
"light",
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: ENTITY_LIGHT, ATTR_BRIGHTNESS: 55},
blocking=True,
)
assert hass.states.get(ENTITY_LIGHT).state == STATE_OFF
mocked_bulb.async_set_brightness = AsyncMock(side_effect=socket.error)
with pytest.raises(HomeAssistantError):
await hass.services.async_call(
"light",
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: ENTITY_LIGHT, ATTR_BRIGHTNESS: 55},
blocking=True,
)
assert hass.states.get(ENTITY_LIGHT).state == STATE_UNAVAILABLE
async def test_update_errors(hass: HomeAssistant, caplog):
"""Test update errors."""
assert await async_setup_component(hass, "homeassistant", {})
config_entry = MockConfigEntry(
domain=DOMAIN,
data={
**CONFIG_ENTRY_DATA,
CONF_MODE_MUSIC: True,
CONF_SAVE_ON_CHANGE: True,
CONF_NIGHTLIGHT_SWITCH: True,
},
)
config_entry.add_to_hass(hass)
mocked_bulb = _mocked_bulb()
with _patch_discovery(), _patch_discovery_interval(), patch(
f"{MODULE}.AsyncBulb", return_value=mocked_bulb
):
assert await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
assert hass.states.get(ENTITY_LIGHT).state == STATE_ON
assert hass.states.get(ENTITY_NIGHTLIGHT).state == STATE_OFF
# Timeout usually means the bulb is overloaded with commands
# but will still respond eventually.
mocked_bulb.async_turn_off = AsyncMock(side_effect=asyncio.TimeoutError)
with pytest.raises(HomeAssistantError):
await hass.services.async_call(
"light",
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: ENTITY_LIGHT},
blocking=True,
)
assert hass.states.get(ENTITY_LIGHT).state == STATE_ON
# socket.error usually means the bulb dropped the connection
# or lost wifi, then came back online and forced the existing
# connection closed with a TCP RST
mocked_bulb.async_turn_off = AsyncMock(side_effect=socket.error)
with pytest.raises(HomeAssistantError):
await hass.services.async_call(
"light",
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: ENTITY_LIGHT},
blocking=True,
)
assert hass.states.get(ENTITY_LIGHT).state == STATE_UNAVAILABLE
async def test_state_already_set_avoid_ratelimit(hass: HomeAssistant):
"""Ensure we suppress state changes that will increase the rate limit when there is no change."""
mocked_bulb = _mocked_bulb()
properties = {**PROPERTIES}
properties.pop("active_mode")
properties.pop("nl_br")
properties["color_mode"] = "3" # HSV
mocked_bulb.last_properties = properties
mocked_bulb.bulb_type = BulbType.Color
config_entry = MockConfigEntry(
domain=DOMAIN, data={**CONFIG_ENTRY_DATA, CONF_NIGHTLIGHT_SWITCH: False}
)
config_entry.add_to_hass(hass)
with _patch_discovery(), _patch_discovery_interval(), patch(
f"{MODULE}.AsyncBulb", return_value=mocked_bulb
):
assert await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
# We use asyncio.create_task now to avoid
# blocking starting so we need to block again
await hass.async_block_till_done()
await hass.services.async_call(
"light",
SERVICE_TURN_ON,
{
ATTR_ENTITY_ID: ENTITY_LIGHT,
ATTR_HS_COLOR: (PROPERTIES["hue"], PROPERTIES["sat"]),
},
blocking=True,
)
assert mocked_bulb.async_set_hsv.mock_calls == []
assert mocked_bulb.async_set_rgb.mock_calls == []
assert mocked_bulb.async_set_color_temp.mock_calls == []
assert mocked_bulb.async_set_brightness.mock_calls == []
mocked_bulb.last_properties["color_mode"] = 1
rgb = int(PROPERTIES["rgb"])
blue = rgb & 0xFF
green = (rgb >> 8) & 0xFF
red = (rgb >> 16) & 0xFF
await hass.services.async_call(
"light",
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: ENTITY_LIGHT, ATTR_RGB_COLOR: (red, green, blue)},
blocking=True,
)
assert mocked_bulb.async_set_hsv.mock_calls == []
assert mocked_bulb.async_set_rgb.mock_calls == []
assert mocked_bulb.async_set_color_temp.mock_calls == []
assert mocked_bulb.async_set_brightness.mock_calls == []
mocked_bulb.async_set_rgb.reset_mock()
await hass.services.async_call(
"light",
SERVICE_TURN_ON,
{
ATTR_ENTITY_ID: ENTITY_LIGHT,
ATTR_BRIGHTNESS_PCT: PROPERTIES["bright"],
},
blocking=True,
)
assert mocked_bulb.async_set_hsv.mock_calls == []
assert mocked_bulb.async_set_rgb.mock_calls == []
assert mocked_bulb.async_set_color_temp.mock_calls == []
assert mocked_bulb.async_set_brightness.mock_calls == []
await hass.services.async_call(
"light",
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: ENTITY_LIGHT, ATTR_COLOR_TEMP: 250},
blocking=True,
)
assert mocked_bulb.async_set_hsv.mock_calls == []
assert mocked_bulb.async_set_rgb.mock_calls == []
# Should call for the color mode change
assert mocked_bulb.async_set_color_temp.mock_calls == [
call(4000, duration=350, light_type=ANY)
]
assert mocked_bulb.async_set_brightness.mock_calls == []
mocked_bulb.async_set_color_temp.reset_mock()
mocked_bulb.last_properties["color_mode"] = 2
await hass.services.async_call(
"light",
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: ENTITY_LIGHT, ATTR_COLOR_TEMP: 250},
blocking=True,
)
assert mocked_bulb.async_set_hsv.mock_calls == []
assert mocked_bulb.async_set_rgb.mock_calls == []
assert mocked_bulb.async_set_color_temp.mock_calls == []
assert mocked_bulb.async_set_brightness.mock_calls == []
mocked_bulb.last_properties["color_mode"] = 3
# This last change should generate a call even though
# the color mode is the same since the HSV has changed
await hass.services.async_call(
"light",
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: ENTITY_LIGHT, ATTR_HS_COLOR: (5, 5)},
blocking=True,
)
assert mocked_bulb.async_set_hsv.mock_calls == [
call(5.0, 5.0, duration=350, light_type=ANY)
]
assert mocked_bulb.async_set_rgb.mock_calls == []
assert mocked_bulb.async_set_color_temp.mock_calls == []
assert mocked_bulb.async_set_brightness.mock_calls == []
async def test_device_types(hass: HomeAssistant, caplog):
"""Test different device types."""
mocked_bulb = _mocked_bulb()
properties = {**PROPERTIES}
properties.pop("active_mode")
properties["color_mode"] = "3" # HSV
mocked_bulb.last_properties = properties
async def _async_setup(config_entry):
with patch(f"{MODULE}.AsyncBulb", return_value=mocked_bulb):
assert await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
# We use asyncio.create_task now to avoid
# blocking starting so we need to block again
await hass.async_block_till_done()
async def _async_test(
bulb_type,
model,
target_properties,
nightlight_entity_properties=None,
name=UNIQUE_FRIENDLY_NAME,
entity_id=ENTITY_LIGHT,
nightlight_mode_properties=None,
):
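        """Set up a config entry for the given bulb type/model, verify the main
        entity's attributes, then repeat for nightlight-mode and separate
        nightlight-entity configurations when those properties are provided."""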
config_entry = MockConfigEntry(
domain=DOMAIN, data={**CONFIG_ENTRY_DATA, CONF_NIGHTLIGHT_SWITCH: False}
)
config_entry.add_to_hass(hass)
mocked_bulb.bulb_type = bulb_type
model_specs = _MODEL_SPECS.get(model)
type(mocked_bulb).get_model_specs = MagicMock(return_value=model_specs)
original_nightlight_brightness = mocked_bulb.last_properties["nl_br"]
mocked_bulb.last_properties["nl_br"] = "0"
await _async_setup(config_entry)
state = hass.states.get(entity_id)
assert state.state == "on"
target_properties["friendly_name"] = name
target_properties["flowing"] = False
target_properties["night_light"] = False
target_properties["music_mode"] = False
assert dict(state.attributes) == target_properties
await hass.config_entries.async_unload(config_entry.entry_id)
await config_entry.async_remove(hass)
registry = er.async_get(hass)
registry.async_clear_config_entry(config_entry.entry_id)
mocked_bulb.last_properties["nl_br"] = original_nightlight_brightness
# nightlight as a setting of the main entity
if nightlight_mode_properties is not None:
mocked_bulb.last_properties["active_mode"] = True
config_entry.add_to_hass(hass)
await _async_setup(config_entry)
state = hass.states.get(entity_id)
assert state.state == "on"
nightlight_mode_properties["friendly_name"] = name
nightlight_mode_properties["flowing"] = False
nightlight_mode_properties["night_light"] = True
nightlight_mode_properties["music_mode"] = False
assert dict(state.attributes) == nightlight_mode_properties
await hass.config_entries.async_unload(config_entry.entry_id)
await config_entry.async_remove(hass)
registry.async_clear_config_entry(config_entry.entry_id)
await hass.async_block_till_done()
mocked_bulb.last_properties.pop("active_mode")
# nightlight as a separate entity
if nightlight_entity_properties is not None:
config_entry = MockConfigEntry(
domain=DOMAIN, data={**CONFIG_ENTRY_DATA, CONF_NIGHTLIGHT_SWITCH: True}
)
config_entry.add_to_hass(hass)
await _async_setup(config_entry)
assert hass.states.get(entity_id).state == "off"
state = hass.states.get(f"{entity_id}_nightlight")
assert state.state == "on"
nightlight_entity_properties["friendly_name"] = f"{name} Nightlight"
nightlight_entity_properties["icon"] = "mdi:weather-night"
nightlight_entity_properties["flowing"] = False
nightlight_entity_properties["night_light"] = True
nightlight_entity_properties["music_mode"] = False
assert dict(state.attributes) == nightlight_entity_properties
await hass.config_entries.async_unload(config_entry.entry_id)
await config_entry.async_remove(hass)
registry.async_clear_config_entry(config_entry.entry_id)
await hass.async_block_till_done()
bright = round(255 * int(PROPERTIES["bright"]) / 100)
ct = color_temperature_kelvin_to_mired(int(PROPERTIES["ct"]))
hue = int(PROPERTIES["hue"])
sat = int(PROPERTIES["sat"])
rgb = int(PROPERTIES["rgb"])
rgb_color = ((rgb >> 16) & 0xFF, (rgb >> 8) & 0xFF, rgb & 0xFF)
hs_color = (hue, sat)
bg_bright = round(255 * int(PROPERTIES["bg_bright"]) / 100)
bg_ct = color_temperature_kelvin_to_mired(int(PROPERTIES["bg_ct"]))
bg_hue = int(PROPERTIES["bg_hue"])
bg_sat = int(PROPERTIES["bg_sat"])
bg_rgb = int(PROPERTIES["bg_rgb"])
bg_hs_color = (bg_hue, bg_sat)
bg_rgb_color = ((bg_rgb >> 16) & 0xFF, (bg_rgb >> 8) & 0xFF, bg_rgb & 0xFF)
nl_br = round(255 * int(PROPERTIES["nl_br"]) / 100)
# Default
await _async_test(
None,
"mono",
{
"effect_list": YEELIGHT_MONO_EFFECT_LIST,
"supported_features": SUPPORT_YEELIGHT,
"brightness": bright,
"color_mode": "brightness",
"supported_color_modes": ["brightness"],
},
)
# White
await _async_test(
BulbType.White,
"mono",
{
"effect_list": YEELIGHT_MONO_EFFECT_LIST,
"supported_features": SUPPORT_YEELIGHT,
"brightness": bright,
"color_mode": "brightness",
"supported_color_modes": ["brightness"],
},
)
# Color - color mode CT
mocked_bulb.last_properties["color_mode"] = "2" # CT
model_specs = _MODEL_SPECS["color"]
await _async_test(
BulbType.Color,
"color",
{
"effect_list": YEELIGHT_COLOR_EFFECT_LIST,
"supported_features": SUPPORT_YEELIGHT,
"min_mireds": color_temperature_kelvin_to_mired(
model_specs["color_temp"]["max"]
),
"max_mireds": color_temperature_kelvin_to_mired(
model_specs["color_temp"]["min"]
),
"brightness": bright,
"color_temp": ct,
"color_mode": "color_temp",
"supported_color_modes": ["color_temp", "hs", "rgb"],
"hs_color": (26.812, 34.87),
"rgb_color": (255, 205, 166),
"xy_color": (0.421, 0.364),
},
nightlight_entity_properties={
"supported_features": 0,
"color_mode": "onoff",
"supported_color_modes": ["onoff"],
},
nightlight_mode_properties={
"effect_list": YEELIGHT_COLOR_EFFECT_LIST,
"supported_features": SUPPORT_YEELIGHT,
"hs_color": (28.401, 100.0),
"rgb_color": (255, 120, 0),
"xy_color": (0.621, 0.367),
"min_mireds": color_temperature_kelvin_to_mired(
model_specs["color_temp"]["max"]
),
"max_mireds": color_temperature_kelvin_to_mired(
model_specs["color_temp"]["min"]
),
"brightness": nl_br,
"color_mode": "color_temp",
"supported_color_modes": ["color_temp", "hs", "rgb"],
"color_temp": color_temperature_kelvin_to_mired(
model_specs["color_temp"]["min"]
),
},
)
# Color - color mode HS
mocked_bulb.last_properties["color_mode"] = "3" # HSV
model_specs = _MODEL_SPECS["color"]
await _async_test(
BulbType.Color,
"color",
{
"effect_list": YEELIGHT_COLOR_EFFECT_LIST,
"supported_features": SUPPORT_YEELIGHT,
"min_mireds": color_temperature_kelvin_to_mired(
model_specs["color_temp"]["max"]
),
"max_mireds": color_temperature_kelvin_to_mired(
model_specs["color_temp"]["min"]
),
"brightness": bright,
"hs_color": hs_color,
"rgb_color": color_hs_to_RGB(*hs_color),
"xy_color": color_hs_to_xy(*hs_color),
"color_mode": "hs",
"supported_color_modes": ["color_temp", "hs", "rgb"],
},
nightlight_entity_properties={
"supported_features": 0,
"color_mode": "onoff",
"supported_color_modes": ["onoff"],
},
)
# Color - color mode RGB
mocked_bulb.last_properties["color_mode"] = "1" # RGB
model_specs = _MODEL_SPECS["color"]
await _async_test(
BulbType.Color,
"color",
{
"effect_list": YEELIGHT_COLOR_EFFECT_LIST,
"supported_features": SUPPORT_YEELIGHT,
"min_mireds": color_temperature_kelvin_to_mired(
model_specs["color_temp"]["max"]
),
"max_mireds": color_temperature_kelvin_to_mired(
model_specs["color_temp"]["min"]
),
"brightness": bright,
"hs_color": color_RGB_to_hs(*rgb_color),
"rgb_color": rgb_color,
"xy_color": color_RGB_to_xy(*rgb_color),
"color_mode": "rgb",
"supported_color_modes": ["color_temp", "hs", "rgb"],
},
nightlight_entity_properties={
"supported_features": 0,
"color_mode": "onoff",
"supported_color_modes": ["onoff"],
},
)
# Color - color mode HS but no hue
mocked_bulb.last_properties["color_mode"] = "3" # HSV
mocked_bulb.last_properties["hue"] = None
model_specs = _MODEL_SPECS["color"]
await _async_test(
BulbType.Color,
"color",
{
"effect_list": YEELIGHT_COLOR_EFFECT_LIST,
"supported_features": SUPPORT_YEELIGHT,
"min_mireds": color_temperature_kelvin_to_mired(
model_specs["color_temp"]["max"]
),
"max_mireds": color_temperature_kelvin_to_mired(
model_specs["color_temp"]["min"]
),
"brightness": bright,
"color_mode": "hs",
"supported_color_modes": ["color_temp", "hs", "rgb"],
},
nightlight_entity_properties={
"supported_features": 0,
"color_mode": "onoff",
"supported_color_modes": ["onoff"],
},
)
# Color - color mode RGB but no color
mocked_bulb.last_properties["color_mode"] = "1" # RGB
mocked_bulb.last_properties["rgb"] = None
model_specs = _MODEL_SPECS["color"]
await _async_test(
BulbType.Color,
"color",
{
"effect_list": YEELIGHT_COLOR_EFFECT_LIST,
"supported_features": SUPPORT_YEELIGHT,
"min_mireds": color_temperature_kelvin_to_mired(
model_specs["color_temp"]["max"]
),
"max_mireds": color_temperature_kelvin_to_mired(
model_specs["color_temp"]["min"]
),
"brightness": bright,
"color_mode": "rgb",
"supported_color_modes": ["color_temp", "hs", "rgb"],
},
nightlight_entity_properties={
"supported_features": 0,
"color_mode": "onoff",
"supported_color_modes": ["onoff"],
},
)
# Color - unsupported color_mode
mocked_bulb.last_properties["color_mode"] = 4 # Unsupported
model_specs = _MODEL_SPECS["color"]
await _async_test(
BulbType.Color,
"color",
{
"effect_list": YEELIGHT_COLOR_EFFECT_LIST,
"supported_features": SUPPORT_YEELIGHT,
"min_mireds": color_temperature_kelvin_to_mired(
model_specs["color_temp"]["max"]
),
"max_mireds": color_temperature_kelvin_to_mired(
model_specs["color_temp"]["min"]
),
"color_mode": "unknown",
"supported_color_modes": ["color_temp", "hs", "rgb"],
},
{
"supported_features": 0,
"color_mode": "onoff",
"supported_color_modes": ["onoff"],
},
)
assert "Light reported unknown color mode: 4" in caplog.text
# WhiteTemp
model_specs = _MODEL_SPECS["ceiling1"]
await _async_test(
BulbType.WhiteTemp,
"ceiling1",
{
"effect_list": YEELIGHT_TEMP_ONLY_EFFECT_LIST,
"supported_features": SUPPORT_YEELIGHT,
"min_mireds": color_temperature_kelvin_to_mired(
model_specs["color_temp"]["max"]
),
"max_mireds": color_temperature_kelvin_to_mired(
model_specs["color_temp"]["min"]
),
"brightness": bright,
"color_temp": ct,
"color_mode": "color_temp",
"supported_color_modes": ["color_temp"],
"hs_color": (26.812, 34.87),
"rgb_color": (255, 205, 166),
"xy_color": (0.421, 0.364),
},
nightlight_entity_properties={
"supported_features": 0,
"brightness": nl_br,
"color_mode": "brightness",
"supported_color_modes": ["brightness"],
},
nightlight_mode_properties={
"effect_list": YEELIGHT_TEMP_ONLY_EFFECT_LIST,
"supported_features": SUPPORT_YEELIGHT,
"min_mireds": color_temperature_kelvin_to_mired(
model_specs["color_temp"]["max"]
),
"max_mireds": color_temperature_kelvin_to_mired(
model_specs["color_temp"]["min"]
),
"brightness": nl_br,
"color_temp": color_temperature_kelvin_to_mired(
model_specs["color_temp"]["min"]
),
"color_mode": "color_temp",
"supported_color_modes": ["color_temp"],
"hs_color": (28.391, 65.659),
"rgb_color": (255, 166, 87),
"xy_color": (0.526, 0.387),
},
)
# WhiteTempMood
properties.pop("power")
properties["main_power"] = "on"
model_specs = _MODEL_SPECS["ceiling4"]
await _async_test(
BulbType.WhiteTempMood,
"ceiling4",
{
"friendly_name": NAME,
"effect_list": YEELIGHT_TEMP_ONLY_EFFECT_LIST,
"flowing": False,
"night_light": True,
"supported_features": SUPPORT_YEELIGHT,
"min_mireds": color_temperature_kelvin_to_mired(
model_specs["color_temp"]["max"]
),
"max_mireds": color_temperature_kelvin_to_mired(
model_specs["color_temp"]["min"]
),
"brightness": bright,
"color_temp": ct,
"color_mode": "color_temp",
"supported_color_modes": ["color_temp"],
"hs_color": (26.812, 34.87),
"rgb_color": (255, 205, 166),
"xy_color": (0.421, 0.364),
},
nightlight_entity_properties={
"supported_features": 0,
"brightness": nl_br,
"color_mode": "brightness",
"supported_color_modes": ["brightness"],
},
nightlight_mode_properties={
"friendly_name": NAME,
"effect_list": YEELIGHT_TEMP_ONLY_EFFECT_LIST,
"flowing": False,
"night_light": True,
"supported_features": SUPPORT_YEELIGHT,
"min_mireds": color_temperature_kelvin_to_mired(
model_specs["color_temp"]["max"]
),
"max_mireds": color_temperature_kelvin_to_mired(
model_specs["color_temp"]["min"]
),
"brightness": nl_br,
"color_temp": color_temperature_kelvin_to_mired(
model_specs["color_temp"]["min"]
),
"color_mode": "color_temp",
"supported_color_modes": ["color_temp"],
"hs_color": (28.391, 65.659),
"rgb_color": (255, 166, 87),
"xy_color": (0.526, 0.387),
},
)
# Background light - color mode CT
mocked_bulb.last_properties["bg_lmode"] = "2" # CT
await _async_test(
BulbType.WhiteTempMood,
"ceiling4",
{
"effect_list": YEELIGHT_COLOR_EFFECT_LIST,
"supported_features": SUPPORT_YEELIGHT,
"min_mireds": color_temperature_kelvin_to_mired(6500),
"max_mireds": color_temperature_kelvin_to_mired(1700),
"brightness": bg_bright,
"color_temp": bg_ct,
"color_mode": "color_temp",
"supported_color_modes": ["color_temp", "hs", "rgb"],
"hs_color": (27.001, 19.243),
"rgb_color": (255, 228, 205),
"xy_color": (0.372, 0.35),
},
name=f"{UNIQUE_FRIENDLY_NAME} Ambilight",
entity_id=f"{ENTITY_LIGHT}_ambilight",
)
# Background light - color mode HS
mocked_bulb.last_properties["bg_lmode"] = "3" # HS
await _async_test(
BulbType.WhiteTempMood,
"ceiling4",
{
"effect_list": YEELIGHT_COLOR_EFFECT_LIST,
"supported_features": SUPPORT_YEELIGHT,
"min_mireds": color_temperature_kelvin_to_mired(6500),
"max_mireds": color_temperature_kelvin_to_mired(1700),
"brightness": bg_bright,
"hs_color": bg_hs_color,
"rgb_color": color_hs_to_RGB(*bg_hs_color),
"xy_color": color_hs_to_xy(*bg_hs_color),
"color_mode": "hs",
"supported_color_modes": ["color_temp", "hs", "rgb"],
},
name=f"{UNIQUE_FRIENDLY_NAME} Ambilight",
entity_id=f"{ENTITY_LIGHT}_ambilight",
)
# Background light - color mode RGB
mocked_bulb.last_properties["bg_lmode"] = "1" # RGB
await _async_test(
BulbType.WhiteTempMood,
"ceiling4",
{
"effect_list": YEELIGHT_COLOR_EFFECT_LIST,
"supported_features": SUPPORT_YEELIGHT,
"min_mireds": color_temperature_kelvin_to_mired(6500),
"max_mireds": color_temperature_kelvin_to_mired(1700),
"brightness": bg_bright,
"hs_color": color_RGB_to_hs(*bg_rgb_color),
"rgb_color": bg_rgb_color,
"xy_color": color_RGB_to_xy(*bg_rgb_color),
"color_mode": "rgb",
"supported_color_modes": ["color_temp", "hs", "rgb"],
},
name=f"{UNIQUE_FRIENDLY_NAME} Ambilight",
entity_id=f"{ENTITY_LIGHT}_ambilight",
)
async def test_effects(hass: HomeAssistant):
"""Test effects."""
assert await async_setup_component(
hass,
DOMAIN,
{
DOMAIN: {
CONF_CUSTOM_EFFECTS: [
{
CONF_NAME: "mock_effect",
CONF_FLOW_PARAMS: {
ATTR_COUNT: 3,
ATTR_TRANSITIONS: [
{YEELIGHT_HSV_TRANSACTION: [300, 50, 500, 50]},
{YEELIGHT_RGB_TRANSITION: [100, 100, 100, 300, 30]},
{YEELIGHT_TEMPERATURE_TRANSACTION: [3000, 200, 20]},
{YEELIGHT_SLEEP_TRANSACTION: [800]},
],
},
}
]
}
},
)
config_entry = MockConfigEntry(domain=DOMAIN, data=CONFIG_ENTRY_DATA)
config_entry.add_to_hass(hass)
mocked_bulb = _mocked_bulb()
with _patch_discovery(), _patch_discovery_interval(), patch(
f"{MODULE}.AsyncBulb", return_value=mocked_bulb
):
assert await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
assert hass.states.get(ENTITY_LIGHT).attributes.get(
"effect_list"
) == YEELIGHT_COLOR_EFFECT_LIST + ["mock_effect"]
async def _async_test_effect(name, target=None, called=True):
async_mocked_start_flow = AsyncMock()
mocked_bulb.async_start_flow = async_mocked_start_flow
await hass.services.async_call(
"light",
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: ENTITY_LIGHT, ATTR_EFFECT: name},
blocking=True,
)
if not called:
return
async_mocked_start_flow.assert_called_once()
if target is None:
return
args, _ = async_mocked_start_flow.call_args
flow = args[0]
assert flow.count == target.count
assert flow.action == target.action
assert str(flow.transitions) == str(target.transitions)
effects = {
"mock_effect": Flow(
count=3,
transitions=[
HSVTransition(300, 50, 500, 50),
RGBTransition(100, 100, 100, 300, 30),
TemperatureTransition(3000, 200, 20),
SleepTransition(800),
],
),
EFFECT_DISCO: Flow(transitions=transitions.disco()),
EFFECT_FAST_RANDOM_LOOP: None,
EFFECT_WHATSAPP: Flow(count=2, transitions=transitions.pulse(37, 211, 102)),
EFFECT_FACEBOOK: Flow(count=2, transitions=transitions.pulse(59, 89, 152)),
EFFECT_TWITTER: Flow(count=2, transitions=transitions.pulse(0, 172, 237)),
EFFECT_HOME: Flow(
count=0,
action=Action.recover,
transitions=[
TemperatureTransition(degrees=3200, duration=500, brightness=80)
],
),
EFFECT_NIGHT_MODE: Flow(
count=0,
action=Action.recover,
transitions=[RGBTransition(0xFF, 0x99, 0x00, duration=500, brightness=1)],
),
EFFECT_DATE_NIGHT: Flow(
count=0,
action=Action.recover,
transitions=[RGBTransition(0xFF, 0x66, 0x00, duration=500, brightness=50)],
),
EFFECT_MOVIE: Flow(
count=0,
action=Action.recover,
transitions=[
RGBTransition(
red=0x14, green=0x14, blue=0x32, duration=500, brightness=50
)
],
),
EFFECT_SUNRISE: Flow(
count=1,
action=Action.stay,
transitions=[
RGBTransition(
red=0xFF, green=0x4D, blue=0x00, duration=50, brightness=1
),
TemperatureTransition(degrees=1700, duration=360000, brightness=10),
TemperatureTransition(degrees=2700, duration=540000, brightness=100),
],
),
EFFECT_SUNSET: Flow(
count=1,
action=Action.off,
transitions=[
TemperatureTransition(degrees=2700, duration=50, brightness=10),
TemperatureTransition(degrees=1700, duration=180000, brightness=5),
RGBTransition(
red=0xFF, green=0x4C, blue=0x00, duration=420000, brightness=1
),
],
),
EFFECT_ROMANCE: Flow(
count=0,
action=Action.stay,
transitions=[
RGBTransition(
red=0x59, green=0x15, blue=0x6D, duration=4000, brightness=1
),
RGBTransition(
red=0x66, green=0x14, blue=0x2A, duration=4000, brightness=1
),
],
),
EFFECT_HAPPY_BIRTHDAY: Flow(
count=0,
action=Action.stay,
transitions=[
RGBTransition(
red=0xDC, green=0x50, blue=0x19, duration=1996, brightness=80
),
RGBTransition(
red=0xDC, green=0x78, blue=0x1E, duration=1996, brightness=80
),
RGBTransition(
red=0xAA, green=0x32, blue=0x14, duration=1996, brightness=80
),
],
),
EFFECT_CANDLE_FLICKER: Flow(
count=0,
action=Action.recover,
transitions=[
TemperatureTransition(degrees=2700, duration=800, brightness=50),
TemperatureTransition(degrees=2700, duration=800, brightness=30),
TemperatureTransition(degrees=2700, duration=1200, brightness=80),
TemperatureTransition(degrees=2700, duration=800, brightness=60),
TemperatureTransition(degrees=2700, duration=1200, brightness=90),
TemperatureTransition(degrees=2700, duration=2400, brightness=50),
TemperatureTransition(degrees=2700, duration=1200, brightness=80),
TemperatureTransition(degrees=2700, duration=800, brightness=60),
TemperatureTransition(degrees=2700, duration=400, brightness=70),
],
),
}
for name, target in effects.items():
await _async_test_effect(name, target)
await _async_test_effect("not_existed", called=False)
async def test_ambilight_with_nightlight_disabled(hass: HomeAssistant):
"""Test that main light on ambilights with the nightlight disabled shows the correct brightness."""
mocked_bulb = _mocked_bulb()
properties = {**PROPERTIES}
capabilities = {**CAPABILITIES}
capabilities["model"] = "ceiling10"
properties["color_mode"] = "3" # HSV
properties["bg_power"] = "off"
properties["bg_lmode"] = "2" # CT
mocked_bulb.last_properties = properties
mocked_bulb.bulb_type = BulbType.WhiteTempMood
main_light_entity_id = "light.yeelight_ceiling10_0x15243f"
config_entry = MockConfigEntry(
domain=DOMAIN,
data={**CONFIG_ENTRY_DATA, CONF_NIGHTLIGHT_SWITCH: False},
options={**CONFIG_ENTRY_DATA, CONF_NIGHTLIGHT_SWITCH: False},
)
config_entry.add_to_hass(hass)
with _patch_discovery(capabilities=capabilities), patch(
f"{MODULE}.AsyncBulb", return_value=mocked_bulb
):
assert await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
# We use asyncio.create_task now to avoid
# blocking starting so we need to block again
await hass.async_block_till_done()
state = hass.states.get(main_light_entity_id)
assert state.state == "on"
# bg_power off should not set the brightness to 0
assert state.attributes[ATTR_BRIGHTNESS] == 128
async def test_state_fails_to_update_triggers_update(hass: HomeAssistant):
"""Ensure we call async_get_properties if the turn on/off fails to update the state."""
mocked_bulb = _mocked_bulb()
properties = {**PROPERTIES}
properties.pop("active_mode")
properties["color_mode"] = "3" # HSV
mocked_bulb.last_properties = properties
mocked_bulb.bulb_type = BulbType.Color
config_entry = MockConfigEntry(
domain=DOMAIN, data={**CONFIG_ENTRY_DATA, CONF_NIGHTLIGHT_SWITCH: False}
)
config_entry.add_to_hass(hass)
with _patch_discovery(), _patch_discovery_interval(), patch(
f"{MODULE}.AsyncBulb", return_value=mocked_bulb
):
assert await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
# We use asyncio.create_task now to avoid
# blocking starting so we need to block again
await hass.async_block_till_done()
assert len(mocked_bulb.async_get_properties.mock_calls) == 1
mocked_bulb.last_properties["power"] = "off"
await hass.services.async_call(
"light",
SERVICE_TURN_ON,
{
ATTR_ENTITY_ID: ENTITY_LIGHT,
},
blocking=True,
)
assert len(mocked_bulb.async_turn_on.mock_calls) == 1
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=1))
await hass.async_block_till_done()
assert len(mocked_bulb.async_get_properties.mock_calls) == 2
mocked_bulb.last_properties["power"] = "on"
for _ in range(5):
await hass.services.async_call(
"light",
SERVICE_TURN_OFF,
{
ATTR_ENTITY_ID: ENTITY_LIGHT,
},
blocking=True,
)
assert len(mocked_bulb.async_turn_off.mock_calls) == 5
# Even with five calls we only do one state request
# since each successive call should cancel the unexpected
# state check
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=2))
await hass.async_block_till_done()
assert len(mocked_bulb.async_get_properties.mock_calls) == 3
# But if the state is correct no calls
await hass.services.async_call(
"light",
SERVICE_TURN_ON,
{
ATTR_ENTITY_ID: ENTITY_LIGHT,
},
blocking=True,
)
assert len(mocked_bulb.async_turn_on.mock_calls) == 1
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=3))
await hass.async_block_till_done()
assert len(mocked_bulb.async_get_properties.mock_calls) == 3
```
|
{
"source": "JeffersonCarvalh0/hawkins",
"score": 2
}
|
#### File: hawkins/crud/forms.py
```python
from .models import SchoolClass, Student, Grade, Subject
from django import forms
class ClassAddStudentForm(forms.ModelForm):
students = forms.ModelMultipleChoiceField(
widget = forms.CheckboxSelectMultiple(),
queryset = Student.objects.all(),
required = False
)
class Meta:
model = SchoolClass
fields = ('students',)
class MyModelMultipleChoiceField(forms.ModelMultipleChoiceField):
def label_from_instance(self, obj):
return obj.name
class CreateClassFromExistingForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
school_class = kwargs.pop('school_class', None)
super().__init__(*args, **kwargs)
self.fields['regular_grades_num'] = forms.IntegerField(initial=school_class.regular_grades_num)
self.fields['retake_grades_num'] = forms.IntegerField(initial=school_class.retake_grades_num)
self.fields['avg'] = forms.FloatField(initial=school_class.avg)
self.fields['students'] = forms.ModelMultipleChoiceField(
widget = forms.CheckboxSelectMultiple(),
queryset = school_class.students.all(),
required = False
)
self.fields['subjects'] = MyModelMultipleChoiceField(
widget = forms.CheckboxSelectMultiple(),
queryset = school_class.subjects.all(),
required = False
)
class Meta:
model = SchoolClass
fields = '__all__'
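# Illustrative usage: the view rendering this form is expected to pass the
# source class through the custom keyword argument, e.g.
#     form = CreateClassFromExistingForm(school_class=SchoolClass.objects.get(pk=pk))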
```
|
{
"source": "JeffersonCarvalh0/periodic-cat-photos",
"score": 3
}
|
#### File: JeffersonCarvalh0/periodic-cat-photos/periodic_cats.py
```python
import sys
import discord
from discord.ext import commands
import requests
import api
class PeriodicCats(commands.Cog):
def __init__(self, bot: commands.Bot):
self.bot = bot
@commands.Cog.listener()
async def on_message(self, message: discord.Message):
if not message.author == self.bot.user:
print(
"\ncommand [%s] sent by user %s in channel %s from guild %s"
% (message.content, message.author, message.channel, message.guild)
)
@commands.Cog.listener()
async def on_command_error(
self, context: commands.Context, error: commands.CommandError
):
if isinstance(error, commands.CommandNotFound):
return
if isinstance(error, commands.CommandInvokeError):
print("exception: %s" % error.original, file=sys.stderr)
if isinstance(error.original, requests.exceptions.HTTPError):
message = "An error has ocurred with our cat images provider. Please try again later"
else:
message = "An unknown error has ocurred"
else:
message = "Something went wrong"
await context.send(message, reference=context.message)
@commands.command(name="random")
async def random_cat_photo(self, context: commands.Context):
"""Shows a random cat picture"""
cat_image_url = api.get_random_photo_url()
await context.send(
cat_image_url,
reference=context.message,
)
def setup(bot: commands.Bot):
bot.add_cog(PeriodicCats(bot))
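# Illustrative loading sketch, assuming discord.py 1.x (synchronous setup) and
# that this file is importable as "periodic_cats":
#     bot = commands.Bot(command_prefix="!")
#     bot.load_extension("periodic_cats")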
```
|
{
"source": "jeffersonfs/Augmedical",
"score": 3
}
|
#### File: augmedical/mask/mask.py
```python
# from augmedical.filters.filters import BoxBlur
# import torch
#
#
# class UncertainMask:
# def __init__(self, channels=1, filter_size=3, iterations=1, alpha=1):
# """
# In medical imaging and segmentation tasks, the borders of the mask are more uncertain than the "solid" part.
# To reduce the impact of the exact segmentation, the label is reduced for those images.
#
# The idea is similar to Superpixel-Guided Label Softening for Medical Image Segmentation https://arxiv.org/pdf/2007.08897.pdf
# but we simply blur the mask with a box blur.
#
# this is a wrapper around boxblur...
# ----
# Args:
# channels (int, optional): [Number of channels to apply the filter to]. Defaults to 1.
# filter_size (int, optional): [description]. Defaults to 3.
# iterations (int, optional): [description]. Defaults to 1.
# """
#
# self.blur = BoxBlur(channels, p=1, kernel_size=filter_size, alpha=alpha, iterations=iterations)
#
# def __call__(self, tensor):
# if tensor.dim() == 3:
# c, w, h = tensor.shape
# return self.blur(tensor.view(1, c, w, h)).view(c, w, h)
#
# elif tensor.dim() == 4:
# return self.blur(tensor)
# else:
# raise Exception("not implemented", "currently only supporting 2 and 3 d images.")
#
#
# class LabelSmoothing:
# # https://github.com/pytorch/pytorch/issues/7455#issuecomment-631829085
# def __init__(self, smoothing_factor=0.9) -> None:
# self.smoothing_factor = smoothing_factor
#
# def __call__(self, labels):
# with torch.no_grad():
# confidence = 1.0 - self.smoothing_factor
#
# true_dist = torch.mul(labels, confidence)
# true_dist = torch.add(true_dist, self.smoothing_factor / (labels.shape[1] - 1))
#
# return true_dist
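#
# Illustrative sketch of the "blur the mask" idea described in the UncertainMask
# docstring above, assuming a float mask tensor of shape (N, C, H, W) with hard
# 0/1 labels and an odd kernel size:
#
# import torch.nn.functional as F
#
# def soften_mask(mask, kernel_size=3):
#     """Box-blur a hard mask so border pixels get soft labels in (0, 1)."""
#     pad = kernel_size // 2
#     return F.avg_pool2d(mask, kernel_size, stride=1, padding=pad)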
```
|
{
"source": "jeffersonfs/ivision",
"score": 3
}
|
#### File: jeffersonfs/ivision/utils.py
```python
from shapely.geometry import Polygon
from shapely.affinity import rotate
def iou_polygons(a, b):
    ''' Calculates Intersection Over Union (IOU) for two polygons a & b.
a and b should be a list of tuples of vertices [(x1,y1),...,(x22,y22)]
like this:
(x11,y11) ************* (x2,y2)
* *
* *
* *
(x1,y1) ************* (x22,y22)
'''
a = Polygon(a)
b = Polygon(b)
# Polygon of intersection
pol_i = a.intersection(b)
pol_u = a.union(b)
area_i = pol_i.area
area_u = pol_u.area
iou = float(area_i) / float(area_u + 1e-6)
return iou
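if __name__ == '__main__':
    # Illustrative check: two unit squares offset by 0.5 overlap on a 0.5 x 0.5
    # area, so IOU = 0.25 / 1.75, roughly 0.143.
    a = [(0, 0), (1, 0), (1, 1), (0, 1)]
    b = [(0.5, 0.5), (1.5, 0.5), (1.5, 1.5), (0.5, 1.5)]
    print(iou_polygons(a, b))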
```
|
{
"source": "JeffersonH44/deep-reinforcement-nd",
"score": 3
}
|
#### File: deep-reinforcement-nd/lab-taxi/agent.py
```python
import numpy as np
import random
from collections import defaultdict
class Agent:
def __init__(self, nA=6):
""" Initialize agent.
Params
======
- nA: number of actions available to the agent
"""
self.nA = nA
self.Q = defaultdict(lambda: np.zeros(self.nA))
self.episodes = 1
self.gamma = 0.77
self.alpha = 0.25
self.epsilon = 0.01
self.eps_decay = 0.9
def get_epsilon_greedy_action(self, state):
if random.random() > self.epsilon:
return np.argmax(self.Q[state])
else:
return random.choice(np.arange(self.nA))
def select_action(self, state):
""" Given the state, select an action.
Params
======
- state: the current state of the environment
Returns
=======
- action: an integer, compatible with the task's action space
"""
return self.get_epsilon_greedy_action(state)
def curr_func(self, state, action):
# for sarsa learning
#return self.Q[state][action]
# for Q learning
return max(self.Q[state])
def __update(self, state, action, reward, next_state, next_action):
Qsa_next = self.curr_func(next_state, next_action) if next_action is not None else 0.0
Qsa_current = self.Q[state][action]
target = reward + (self.gamma * Qsa_next)
return Qsa_current + self.alpha*(target - Qsa_current)
def step(self, state, action, reward, next_state, done):
""" Update the agent's knowledge, using the most recently sampled tuple.
Params
======
- state: the previous state of the environment
- action: the agent's previous choice of action
- reward: last reward received
- next_state: the current state of the environment
- done: whether the episode is complete (True or False)
"""
next_action = self.get_epsilon_greedy_action(next_state) if not done else None
self.Q[state][action] = self.__update(state, action, reward, next_state, next_action)
# after all updates, update episode
if done:
self.epsilon = self.epsilon*self.eps_decay
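# Illustrative interaction loop, assuming the classic OpenAI Gym API and the
# Taxi-v3 environment:
#     env = gym.make('Taxi-v3')
#     agent = Agent(nA=env.action_space.n)
#     state = env.reset()
#     action = agent.select_action(state)
#     next_state, reward, done, _ = env.step(action)
#     agent.step(state, action, reward, next_state, done)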
```
#### File: src/agents/dqn_agent.py
```python
import numpy as np
import random
from src.models import ModelFactory
from src.utils import get_seed, get_device
from hydra import compose, initialize
from enum import Enum
import torch
import torch.nn.functional as F
import torch.optim as optim
from src.replay import ReplayFactory
device = get_device()
class DQNAgent():
"""Interacts with and learns from the environment."""
def __init__(self, state_size, action_size):
"""Initialize an Agent object.
Params
======
state_size (int): dimension of each state
action_size (int): dimension of each action
"""
self.cfg = compose(config_name="agent")
self.cfg_model = compose(config_name="model")
self.state_size = state_size
self.action_size = action_size
seed = get_seed()
self.seed = random.seed(seed)
# Q-Network
network_kind = ModelFactory[self.cfg.network_kind].value
self.qnetwork_local = network_kind(state_size, action_size).to(device)
self.qnetwork_target = network_kind(state_size, action_size).to(device)
self.optimizer = optim.Adam(self.qnetwork_local.parameters(), lr=self.cfg_model.lr)
# Replay memory
self.replay_model = ReplayFactory[self.cfg.replay_kind]
self.memory = (self.replay_model.value)(action_size)
# Initialize time step (for updating every UPDATE_EVERY steps)
self.t_step = 0
# Define next q state calculator
NEXT_Q_STATE = {
"ddqn": self.__dqn_q_next_state,
"dqn": self.__ddqn_q_next_state
}
self.next_state_q_calculator = NEXT_Q_STATE[self.cfg.loss_kind]
# Define loss calculator function
LOSS_FUNCTION = {
"prioritized": self.__loss_for_prioritized_replay,
"uniform": self.__loss_for_uniform_replay
}
self.loss_calculator = LOSS_FUNCTION[self.cfg.replay_kind]
def step(self, state, action, reward, next_state, done):
# Save experience in replay memory
self.memory.add(state, action, reward, next_state, done)
# Learn every UPDATE_EVERY time steps.
self.t_step = (self.t_step + 1) % self.cfg.update_every
if self.t_step == 0:
# If enough samples are available in memory, get random subset and learn
if len(self.memory) > self.cfg_model.batch_size:
experiences = self.memory.sample()
self.learn(experiences, self.cfg.gamma)
def act(self, state, eps=0.):
"""Returns actions for given state as per current policy.
Params
======
state (array_like): current state
eps (float): epsilon, for epsilon-greedy action selection
"""
state = torch.from_numpy(state).float().unsqueeze(0).to(device)
self.qnetwork_local.eval()
with torch.no_grad():
action_values = self.qnetwork_local(state)
self.qnetwork_local.train()
# Epsilon-greedy action selection
if random.random() > eps:
return np.argmax(action_values.cpu().data.numpy())
else:
return random.choice(np.arange(self.action_size))
def learn(self, experiences, gamma):
"""Update value parameters using given batch of experience tuples.
Params
======
experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples
gamma (float): discount factor
"""
if self.replay_model == ReplayFactory.uniform:
states, actions, rewards, next_states, dones = experiences
indices, weights = (None, None)
else:
states, actions, rewards, next_states, dones, indices, weights = experiences
Q_targets_next_s = self.next_state_q_calculator(next_states)
Q_targets = rewards + (gamma * Q_targets_next_s * (1 - dones))
Q_expected = self.qnetwork_local(states).gather(1, actions)
loss = self.loss_calculator(
Q_expected,
Q_targets,
indices=indices,
weights=weights
)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
# ------------------- update target network ------------------- #
self.soft_update(self.qnetwork_local, self.qnetwork_target, self.cfg.tau)
def __loss_for_uniform_replay(self, Q_expected, Q_targets, **kwargs):
return F.mse_loss(Q_expected, Q_targets)
def __loss_for_prioritized_replay(self, Q_expected, Q_targets, **kwargs):
indices = kwargs['indices']
weights = kwargs['weights']
elementwise_loss = F.mse_loss(Q_expected, Q_targets, reduction="none")
loss_for_per = elementwise_loss.detach().cpu().flatten().tolist()
self.memory.update_priorities(indices, loss_for_per)
return torch.mean(weights * elementwise_loss)
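    # Vanilla DQN target: the target network both selects and evaluates the greedy action.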
def __dqn_q_next_state(self, next_states):
return self.qnetwork_target(next_states).detach().max(1).values.unsqueeze(1)
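    # Double DQN target: the online (local) network selects the action and the
    # target network evaluates it, reducing overestimation bias.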
def __ddqn_q_next_state(self, next_states):
return self.qnetwork_target(next_states).gather(1,
self.qnetwork_local(next_states).argmax(dim=1, keepdim=True)
)
def soft_update(self, local_model, target_model, tau):
"""Soft update model parameters.
θ_target = τ*θ_local + (1 - τ)*θ_target
Params
======
local_model (PyTorch model): weights will be copied from
target_model (PyTorch model): weights will be copied to
tau (float): interpolation parameter
"""
for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)
```
#### File: src/models/dueling_qnetwork.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from src.utils import get_seed
class DuelingQNetwork(nn.Module):
def __init__(self, state_size, action_size):
super().__init__()
self.seed = torch.manual_seed(get_seed())
self.V_fc1 = nn.Linear(state_size, 64)
self.V_fc2 = nn.Linear(64, 64)
self.V_fc3 = nn.Linear(64, 1)
self.A_fc1 = nn.Linear(state_size, 64)
self.A_fc2 = nn.Linear(64, 64)
self.A_fc3 = nn.Linear(64, action_size)
def forward(self, state):
x = F.relu(self.V_fc1(state))
x = F.relu(self.V_fc2(x))
state_value = self.V_fc3(x)
x = F.relu(self.A_fc1(state))
x = F.relu(self.A_fc2(x))
advantage_values = self.A_fc3(x)
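        # Dueling aggregation: Q(s, a) = V(s) + (A(s, a) - mean_a A(s, a));
        # subtracting the mean advantage keeps V and A identifiable.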
return state_value + (advantage_values - advantage_values.mean())
```
|
{
"source": "JeffersonH44/self-driving-car-ros",
"score": 2
}
|
#### File: celsius/scripts/reader.py
```python
import rospy
import re
import capnp
import zmq
from can_msgs.msg import Frame
from struct import unpack
from binascii import unhexlify
from std_msgs.msg import Float64, UInt8, UInt32
# capnp
capnp.remove_import_hook()
car_capnp = capnp.load('./src/celsius/msg/msg_capnp/car.capnp')
# zmq
PORT_CAR_STATE = 8021
IP = '*'
address = "tcp://%s:%d" % (IP, PORT_CAR_STATE)
print address
context = zmq.Context()
pub = context.socket(zmq.PUB)
pub.bind(address)
class Enumeration(set):
def __getattr__(self, name):
if name in self:
return name
raise AttributeError
def __setattr__(self, name, value):
raise RuntimeError("Cannot override values")
def __delattr__(self, name):
raise RuntimeError("Cannot delete values")
Constants = Enumeration(['TYPE', 'TOPIC', 'START', 'SIZE', 'ENDIAN', 'SIGN', 'SCALE', 'OFFSET', 'MIN', 'MAX', 'UNITS'])
MAX_SIZE_BITS = 64
TOPICS = {
514: {
'SPEED': {
Constants.TYPE: UInt32
}
},
145: {
'NEW_SIGNAL_2': {
Constants.TYPE: Float64
},
'_DIRECTION': {
Constants.TYPE: Float64
}
},
125: {
'FULL_BRAKES': {
Constants.TYPE: UInt8
},
'BRAKE_THROTTLE': {
Constants.TYPE: UInt8
}
}
}
file = './src/celsius/scripts/ford.dbc'
def create_topics():
rospy.init_node('publisher', anonymous=True)
for topic in TOPICS.keys():
current_topic = TOPICS[topic]
for name in TOPICS[topic].keys():
print 'current topic name', name
current_topic[name][Constants.TOPIC] = rospy.Publisher(name.strip('_'), current_topic[name][Constants.TYPE], queue_size=10)
def parse_number(data, options):
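    """Decode one CAN frame: render the 8 data bytes as a 64-bit binary string,
    slice out each signal in ``options`` by its DBC start bit and size (reversed
    when flagged big endian), convert it to an integer, publish it on its ROS
    topic, and broadcast a capnp CarState (with vEgo taken from SPEED) over ZMQ."""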
car_state = car_capnp.CarState.new_message()
hex_num = ''.join(data).encode('hex')
bits = int(hex_num, 16)
spec = '{fill}{align}{width}{type}'.format(fill='0', align='>', width=64, type='b')
bits = format(bits, spec)
## bits = bin(int(''.join(data).encode('hex'), 16))
output = {}
for elem in options.keys():
current_settings = options[elem]
start = current_settings[Constants.START]
end = start + current_settings[Constants.SIZE]
current_bits = bits[start:(end if end < MAX_SIZE_BITS else MAX_SIZE_BITS)]
        # check signed (+: unsigned, -: signed)
if(current_settings[Constants.ENDIAN]): # is big endian
current_bits = current_bits[::-1]
output[elem] = int(current_bits, 2)# * current_settings[Constants.SCALE] + current_settings[Constants.OFFSET]
current_settings[Constants.TOPIC].publish(output[elem])
if elem == 'SPEED':
car_state.vEgo = output[elem]
# publish zmq 8021 for car state
print car_state
pub.send(car_state.to_bytes())
return output
def import_format(lines):
bo_regexp = re.compile("^BO\_ (\w+) (\w+) *: (\w+) (\w+)")
sg_regexp = re.compile("^SG\_ (\w+) : (\d+)\|(\d+)@(\d+)([\+|\-]) \(([0-9.+\-eE]+),([0-9.+\-eE]+)\) \[([0-9.+\-eE]+)\|([0-9.+\-eE]+)\] \"(.*)\" (.*)")
sgm_regexp = re.compile("^SG\_ (\w+) (\w+) *: (\d+)\|(\d+)@(\d+)([\+|\-]) \(([0-9.+\-eE]+),([0-9.+\-eE]+)\) \[([0-9.+\-eE]+)\|([0-9.+\-eE]+)\] \"(.*)\" (.*)")
i = 0
while i < len(lines):
current_line = lines[i].strip()
i += 1
if current_line.startswith('BO_ '):
print current_line
data = bo_regexp.match(current_line)
id = int(data.group(1))
if id in TOPICS.keys():
print "process id:", id
current_dic = TOPICS[id]
while current_line != '':
current_line = lines[i].strip()
i += 1
print current_line
if current_line.startswith('SG_'):
data = sg_regexp.match(current_line)
elem = data.group(1)
if elem in current_dic.keys():
to_fill = current_dic[elem]
to_fill[Constants.START] = int(data.group(2))
to_fill[Constants.SIZE] = int(data.group(3))
to_fill[Constants.ENDIAN] = int(data.group(4))
to_fill[Constants.SIGN] = data.group(5)
to_fill[Constants.SCALE] = float(data.group(6))
to_fill[Constants.OFFSET] = float(data.group(7))
to_fill[Constants.MIN] = float(data.group(8))
to_fill[Constants.MAX] = float(data.group(9))
to_fill[Constants.UNITS] = data.group(10)
print TOPICS
def callback(data):
if data.id in TOPICS.keys():
hex = data.data
print parse_number(hex, TOPICS[data.id])
# print data.id, int(''.join(hex).encode('hex')[-4:], 16) * 0.01
# rospy.loginfo(rospy.get_caller_id() + "I heard %s", ''.join(output.data).encode('hex'))
else:
pass
#print data.id
def listener():
# In ROS, nodes are uniquely named. If two nodes with the same
    # name are launched, the previous one is kicked off. The
# anonymous=True flag means that rospy will choose a unique
# name for our 'listener' node so that multiple listeners can
# run simultaneously.
# rospy.init_node('listener', anonymous=True)
rospy.Subscriber("received_messages", Frame, callback)
# spin() simply keeps python from exiting until this node is stopped
rospy.spin()
if __name__ == '__main__':
with open(file) as f:
content = f.readlines()
import_format(content)
create_topics()
listener()
```
|
{
"source": "jeffersonHsieh/hiersumm",
"score": 3
}
|
#### File: src/abstractive/attn.py
```python
import math
import torch
import torch.nn as nn
class MultiHeadedAttention(nn.Module):
"""
Multi-Head Attention module from
"Attention is All You Need"
:cite:`DBLP:journals/corr/VaswaniSPUJGKP17`.
Similar to standard `dot` attention but uses
    multiple attention distributions simultaneously
to select relevant items.
.. mermaid::
graph BT
A[key]
B[value]
C[query]
O[output]
subgraph Attn
D[Attn 1]
E[Attn 2]
F[Attn N]
end
A --> D
C --> D
A --> E
C --> E
A --> F
C --> F
D --> O
E --> O
F --> O
B --> O
Also includes several additional tricks.
Args:
head_count (int): number of parallel heads
model_dim (int): the dimension of keys/values/queries,
must be divisible by head_count
dropout (float): dropout parameter
"""
def __init__(self, head_count, model_dim, dropout=0.1, use_final_linear=True):
assert model_dim % head_count == 0
self.dim_per_head = model_dim // head_count
self.model_dim = model_dim
super(MultiHeadedAttention, self).__init__()
self.head_count = head_count
self.linear_keys = nn.Linear(model_dim,
head_count * self.dim_per_head) #batch_size * src_len* model_dim --> batch_size * src_len * model_dim
self.linear_values = nn.Linear(model_dim,
head_count * self.dim_per_head)
self.linear_query = nn.Linear(model_dim,
head_count * self.dim_per_head)
self.softmax = nn.Softmax(dim=-1)
self.dropout = nn.Dropout(dropout)
self.use_final_linear = use_final_linear
if(self.use_final_linear):
self.final_linear = nn.Linear(model_dim, model_dim)
def forward(self, key, value, query, mask=None,
layer_cache=None, type=None):
"""
Compute the context vector and the attention vectors.
Args:
key (`FloatTensor`): set of `key_len`
key vectors `[batch, key_len, dim]`
value (`FloatTensor`): set of `key_len`
value vectors `[batch, key_len, dim]`
query (`FloatTensor`): set of `query_len`
query vectors `[batch, query_len, dim]`
mask: binary mask indicating which keys have
non-zero attention `[batch, query_len, key_len]`
        Returns:
           `FloatTensor`:
           * output context vectors `[batch, query_len, dim]` when
             ``use_final_linear`` is True, otherwise the per-head context
             `[batch, head_count, query_len, dim_per_head]`
"""
# CHECKS
# batch, k_len, d = key.size()
# batch_, k_len_, d_ = value.size()
# aeq(batch, batch_)
# aeq(k_len, k_len_)
# aeq(d, d_)
# batch_, q_len, d_ = query.size()
# aeq(batch, batch_)
# aeq(d, d_)
# aeq(self.model_dim % 8, 0)
# if mask is not None:
# batch_, q_len_, k_len_ = mask.size()
# aeq(batch_, batch)
# aeq(k_len_, k_len)
# aeq(q_len_ == q_len)
# END CHECKS
batch_size = key.size(0)
dim_per_head = self.dim_per_head
head_count = self.head_count
key_len = key.size(1)
query_len = query.size(1)
def shape(x):
""" projection """
return x.view(batch_size, -1, head_count, dim_per_head) \
.transpose(1, 2)
def unshape(x):
""" compute context """
return x.transpose(1, 2).contiguous() \
.view(batch_size, -1, head_count * dim_per_head)
# 1) Project key, value, and query.
if layer_cache is not None:
if type == "self":
query, key, value = self.linear_query(query),\
self.linear_keys(query),\
self.linear_values(query)
key = shape(key)
value = shape(value)
if layer_cache is not None:
device = key.device
if layer_cache["self_keys"] is not None:
key = torch.cat(
(layer_cache["self_keys"].to(device), key),
dim=2)
if layer_cache["self_values"] is not None:
value = torch.cat(
(layer_cache["self_values"].to(device), value),
dim=2)
layer_cache["self_keys"] = key
layer_cache["self_values"] = value
elif type == "context":
query = self.linear_query(query)
if layer_cache is not None:
if layer_cache["memory_keys"] is None:
key, value = self.linear_keys(key),\
self.linear_values(value)
key = shape(key)
value = shape(value)
else:
key, value = layer_cache["memory_keys"],\
layer_cache["memory_values"]
layer_cache["memory_keys"] = key
layer_cache["memory_values"] = value
else:
key, value = self.linear_keys(key),\
self.linear_values(value)
key = shape(key)
value = shape(value)
else:
key = self.linear_keys(key)
value = self.linear_values(value)
query = self.linear_query(query)
key = shape(key)
value = shape(value)
query = shape(query)
key_len = key.size(2)
query_len = query.size(2)
# 2) Calculate and scale scores.
query = query / math.sqrt(dim_per_head)
scores = torch.matmul(query, key.transpose(2, 3))
if mask is not None:
mask = mask.unsqueeze(1).expand_as(scores)
scores = scores.masked_fill(mask, -1e18)
# 3) Apply attention dropout and compute context vectors.
attn = self.softmax(scores)
drop_attn = self.dropout(attn)
if(self.use_final_linear):
context = unshape(torch.matmul(drop_attn, value))
output = self.final_linear(context)
return output
else:
context = torch.matmul(drop_attn, value)
return context
# CHECK
# batch_, q_len_, d_ = output.size()
# aeq(q_len, q_len_)
# aeq(batch, batch_)
# aeq(d, d_)
# Return one attn
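# Editor's note: a minimal usage sketch (not part of the original file), assuming
# the torch/nn/math imports above. With use_final_linear=True (the default) the
# module returns a tensor of shape [batch, query_len, model_dim]:
#
#   attn = MultiHeadedAttention(head_count=8, model_dim=256)
#   x = torch.rand(2, 5, 256)
#   out = attn(x, x, x)          # forward(key, value, query) -> [2, 5, 256]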
class MultiHeadedPooling(nn.Module):
def __init__(self, head_count, model_dim, dropout=0.1, use_final_linear=True):
assert model_dim % head_count == 0
self.dim_per_head = model_dim // head_count
self.model_dim = model_dim
super(MultiHeadedPooling, self).__init__()
self.head_count = head_count
self.linear_keys = nn.Linear(model_dim,
head_count)
self.linear_values = nn.Linear(model_dim,
head_count * self.dim_per_head)
self.softmax = nn.Softmax(dim=-1)
self.dropout = nn.Dropout(dropout)
if (use_final_linear):
self.final_linear = nn.Linear(model_dim, model_dim)
self.use_final_linear = use_final_linear
def forward(self, key, value, mask=None):
batch_size = key.size(0)
dim_per_head = self.dim_per_head
head_count = self.head_count
def shape(x, dim=dim_per_head):
""" projection """
return x.view(batch_size, -1, head_count, dim) \
.transpose(1, 2)
def unshape(x, dim=dim_per_head):
""" compute context """
return x.transpose(1, 2).contiguous() \
.view(batch_size, -1, head_count * dim)
scores = self.linear_keys(key)
value = self.linear_values(value)
scores = shape(scores, 1).squeeze(-1)
value = shape(value)
# key_len = key.size(2)
# query_len = query.size(2)
#
# scores = torch.matmul(query, key.transpose(2, 3))
if mask is not None:
mask = mask.unsqueeze(1).expand_as(scores)
scores = scores.masked_fill(mask, -1e18) #[batch_size * src_len]
# 3) Apply attention dropout and compute context vectors.
attn = self.softmax(scores)
drop_attn = self.dropout(attn)
context = torch.sum((drop_attn.unsqueeze(-1) * value), -2) #[batch_size * src_len * 1] *
if (self.use_final_linear):
context = unshape(context).squeeze(1)
output = self.final_linear(context)
return output
else:
return context
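# Editor's note: a minimal usage sketch (not part of the original file). The
# pooling module collapses the length dimension, so with use_final_linear=True
# an input of shape [batch, src_len, model_dim] yields [batch, model_dim]:
#
#   pool = MultiHeadedPooling(head_count=8, model_dim=256)
#   x = torch.rand(2, 5, 256)
#   out = pool(x, x)             # forward(key, value) -> [2, 256]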
```
#### File: hiersumm/src/test_extsum_model.py
```python
import torch
from abstractive.model_builder import ExtSummarizer
from abstractive.data_loader import load_dataset
import sentencepiece
spm = sentencepiece.SentencePieceProcessor()
spm.Load('models/spm9998_3.model')
word_padding_idx = spm.PieceToId('<PAD>')
symbols = {'BOS': spm.PieceToId('<S>'), 'EOS': spm.PieceToId('</S>'), 'PAD': word_padding_idx,
'EOT': spm.PieceToId('<T>'), 'EOP': spm.PieceToId('<P>'), 'EOQ': spm.PieceToId('<Q>')}
print(symbols)
vocab_size = len(spm)
class Namespace:
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
args = Namespace(
    accum_count=4, alpha=0, batch_size=10500, beam_size=5, beta1=0.9, beta2=0.998,
    data_path='ptdata/WIKI', dataset='', dec_dropout=0.1, dec_hidden_size=256,
    dec_layers=1, decay_method='noam', emb_size=256, enc_dropout=0.1,
    enc_hidden_size=256, enc_layers=8, extractive=False, ff_size=1024,
    gpu_ranks=[0], heads=8, hier=True, inter_heads=8, inter_layers=[6, 7],
    label_smoothing=0.1, length_penalty='wu', log_file='log.txt', lr=3,
    max_generator_batches=32, max_grad_norm=0, max_length=250, max_wiki=5,
    min_length=20, mode='train', model_path='checkpoints/', n_best=1, optim='adam',
    report_every=100, report_rouge=False, result_path='../../results',
    save_checkpoint_steps=5000, seed=666, share_decoder_embeddings=True,
    share_embeddings=True, test_all=False, test_from='../../results',
    train_from='checkpoints/model_step_100000.pt', train_steps=1000000,
    trunc_src_nblock=24, trunc_src_ntoken=500, trunc_tgt_ntoken=400,
    valid_batch_size=10000, visible_gpus='5', warmup_steps=8000, world_size=1)
d_model = 256
args.train_from = 'models/wikisum_model_step_500000.pt'
checkpoint = torch.load(args.train_from, map_location=lambda storage, loc: storage)
model = ExtSummarizer(args, word_padding_idx, vocab_size, 'cuda', checkpoint)
```
|
{
"source": "jeffersonHsieh/tapas",
"score": 2
}
|
#### File: tapas/retrieval/e2e_eval_utils.py
```python
import ast
import collections
import dataclasses
import os
import re
import string
from typing import Any, Iterable, List, Mapping, Optional, Set, Text, Tuple
from absl import logging
import numpy as np
from tapas.experiments import prediction_utils as xprediction_utils
from tapas.protos import interaction_pb2
from tapas.scripts import prediction_utils
from tapas.utils import hybridqa_utils
from tapas.utils import text_utils
from official.nlp.bert import tokenization
@dataclasses.dataclass(frozen=True)
class EvalResult:
"""Retrieval + QA eval results."""
table_accuracy: float
table_precision: float
table_recall: float
answer_accuracy: float
answer_accuracy_table: Optional[float]
answer_accuracy_passage: Optional[float]
answer_precision: float
answer_token_precision: float
answer_token_recall: float
answer_token_f1: float
answer_token_f1_passage: Optional[float]
answer_token_f1_table: Optional[float]
oracle_answer_token_f1: float
oracle_answer_accuracy: float
def to_dict(self):
return dataclasses.asdict(self)
@dataclasses.dataclass(frozen=True)
class ScoredCandidate:
interaction: interaction_pb2.Interaction
answer: Text
score: float
@dataclasses.dataclass(frozen=True)
class Wordpiece:
start: int
end: int
token_id: int
def _find(subpieces, pieces):
"""Returns the index for which subpieces starts as a sublist of pieces or -1."""
for i in range(len(pieces) - len(subpieces) + 1):
if pieces[i:i + len(subpieces)] == subpieces:
return i
return -1
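# Editor's note (hedged example, not part of the original file):
# _find([3, 4], [1, 2, 3, 4, 5]) returns 2, while _find([9], [1, 2, 3]) returns -1.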
class DeTokenizer:
"""Detokenizes a list of pieces in the context of a table.
This is needed because the Standard Bert tokenization is lossy.
For example, it will add spaces around white space (3.5 -> 3 . 5) and
remove diacritics.
This class tries to find a list of pieces in a tokenized table and returns
the smallest byte span in the original cell text that yields the pieces
when tokenized.
"""
def __init__(self, vocab_file, do_lower_case=True, split_on_punc=True):
self._whitespace_tokenizer = tokenization.BasicTokenizer(
do_lower_case=False, split_on_punc=False)
self._punctuation_tokenizer = tokenization.BasicTokenizer(
do_lower_case=False, split_on_punc=split_on_punc)
self._full_tokenizer = tokenization.FullTokenizer(
vocab_file, do_lower_case=do_lower_case, split_on_punc=split_on_punc)
self._vocab = list(self._full_tokenizer.vocab.keys())
def _tokenize_text(self, text):
"""Tokenizes text by white-space, punctuation and then into word pieces."""
tokens = []
whitespace_normalized_text = ""
for whitespace_token in self._whitespace_tokenizer.tokenize(text):
for punctuation_token in self._punctuation_tokenizer.tokenize(
whitespace_token):
start = len(whitespace_normalized_text)
end = start + len(punctuation_token)
for wordpiece in self._full_tokenizer.tokenize(punctuation_token):
token_id = self._full_tokenizer.vocab[wordpiece]
tokens.append(Wordpiece(start=start, end=end, token_id=token_id))
whitespace_normalized_text += punctuation_token
whitespace_normalized_text += " "
return tokens, whitespace_normalized_text
def _convert_ids_to_text(
self,
token_ids,
):
"""Maps token ids to text."""
tokens = [self._vocab[token_id] for token_id in token_ids]
result = []
for token in tokens:
if token.startswith("##"):
result.append(token[2:])
else:
if result:
result.append(" ")
result.append(token)
return "".join(result)
def _get_unique_texts(
self,
table,
):
"""Yields all cell texts of `table` without duplicates."""
texts = set()
for row in table.rows:
for cell in row.cells:
if cell.text not in texts:
texts.add(cell.text)
yield cell.text
def _detokenize_text(
self,
text,
token_ids,
):
""""Searches for 'token_ids' in the tokenzied 'text'."""
tokens, normalized_text = self._tokenize_text(text)
logging.info(
"tokens: %s normalized text: %s token ids: %s",
tokens,
normalized_text,
token_ids,
)
start_token_index = _find(token_ids, [token.token_id for token in tokens])
if start_token_index < 0:
return None
end_token_index = start_token_index + len(token_ids) - 1
start_token = tokens[start_token_index]
end_token = tokens[end_token_index]
return normalized_text[start_token.start:end_token.end]
def detokenize(
self,
table,
pieces,
):
"""Detokenizes a list of pieces in a given table."""
for text in self._get_unique_texts(table):
detokenized_text = self._detokenize_text(text, pieces)
if detokenized_text is not None:
return detokenized_text
# Tokens couldn't be found in the table.
# We back off to direct detokenization
text = self._convert_ids_to_text(pieces)
logging.warning(
"Backing off to direct detokenization: '%s' ('%s')",
pieces,
text,
)
return text
def detokenize_token_answer(
self,
table,
token_answer,
):
"""Tries to search for the token ids in the specified cell."""
column = token_answer.column_index
row = token_answer.row_index
pieces = token_answer.token_ids
if 0 <= column < len(table.columns) and 0 <= row < len(table.rows):
cell = table.rows[row].cells[column]
text = self._detokenize_text(cell.text, pieces)
if text is not None:
return text
logging.error(
"Token answer not contained in specified cell: '%s' answer: '%s'",
cell.text,
self._convert_ids_to_text(pieces),
)
else:
logging.error(
"Invalid cell coordinates: "
"column: %d row: %d vs table columns: %d table rows: %d",
column,
row,
len(table.columns),
len(table.rows),
)
# Back-off to searching for the correct cell.
return self.detokenize(table, pieces)
def _get_example_id(question_id):
example_id, _, _ = text_utils.parse_question_id(question_id)
return example_id
def _guess_vocab_file(interaction_file):
interaction_dir = os.path.dirname(interaction_file)
vocab_dir = os.path.dirname(interaction_dir)
vocab_file = os.path.join(vocab_dir, "vocab.txt")
return vocab_file
def _is_valid_answer(token_ids):
if not token_ids:
return False
  # Token id 1 corresponds to the [EMPTY] token.
if len(token_ids) == 1 and token_ids[0] == 1:
return False
return True
def _get_reconstructed_answer_text(
detokenizer,
table,
prediction,
):
"""Reconstructs answer text using the detokenizer."""
if "answers" in prediction:
token_answers = xprediction_utils.token_answers_from_text(
prediction["answers"])
# Remove empty answers.
answers = [
detokenizer.detokenize_token_answer(table, token_answer)
for token_answer in token_answers
if _is_valid_answer(token_answer.token_ids)
]
if not answers:
return None
return _list_to_answer(answers)
# Support for legacy prediction files.
if "answer" in prediction:
answer_ids = ast.literal_eval(prediction["answer"])
if not _is_valid_answer(answer_ids):
return None
return detokenizer.detokenize(table, answer_ids)
raise ValueError("No answer value in prediction.")
def _get_scored_candidates(
detokenizer,
interactions,
predictions_by_qid,
):
"""Returns highest scoring interaction with answers."""
for interaction in interactions:
qid = interaction.questions[0].id
prediction = predictions_by_qid.get(qid, None)
if prediction is not None:
answer = _get_reconstructed_answer_text(
detokenizer,
interaction.table,
prediction,
)
if answer is not None:
# Assign score as 0.0 when "logits_cls" not present in predictions.
score = float(prediction.get("logits_cls", 0.0))
yield ScoredCandidate(
interaction=interaction,
answer=answer,
score=score,
)
def _is_correct_table(interaction):
return interaction.questions[0].answer.class_index == 1
def _list_to_answer(texts):
return " ".join(text for text in sorted(texts) if text)
def _get_answer_texts(
interactions):
"""Returns answer text."""
answer_texts = set()
for interaction in interactions:
question = interaction.questions[0]
for answer in [question.answer] + list(question.alternative_answers):
text = _list_to_answer(answer.answer_texts)
if text:
answer_texts.add(text)
return answer_texts
def _remove_articles(text):
regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
return re.sub(regex, " ", text)
def _fix_whitespace(text):
return " ".join(text.split())
def _remove_punctuation(text):
return text.translate(str.maketrans("", "", string.punctuation))
def _lower(text):
return text.lower()
def _normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
return _fix_whitespace(_remove_articles(_remove_punctuation(_lower(s))))
def _tokenize(s):
if not s:
return []
return _normalize_answer(s).split()
def _compute_exact(a_gold, a_pred):
return int(_normalize_answer(a_gold) == _normalize_answer(a_pred))
def _compute_f1(a_gold, a_pred):
"""Computes token-based precision, recall and f-score."""
gold_toks = _tokenize(a_gold)
pred_toks = _tokenize(a_pred)
common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
num_same = sum(common.values())
precision = num_same / len(pred_toks) if pred_toks else 1.0
recall = num_same / len(gold_toks) if gold_toks else 1.0
if precision + recall < 1.0e-5:
return 0.0, 0.0, 0.0
f1 = (2.0 * precision * recall) / (precision + recall)
return precision, recall, f1
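# Editor's note: a worked example (not part of the original file). After
# _normalize_answer, "The Eiffel Tower" and "eiffel tower" compare equal, so
# _compute_exact(...) == 1. For _compute_f1("New York City", "New York") the
# shared tokens are {new, york}: precision = 2/2 = 1.0, recall = 2/3 ~ 0.667,
# f1 = 2 * 1.0 * 0.667 / 1.667 = 0.8.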
def _mean(values):
if not values:
return 1.0
return np.mean(values)
def _get_best_metrics(
reference_answer_texts,
best_answer_text,
):
"""Evaluates prediction against all references."""
metrics = []
for reference_answer_text in reference_answer_texts:
# returns x \in [0,1]
pr, rc, f1 = _compute_f1(
reference_answer_text,
best_answer_text,
)
# return 0 or 1
em = _compute_exact(
reference_answer_text,
best_answer_text,
)
metrics.append({
"exact": em,
"f1": f1,
"precision": pr,
"recall": rc,
})
return max(metrics, key=lambda m: m["f1"])
def _get_question(
candidates,):
if not candidates:
return None
candidate = candidates[0]
if not candidate.questions:
return None
return candidate.questions[0].original_text
def _get_oracle_metrics(
candidates,
reference_answer_texts,
metric,
):
"""Get best value for metric."""
if not candidates:
return 0.0
values = []
for candidate in candidates:
metrics = _get_best_metrics(reference_answer_texts, candidate.answer)
values.append(metrics[metric])
return max(values)
def is_table_answer(question_id,
references):
return False if references is None else question_id in references["table"]
def is_passage_answer(question_id,
references):
return False if references is None else question_id in references["passage"]
def _evaluate_retrieval_e2e(
vocab_file,
interaction_list,
predictions,
references = None):
"""Computes e2e retrieval-QA metrics."""
detokenizer = DeTokenizer(vocab_file)
interactions = collections.defaultdict(list)
for interaction in interaction_list:
qid = interaction.questions[0].id
interactions[_get_example_id(qid)].append(interaction)
predictions_by_qid = {}
for prediction in predictions:
qid = prediction["question_id"]
predictions_by_qid[qid] = prediction
num_correct_tables = 0
num_pred_tables = 0
token_precisions = []
token_recalls = []
token_f1_scores = []
token_f1_scores_table = []
token_f1_scores_passage = []
token_correct = []
token_correct_table = []
token_correct_passage = []
oracle_token_f1_scores = []
oracle_token_correct = []
num_pred_answers = 0
num_examples_with_reference_table = 0
num_errors_logged = 0
# -----debug------
# pred_output = []
# -----debug ends ------
for candidates in interactions.values():
logging.log_every_n(
logging.INFO,
"Processed: %5d / %5d",
1000,
len(token_correct),
len(interactions),
)
reference_answer_texts = _get_answer_texts(candidates)
if not reference_answer_texts:
token_precisions.append(0.0)
token_recalls.append(0.0)
token_f1_scores.append(0.0)
token_correct.append(0.0)
continue
for interaction in candidates:
if _is_correct_table(interaction):
num_examples_with_reference_table += 1
break
results = list(
_get_scored_candidates(
detokenizer,
candidates,
predictions_by_qid,
))
if not results:
logging.log_every_n(logging.INFO, "No candidate.", 1000)
best_answer_text = ""
example_id = "NOT_FOUND"
else:
best_result = max(results, key=lambda result: result.score)
example_id = text_utils.get_example_id(best_result.interaction.id)
best_answer_text = best_result.answer
num_pred_tables += 1
      # We reconstruct the original text. This can be problematic, for example,
      # when the answer contains the "[UNK]" token.
if best_answer_text:
num_pred_answers += 1
if _is_correct_table(best_result.interaction):
num_correct_tables += 1
# -----debug------
# out_results = []
# for res in results:
# mets = _get_best_metrics(reference_answer_texts, res.answer)
# out_results.append({
# 'qid':res.interaction.questions[0].id,
# 'table_correct':_is_correct_table(res.interaction),
# 'pred': res.answer,
# 'pred_score': res.score,
# 'correct':mets['exact'],
# 'f1':mets['f1']})
# pred_output.append(out_results)
# -----debug ends------
metrics = _get_best_metrics(reference_answer_texts, best_answer_text)
token_precisions.append(metrics["precision"])
token_recalls.append(metrics["recall"])
token_f1_scores.append(metrics["f1"])
token_correct.append(metrics["exact"])
if is_passage_answer(example_id, references):
token_f1_scores_passage.append(metrics["f1"])
token_correct_passage.append(metrics["exact"])
if is_table_answer(example_id, references):
token_f1_scores_table.append(metrics["f1"])
token_correct_table.append(metrics["exact"])
oracle_token_correct.append(
_get_oracle_metrics(
results,
reference_answer_texts,
metric="exact",
))
oracle_token_f1_scores.append(
_get_oracle_metrics(
results,
reference_answer_texts,
metric="f1",
))
if metrics["exact"] != 1 and num_errors_logged < 100:
num_errors_logged += 1
logging.info("question: '%s' references: %s prediction: '%s'",
_get_question(candidates), reference_answer_texts,
best_answer_text)
# -----debug------
# import pdb;pdb.set_trace()
# split = 'test'
# outpath = os.path.join(os.path.dirname(
# os.path.dirname(
# os.path.abspath(vocab_file
# )
# )
# ),f'{split}_marked_reader_output.json')
# import json
# with open(outpath,'w') as f:
# json.dump(pred_output,f)
# -----debug ends------
precision = _mean(token_precisions)
recall = _mean(token_recalls)
f1_score = _mean(token_f1_scores)
answer_accuracy = _mean(token_correct)
if references is not None:
f1_score_table = _mean(token_f1_scores_table)
f1_score_passage = _mean(token_f1_scores_passage)
answer_accuracy_table = _mean(token_correct_table)
answer_accuracy_passage = _mean(token_correct_passage)
else:
f1_score_table = None
f1_score_passage = None
answer_accuracy_table = None
answer_accuracy_passage = None
oracle_f1_score = _mean(oracle_token_f1_scores)
oracle_answer_accuracy = _mean(oracle_token_correct)
total = len(interactions)
table_accuracy = num_correct_tables / total if total > 0 else 1.0
table_precision = 1.0
if num_pred_tables > 0:
table_precision = num_correct_tables / num_pred_tables
table_recall = 1.0
if num_examples_with_reference_table > 0:
table_recall = num_correct_tables / num_examples_with_reference_table
answer_precision = 1.0
if num_pred_answers > 0:
answer_precision = sum(token_correct) / num_pred_answers
if total:
oracle_table_accuracy = num_examples_with_reference_table / total
logging.info("oracle table accuracy: %s", oracle_table_accuracy)
return EvalResult(
table_accuracy=table_accuracy,
table_precision=table_precision,
table_recall=table_recall,
answer_accuracy=answer_accuracy,
answer_accuracy_table=answer_accuracy_table,
answer_accuracy_passage=answer_accuracy_passage,
answer_precision=answer_precision,
answer_token_precision=precision,
answer_token_recall=recall,
answer_token_f1=f1_score,
answer_token_f1_table=f1_score_table,
answer_token_f1_passage=f1_score_passage,
oracle_answer_token_f1=oracle_f1_score,
oracle_answer_accuracy=oracle_answer_accuracy)
def evaluate_retrieval_e2e(
interaction_file,
prediction_file,
references_file = None,
vocab_file = None,
):
"""Computes e2e retrieval-QA metrics."""
vocab_file = vocab_file or _guess_vocab_file(interaction_file)
references = None
if references_file is not None:
references = hybridqa_utils.get_hybridqa_references(references_file)
logging.info("Vocab file: %s ", vocab_file)
logging.info("Read: %s ", interaction_file)
interactions = prediction_utils.iterate_interactions(interaction_file)
logging.info("Read: %s ", prediction_file)
predictions = prediction_utils.iterate_predictions(prediction_file)
return _evaluate_retrieval_e2e(vocab_file, interactions, predictions,
references)
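# Editor's note: a usage sketch with hypothetical file paths (not from the
# original repo). The function returns an EvalResult dataclass:
#
#   result = evaluate_retrieval_e2e("interactions/test.tfrecord", "test_predictions.tsv")
#   print(result.to_dict())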
# TODO(eisenjulian): merge this with _evaluate_retrieval_e2e() or pull the
# common logic in a separate function and call that in _evaluate_retrieval_e2e.
def generate_hybridqa_codalab_predictions(
interaction_file,
prediction_file):
"""Generates Codaab prediction files for HybridQA Competition.
This function generates the json prediction files used to submit to HybridQA
competition hosted on Codalab. (go/hybridqa-competition)
Args:
interaction_file: A TF record file containing the examples as interactions.
prediction_file: A TSV file that is the output of the table-classifier
predict job on the input interactions.
Yields:
An iterable of json serializable python dicts.
"""
vocab_file = _guess_vocab_file(interaction_file)
logging.info("Vocab file: %s ", vocab_file)
logging.info("Read: %s ", interaction_file)
interactions = prediction_utils.iterate_interactions(interaction_file)
logging.info("Read: %s ", prediction_file)
predictions = prediction_utils.iterate_predictions(prediction_file)
detokenizer = DeTokenizer(vocab_file)
interactions_by_qid = collections.defaultdict(list)
for interaction in interactions:
qid = interaction.questions[0].id
interactions_by_qid[_get_example_id(qid)].append(interaction)
predictions_by_qid = {}
for prediction in predictions:
qid = prediction["question_id"]
# TODO(eisenjulian): Select the best answer using model scores.
predictions_by_qid[qid] = prediction
for qid, candidates in interactions_by_qid.items():
answer_text = ""
results = list(
_get_scored_candidates(
detokenizer,
candidates,
predictions_by_qid,
))
example_id = text_utils.get_example_id(qid)
if results:
best_result = max(results, key=lambda result: result.score)
answer_text = best_result.answer
yield {"question_id": example_id, "pred": answer_text}
```
|
{
"source": "jeffersonkr/drink_flow_control",
"score": 2
}
|
#### File: drink_flow_control/core/tasks.py
```python
from celery.schedules import crontab
from celery.task import periodic_task
from .models.user import User
# Note: crontab(hour=0, minute=40) fires every day at 00:40, not only on
# Mondays, despite the function name below.
@periodic_task(run_every=crontab(hour=0, minute=40))
def every_monday_morning():
users = User.objects.all()
for user in users:
user.total_drunk_today = user.total_water_per_day
user.save()
```
|
{
"source": "jeffersonkr/pycpfcnpj",
"score": 4
}
|
#### File: pycpfcnpj/pycpfcnpj/cnpj.py
```python
from . import calculation as calc
from . import compatible as compat
@compat.check_special_characters
def validate(cnpj_number):
"""This function validates a CNPJ number.
This function uses calculation package to calculate both digits
and then validates the number.
:param cnpj_number: a CNPJ number to be validated. Only numbers.
:type cnpj_number: string
:return: Bool -- True for a valid number, False otherwise.
"""
_cnpj = compat.clear_punctuation(cnpj_number)
if len(_cnpj) != 14 or len(set(_cnpj)) == 1:
return False
first_part = _cnpj[:12]
second_part = _cnpj[:13]
first_digit = _cnpj[12]
second_digit = _cnpj[13]
if first_digit == calc.calculate_first_digit(
first_part
) and second_digit == calc.calculate_second_digit(second_part):
return True
return False
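# Editor's note: example values taken from the test suite below --
# validate("11.444.777/0001-61") returns True (punctuation is stripped first),
# while validate("11444777000162") returns False because the check digits do
# not match.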
```
#### File: pycpfcnpj/tests/cnpj_tests.py
```python
import unittest
from pycpfcnpj import cnpj
class CNPJTests(unittest.TestCase):
"""docstring for CNPJTests"""
def setUp(self):
self.valid_cnpj = "11444777000161"
self.masked_valid_cnpj = "11.444.777/0001-61"
self.invalid_cnpj = "11444777000162"
self.masked_invalid_cnpj = "11.444.777/0001-62"
self.invalid_cnpj_whitespaces = "11444 777000161"
self.invalid_cnpj_with_alphabetic = "11444d777000161"
self.invalid_cnpj_with_special_character = "+5575999769162"
def test_validate_cnpj_true(self):
self.assertTrue(cnpj.validate(self.valid_cnpj))
def test_validate_masked_cnpj_true(self):
self.assertTrue(cnpj.validate(self.masked_valid_cnpj))
def test_validate_cnpj_false(self):
self.assertFalse(cnpj.validate(self.invalid_cnpj))
def test_validate_masked_cnpj_false(self):
        self.assertFalse(cnpj.validate(self.masked_invalid_cnpj))
def test_validate_cnpj_with_same_numbers(self):
for i in range(10):
self.assertFalse(cnpj.validate("{0}".format(i) * 14))
def test_validate_cnpj_with_whitespaces(self):
self.assertFalse(cnpj.validate(self.invalid_cnpj_whitespaces))
def test_validate_cnpj_with_alphabetic_characters(self):
self.assertFalse(cnpj.validate(self.invalid_cnpj_with_alphabetic))
def test_validate_cnpj_with_special_characters(self):
self.assertFalse(cnpj.validate(self.invalid_cnpj_with_special_character))
if __name__ == "__main__":
unittest.main(verbosity=2)
```
#### File: pycpfcnpj/tests/cpfcnpj_tests.py
```python
import unittest
from pycpfcnpj import cpfcnpj
class CPFCNPJTests(unittest.TestCase):
"""docstring for CPFCNPJTests"""
def setUp(self):
self.valid_cpf = "11144477735"
self.invalid_cpf = "11144477736"
self.invalid_cpf_size = "111444777"
self.invalid_cpf_whitespaces = "111444 77735"
self.valid_cnpj = "11444777000161"
self.invalid_cnpj = "11444777000162"
self.invalid_cnpj_size = "114447770001"
self.invalid_cnpj_whitespaces = "11444 777000161"
self.invalid_cpf_with_alphabetic = "111444A77735"
self.invalid_cnpj_with_alphabetic = "11444d777000161"
self.invalid_cpf_with_special_character = "*55759997&9"
self.invalid_cnpj_with_special_character = "+557599976%162"
self.mascared_valid_cpf = "111.444.777-35"
self.mascared_invalid_cpf = "111.444.777-36"
self.mascared_invalid_cpf_size = "111.444.777"
self.mascared_valid_cnpj = "11.444.777/0001-61"
self.mascared_invalid_cnpj = "11.444.777/0001-62"
self.mascared_invalid_cnpj_size = "114.447/7700-01"
def test_validate_cpf_true(self):
self.assertTrue(cpfcnpj.validate(self.valid_cpf))
def test_validate_cpf_false(self):
self.assertFalse(cpfcnpj.validate(self.invalid_cpf))
    def test_validate_unicode_cpf_true(self):
self.assertTrue(cpfcnpj.validate("11144477735"))
def test_validate_cnpj_true(self):
self.assertTrue(cpfcnpj.validate(self.valid_cnpj))
def test_validate_cnpj_false(self):
self.assertFalse(cpfcnpj.validate(self.invalid_cnpj))
def test_validate_unicode_cnpj_true(self):
self.assertTrue(cpfcnpj.validate("11444777000161"))
def test_wrong_cpf_size(self):
self.assertFalse(cpfcnpj.validate(self.invalid_cpf_size))
def test_wrong_cnpj_size(self):
self.assertFalse(cpfcnpj.validate(self.invalid_cnpj_size))
    def test_mascared_validate_cpf_true(self):
        self.assertTrue(cpfcnpj.validate(self.mascared_valid_cpf))
    def test_mascared_validate_cpf_false(self):
        self.assertFalse(cpfcnpj.validate(self.mascared_invalid_cpf))
    def test_mascared_validate_cnpj_true(self):
        self.assertTrue(cpfcnpj.validate(self.mascared_valid_cnpj))
    def test_mascared_validate_cnpj_false(self):
        self.assertFalse(cpfcnpj.validate(self.mascared_invalid_cnpj))
    def test_mascared_wrong_cpf_size(self):
        self.assertFalse(cpfcnpj.validate(self.mascared_invalid_cpf_size))
    def test_mascared_wrong_cnpj_size(self):
        self.assertFalse(cpfcnpj.validate(self.mascared_invalid_cnpj_size))
def test_validate_cnpj_with_whitespace(self):
self.assertFalse(cpfcnpj.validate(self.invalid_cnpj_whitespaces))
def test_validate_cpf_with_whitespace(self):
self.assertFalse(cpfcnpj.validate(self.invalid_cpf_whitespaces))
def test_validate_cnpj_with_alphabetic_characters(self):
self.assertFalse(cpfcnpj.validate(self.invalid_cnpj_with_alphabetic))
def test_validate_cpf_with_alphabetic_characters(self):
self.assertFalse(cpfcnpj.validate(self.invalid_cpf_with_alphabetic))
def test_validate_cnpj_with_special_characters(self):
self.assertFalse(cpfcnpj.validate(self.invalid_cnpj_with_special_character))
def test_validate_cpf_with_special_characters(self):
self.assertFalse(cpfcnpj.validate(self.invalid_cpf_with_special_character))
if __name__ == "__main__":
unittest.main(verbosity=2)
```
|
{
"source": "JeffersonK/storm-starter-remote",
"score": 2
}
|
#### File: multilang/resources/storm.py
```python
import sys
import os
import traceback
try:
import cjson
json_encode = cjson.encode
json_decode = lambda x: cjson.decode(x, all_unicode=True)
except ImportError:
import json
json_encode = lambda x: json.dumps(x, ensure_ascii=False)
json_decode = lambda x: json.loads(unicode(x))
def readStringMsg():
msg = ""
while True:
line = sys.stdin.readline()[0:-1]
if line == "end":
break
msg = msg + line + "\n"
return msg[0:-1]
ANCHOR_TUPLE = None
#reads lines and reconstructs newlines appropriately
def readMsg():
return json_decode(readStringMsg())
def sendToParent(s):
print s
print "end"
sys.stdout.flush()
def sync():
print "sync"
sys.stdout.flush()
def sendpid(heartbeatdir):
pid = os.getpid()
print pid
sys.stdout.flush()
open(heartbeatdir + "/" + str(pid), "w").close()
def sendMsgToParent(amap):
sendToParent(json_encode(amap))
def emittuple(tup, stream=None, anchors = [], directTask=None):
global ANCHOR_TUPLE
if ANCHOR_TUPLE is not None:
anchors = [ANCHOR_TUPLE]
m = {"command": "emit"}
if stream is not None:
m["stream"] = stream
m["anchors"] = map(lambda a: a.id, anchors)
if directTask is not None:
m["task"] = directTask
m["tuple"] = tup
sendMsgToParent(m)
def emit(tup, stream=None, anchors = []):
emittuple(tup, stream=stream, anchors=anchors)
#read back task ids
return readMsg()
def emitDirect(task, tup, stream=None, anchors = []):
emittuple(tup, stream=stream, anchors=anchors, directTask=task)
def ack(tup):
sendMsgToParent({"command": "ack", "id": tup.id})
def fail(tup):
sendMsgToParent({"command": "fail", "id": tup.id})
def log(msg):
sendMsgToParent({"command": "log", "msg": msg})
# read the stormconf and context
def readenv():
conf = readMsg()
context = readMsg()
return [conf, context]
def readtuple():
tupmap = readMsg()
return Tuple(tupmap["id"], tupmap["comp"], tupmap["stream"], tupmap["task"], tupmap["tuple"])
def initbolt():
heartbeatdir = readStringMsg()
sendpid(heartbeatdir)
return readenv()
class Tuple:
def __init__(self, id, component, stream, task, values):
self.id = id
self.component = component
self.stream = stream
self.task = task
self.values = values
class Bolt:
def initialize(self, stormconf, context):
pass
def process(self, tuple):
pass
def run(self):
conf, context = initbolt()
self.initialize(conf, context)
try:
while True:
tup = readtuple()
self.process(tup)
sync()
except Exception, e:
log(traceback.format_exc(e))
class BasicBolt:
def initialize(self, stormconf, context):
pass
def process(self, tuple):
pass
def run(self):
global ANCHOR_TUPLE
conf, context = initbolt()
self.initialize(conf, context)
try:
while True:
tup = readtuple()
ANCHOR_TUPLE = tup
self.process(tup)
ack(tup)
sync()
except Exception, e:
log(traceback.format_exc(e))
class Spout:
pass
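# Editor's note: a minimal sketch of how this shim is typically used (not part
# of the original file) -- subclass BasicBolt, override process(), and call run():
#
#   class EchoBolt(BasicBolt):
#       def process(self, tup):
#           emit([tup.values[0]])
#
#   EchoBolt().run()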
```
|
{
"source": "JeffersonLab/graphical-alarm-client",
"score": 2
}
|
#### File: graphical-alarm-client/scripts/JAWSTableView.py
```python
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QPalette
from FilterMenu import *
from Actions import *
from PropertyDialog import *
from utils import *
#Parent tableview
class JAWSTableView(QtWidgets.QTableView) :
"""
JAWSTableView - parent for AlarmTable and OverrideTable
"""
def __init__(self,*args,**kwargs) :
super(JAWSTableView,self).__init__(*args,**kwargs)
#Adjusts columns to contents
self.setSizeAdjustPolicy(
QtWidgets.QAbstractScrollArea.AdjustToContents)
#Expands table if more rows added.
self.setSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding,
QtWidgets.QSizePolicy.MinimumExpanding)
#Allow the sections (columns) to be rearranged
header = self.horizontalHeader()
header.setSectionsMovable(True)
#Add a context menu (3rd mouse) to the header
header.setContextMenuPolicy(Qt.CustomContextMenu)
header.customContextMenuRequested.connect(self.selectHeader)
#Get the single alarm that is selected.
def getSelectedAlarm(self) :
"""
Get the single selected alarm
:returns JAWSAlarm
"""
alarmlist = self.getSelectedAlarms()
alarm = None
if (len(alarmlist) > 0) :
alarm = alarmlist[0]
return(alarm)
def getSelectedAlarms(self) :
"""
Get the list of alarms that have been selected on the table
:returns list of JAWSAlarms
"""
alarmlist = []
#Access both the proxy model and the sourcemodel
proxymodel = getProxy()
sourcemodel = getModel()
#The indices returned are that of the proxymodel, which is what
#the table LOOKS like. We need the source model index to identify the
#actual selected alarm.
indices = self.selectedIndexes()
for index in indices :
proxy_index = index
#convert the proxy_index into a source_index,
#and find the row that is associated with the selected alarm(s)
source_row = proxymodel.mapToSource(proxy_index).row()
alarm = sourcemodel.data[source_row]
alarmlist.append(alarm)
return(alarmlist)
def rowSelected(self) :
""" #If a row is selected, configure tool bar as appropriate. """
getManager().getToolBar().configureToolBar()
def selectHeader(self,vis) :
"""
User has requested the contextmenu
signal passes in the header position (visible column index)
If the columns have been rearranged,
they will have a "visualIndex" and a "logicalIndex"
Need to know what the logicalIndex is of the passed in vis
"""
col = self.horizontalHeader().logicalIndexAt(vis)
#Most columns have a filter associated with it
name = getModel().getColumnName(col)
jawsfilter = getFilterByHeader(name)
#If there is a filter, show the filter menu
if (jawsfilter != None) :
if (jawsfilter.getName() == 'timestamp') :
menu = ExclusiveFilterMenu(jawsfilter)
else :
menu = FilterMenu(jawsfilter)
action = menu.exec_(self.mapToGlobal(vis))
#Action common to JAWSTables
def addPropertyAction(self,menu) :
""" Display the properties of the select alarm
"""
alarm = self.getSelectedAlarm()
PropertyAction(menu,alarm).addAction()
def getDefaultSort(self) :
""" Determine the default sort if not in user prefs
"""
sortcolumn = getModel().getColumnIndex(self.defaultsort)
return(sortcolumn,self.defaultorder)
#Extend the table view for our own AlarmTable
class AlarmTable(JAWSTableView) :
""" Extend JAWSTableView for viewing the ActiveAlarms
"""
def __init__(self,*args,**kwargs) :
super(AlarmTable,self).__init__(*args,**kwargs)
self.defaultsort = "timestamp"
self.defaultorder = 1
def contextMenuEvent(self,event) :
"""
Context menu when user right-mouse clicks in a cell.
Multiple rows/columns/cells can be selected
"""
menu = QtWidgets.QMenu(self)
#TitleAction is a placeholder for the title of the context menu
self.addTitleAction(menu)
#separator between the title and actions.
separatorbg = "background: red"
style = "QMenu::separator {background: red;}"
menu.setStyleSheet(style)
separator = menu.addSeparator()
self.mainmenu = menu
self.addAckAction(menu)
self.addPropertyAction(menu)
self.addOverrideAction(menu)
action = self.performAction(event)
if (action != None) :
action.performAction(self.getSelectedAlarms())
def performAction(self,event) :
#Not sure why (ugh) but if the menu calls
#mapToGlobal directly, the focus remains on the
#AlarmTable...instead of the potential dialog
action = self.mainmenu.exec_(self.mapToGlobal(event.pos()))
return(action)
def addTitleAction(self,menu) :
TitleAction(menu).addAction()
def addOverrideAction(self,menu) :
OverrideAction(menu).addAction()
#User can acknowledge the selected alarms
def addAckAction(self,menu) :
AckAction(menu).addAction()
#The latched and status column (col=0, col=1)
#Displays the status indicators.
#Must create our own delegate.
class StatusDelegate(QtWidgets.QStyledItemDelegate) :
def __init__(self,statusindex) :
super(StatusDelegate,self).__init__()
self.statusindex = statusindex
#Size of the column
def sizeHint(self,option,index) :
return(QtCore.QSize(50,50))
#Must override this method to use an image
def paint(self,painter,option,index) :
row = index.row()
col = index.column()
alarm = getModel().data[row]
#The data is the value from the "data" subroutine.
#In this case the "latched" and "sevr" status'
data = index.data()
#The alarm associated with this col=0 or col=1
if (col == self.statusindex) :
if (data != None) :
image = GetStatusImage(data)
if (image == None) :
return
small = image.scaled(
option.rect.size(),Qt.KeepAspectRatio);
x = option.rect.center().x() - small.rect().width() / 2 + 5
y = option.rect.center().y() - small.rect().height() / 2 + 2
painter.drawPixmap(x, y, small)
#Create the ShelfTable
class OverrideTable(JAWSTableView) :
def __init__(self,*args,**kwargs) :
super(OverrideTable,self).__init__(*args,**kwargs)
self.defaultsort = "timeleft"
self.defaultorder = 1
self.columnlist = list(getManager().columns)
statusindex = self.columnlist.index("status")
self.setItemDelegateForColumn(statusindex, StatusDelegate(statusindex))
#Context menu when user right-mouse clicks in a cell.
#Multiple rows/columns/cells can be selected
def contextMenuEvent(self,event) :
return
menu = QtWidgets.QMenu(self)
self.AddUnShelveAction(menu)
self.AddShelfAction(menu)
self.addPropertyAction(menu)
action = menu.exec_(self.mapToGlobal(event.pos()))
```
#### File: scripts/jlab_jaws_helper/JAWSConnection.py
```python
import os
import pwd
import types
import pytz
import time
from datetime import datetime
# We can't use AvroProducer since it doesn't support string keys, see: https://github.com/confluentinc/confluent-kafka-python/issues/428
# COMMON/GENERAL
from confluent_kafka.schema_registry import SchemaRegistryClient
from confluent_kafka import SerializingProducer
from jlab_jaws.avro.entities import *
from jlab_jaws.avro.serde import AlarmRegistrationSerde
from jlab_jaws.avro.serde import AlarmActivationUnionSerde
from jlab_jaws.avro.serde import EffectiveActivationSerde
from jlab_jaws.avro.serde import EffectiveAlarmSerde
from jlab_jaws.avro.serde import AlarmOverrideKeySerde, AlarmOverrideUnionSerde
from jlab_jaws.avro.serde import AlarmClassSerde
from jlab_jaws.avro.serde import EffectiveRegistrationSerde
# CONSUMER
from confluent_kafka.serialization import StringDeserializer, StringSerializer
from jlab_jaws.eventsource.table import EventSourceTable
def convert_timestamp(seconds) :
""" Convert the message timestamp to local timezone.
:param seconds : number of seconds
:type seconds : int
:returns date string for local timezone
"""
#Work in utc time, then convert to local time zone.
ts = datetime.fromtimestamp(seconds//1000)
utc_ts = pytz.utc.localize(ts)
#Finally convert to EST.
est_ts = utc_ts.astimezone(pytz.timezone("America/New_York"))
return(est_ts)
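# Editor's note (hedged, assumes the utcfromtimestamp fix above):
# convert_timestamp(0) returns 1969-12-31 19:00:00-05:00 for America/New_York.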
#Convert the timestamp into something readable
def get_msg_timestamp(msg) :
""" Get timestamp of message
        :param msg : topic message
:type msg: 'cimpl.Message'
:returns timestamp in local time zone
"""
#timestamp from Kafka is in UTC
timestamp = msg.timestamp()
# print(msg.topic(),"GETMSGTIMESTAMP",convert_timestamp(timestamp[1]),"\n")
return(convert_timestamp(timestamp[1]))
def get_headers(msg) :
""" Get message headers
        :param msg : topic message
:type msg: 'cimpl.Message'
:returns list of headers
"""
headers = msg.headers()
return(headers)
def get_alarmname(msg) :
name = msg.key()
if (not isinstance(name,str)) :
name_dict = name.__dict__
if ('name' in name_dict) :
name = name_dict['name']
return(name)
def get_msg_key(msg) :
""" Get message key.
        :param msg : topic message
:type msg: 'cimpl.Message'
:returns key
"""
key = msg.key()
# topic = msg.topic()
# if (topic in CONSUMERS) :
# consumer = CONSUMERS[topic]
# key = consumer.get_msg_key(msg)
return(key)
def get_msg_value(msg) :
""" Get message key.
        :param msg : topic message
:type msg: 'cimpl.Message'
:returns value object
"""
# print(msg.topic(),"GETMSGVALUE",msg.value(),"\n")
return(msg.value())
def get_msg_topic(msg) :
""" Get message topic
        :param msg : topic message
:type msg: 'cimpl.Message'
:returns topic
"""
return(msg.topic())
def get_alarm_class_list() :
""" Get list of valid alarm class names
:returns list AlarmClass member names
"""
return(AlarmClass._member_names_)
def get_location_list() :
""" Get list of valid locations
:returns list AlarmLocation member names
"""
return(AlarmLocation._member_names_)
def get_category_list() :
""" Get list of valid categories
:returns list AlarmCategory member names
"""
return(AlarmCategory._member_names_)
def get_priority_list() :
""" Get list of valid priorities
:returns list AlarmPriority member names
"""
return(AlarmPriority._member_names_)
def get_override_reasons() :
return(ShelvedReason._member_names_)
def get_override_types() :
return(OverriddenAlarmType._member_names_)
class JAWSConnection(object) :
""" This class sets up the kafka connection for creating consumers and
producers
"""
def __init__(self,topic) :
""" Create a kafkaconnection for the topic
:param topic: Name of topic
:type topic: string
"""
self.topic = topic
#Magic Kafka configuration
bootstrap_servers = os.environ.get('BOOTSTRAP_SERVERS', 'localhost:9092')
self.bootstrap_servers = bootstrap_servers
conf = {'url': os.environ.get('SCHEMA_REGISTRY', 'http://localhost:8081')}
self.schema_registry = SchemaRegistryClient(conf)
self.params = types.SimpleNamespace()
self.key_deserializer = StringDeserializer('utf_8')
self.key_serializer = StringSerializer()
```
#### File: graphical-alarm-client/scripts/OverrideModel.py
```python
from JAWSModel import *
class OverrideModel(JAWSModel) :
def __init__(self,data=None,parent = None, *args) :
super(OverrideModel,self).__init__(data,parent,*args)
def data(self,index,role) :
row = index.row()
col = index.column()
if (role == Qt.TextAlignmentRole) :
return(Qt.AlignCenter)
alarm = self.data[row]
display = self.getDisplay(alarm)
if (col == self.getColumnIndex('status')) :
if (role == Qt.DecorationRole) :
image = GetStatusImage(display)
if (image != None) :
return(QIcon(image))
else :
print("NO IMAGE FOR ",display)
#Insert the appropriate information into the display, based
#on the column. Column "0" is handled by the StatusDelegate
if role == Qt.DisplayRole :
if (col == self.getColumnIndex('status')) :
#Does the alarm have a severity?
if (display == None) :
return
return(display)
if (col == self.getColumnIndex('name')) :
return(alarm.get_name())
alarmprop = self.getColumnName(col)
if ("date" in alarmprop or "expiration" in alarmprop) :
val = alarm.get_property(alarmprop)
if (val != None) :
return(val.strftime("%Y-%m-%d %H:%M:%S"))
elif ("left" in alarmprop) :
timeleft = alarm.get_property('timeleft')
return(self.displayTimeLeft(timeleft))
else :
val = alarm.get_property(alarmprop,name=True)
return(val)
def getDisplay(self,alarm) :
display = "ALARM"
state = alarm.get_state(name=True)
if ("normal" in state.lower()) :
display = "NORMAL"
return(display)
def displayTimeLeft(self,timeleft) :
if (timeleft == None) :
return("")
seconds = timeleft.seconds
days = timeleft.days
displaytime = ""
if (days > 0) :
displaytime = str(days) + " days "
if (seconds < 60) :
displaytime = displaytime + str(seconds) + " sec"
elif (seconds > 60 and seconds < 3600) :
minutes = '{:0.2f}'.format(seconds / 60)
displaytime = displaytime + minutes + " min"
elif (seconds > 3600 and seconds < 86400) :
if (days > 0) :
hours = str(int(seconds/3600))
getTable().horizontalHeader().setSectionResizeMode(3,
QtWidgets.QHeaderView.ResizeToContents)
else :
hours = '{:0.2f}'.format(seconds / 3600)
displaytime = displaytime + hours + " hours"
return(displaytime)
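    # Editor's note: examples (not part of the original file) -- a timedelta of
    # 90 seconds renders as "1.50 min", and timedelta(days=2, seconds=30)
    # renders as "2 days 30 sec".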
class OverrideProxy(JAWSProxyModel) :
def __init__(self,*args,**kwargs) :
super(OverrideProxy,self).__init__(*args,**kwargs)
def getDisplay(self,alarm) :
return(self.sourceModel().getDisplay(alarm))
```
#### File: graphical-alarm-client/scripts/PrefDialog.py
```python
from PyQt5.QtWidgets import *
from PyQt5.QtCore import Qt,QMimeData
import datetime
import time
import pytz
from csv import reader,writer,DictReader,DictWriter
from utils import *
from Actions import *
from FilterMenu import *
from JAWSDialog import *
#The Preferences Dialog widget.
#The dialog is made up of individual preference widgets that
#manage themselves
class PrefDialog(JAWSDialog) :
def __init__(self,parent=None,*args,**kwargs) :
super(PrefDialog,self).__init__(parent,*args,**kwargs)
vlayout = self.vlayout
filterprefs = FilterPrefs(self)
vlayout.addWidget(filterprefs)
self.filterprefs = filterprefs
self.prefwidgets.append(filterprefs)
# timefilter = TimeFilterButton(self)
# vlayout.addWidget(timefilter)
# defaultrows = DefaultRowPrefs(self)
# vlayout.addWidget(defaultrows)
# self.defaultrows = defaultrows
#self.prefwidgets.append(defaultrows)
#Allow the user to select the default sort column
sortprefs = SortPrefs(self)
vlayout.addWidget(sortprefs)
self.sortprefs = sortprefs
self.prefwidgets.append(sortprefs)
#Allow the user to hide and display the columns that they want
displayprefs = DisplayPrefs(self)
vlayout.addWidget(displayprefs)
self.displayprefs = displayprefs
self.prefwidgets.append(displayprefs)
#The "apply" and "close" buttons.
buttons = self.makeButtons()
vlayout.addWidget(buttons)
self.setLayout(vlayout)
self.layout().setAlignment(Qt.AlignTop)
self.show()
#Configure prefwidgets (if defined)
def configureDialog(self) :
for prefwidget in self.prefwidgets :
prefwidget.configureDialog()
#Called to reset preferences
def reset(self) :
for pref in self.prefwidgets :
pref.reset()
#The accessible buttons on the dialog
def makeButtons(self) :
layout = QtWidgets.QHBoxLayout()
widget = QtWidgets.QWidget()
widget.setLayout(layout)
acceptbutton = QtWidgets.QPushButton("Apply Changes")
acceptbutton.clicked.connect(self.applyChanges)
layout.addWidget(acceptbutton)
getManager().setButtonStyle(acceptbutton)
#acceptbutton.setStyleSheet('QPushButton{background-color: darkRed; color: white}')
cancelbutton = QtWidgets.QPushButton("Close")
cancelbutton.clicked.connect(self.Close)
layout.addWidget(cancelbutton)
# cancelbutton.setStyleSheet('QPushButton{background-color: darkRed; color: white}')
getManager().setButtonStyle(cancelbutton)
return(widget)
#Apply changes for each applicable section
def applyChanges(self) :
#Does the user also want to save the changes?
save = self.SaveChanges()
for pref in self.prefwidgets :
if (save) :
pref.SaveChanges()
pref.applyChanges()
if (save) :
getManager().savePrefs()
#Ask if the user wants to save the changes
def SaveChanges(self) :
save = False
msgBox = QtWidgets.QMessageBox.question(self,"Save?",
"Save your preferences?",
QtWidgets.QMessageBox.Save| QtWidgets.QMessageBox.No,
QtWidgets.QMessageBox.Save)
#If so, get the pref file name, and if it already exists,
#remove it, so it can be replaced.
if (msgBox == QtWidgets.QMessageBox.Save) :
save = True
return(save)
#Close the dialog
def Close(self) :
self.close()
class TimeFilterButton(QtWidgets.QPushButton) :
def __init__(self,parent=None,*args,**kwargs) :
super(TimeFilterButton,self).__init__("Timestamp",parent)
self.clicked.connect(self.timeButtonPushed)
def timeButtonPushed(self) :
self.timewidget = TimeWidget()
self.timewidget.show()
class TimeWidget(QtWidgets.QWidget) :
def __init__(self) :
super(TimeWidget,self).__init__()
widgetlayout = QtWidgets.QVBoxLayout()
self.setLayout(widgetlayout)
fromlayout = QtWidgets.QHBoxLayout()
fromwidget = QtWidgets.QWidget()
fromwidget.setLayout(fromlayout)
fromlabel = QtWidgets.QLabel("To")
fromlayout.addWidget(fromlabel)
datetime = QtCore.QDateTime.currentDateTime()
timezone = QtCore.QTimeZone(b'America/New_York')
newdatetime = datetime.toTimeZone(timezone)
fromchooser = QtWidgets.QDateTimeEdit(newdatetime)
# print("TIMESPEC",fromchooser.timeSpec())
fromlayout.addWidget(fromchooser)
# fromchooser.setDisplayFormat("HH:MM")
widgetlayout.addWidget(fromwidget)
now = QtCore.QDateTime.currentDateTime()
#print(now.toString(Qt.DefaultLocaleLongDate))
zonelist = QtCore.QTimeZone.availableTimeZoneIds(QtCore.QLocale.UnitedStates)
# for zone in zonelist :
class PrefGroupBox(QtWidgets.QGroupBox) :
def __init__(self,text,parent,*args,**kwargs) :
super(PrefGroupBox,self).__init__(text)
getManager().setGroupBoxStyle(self)
self.parent = parent
def reset(self) :
pass
def configureDialog(self) :
pass
def SaveChanges(self) :
pass
#Display the filter preferences.
class FilterPrefs(QtWidgets.QGroupBox) :
def __init__(self,parent=None,name="Default Filters",*args,**kwargs) :
super(FilterPrefs,self).__init__(name,parent)
getManager().setGroupBoxStyle(self)
self.parent = parent
#Each filter will have a button that will need to be configured
self.filterbuttons = []
vlayout = QtWidgets.QVBoxLayout()
vlayout.setSpacing(1)
self.layout = vlayout
self.setLayout(vlayout)
#Widget that contains a button for each filter
filterwidget = self.AddFilterWidgets()
vlayout.addWidget(filterwidget)
self.filterwidget = filterwidget
self.filteraction = getManager().getRemoveFilterAction()
#Use the filteraction's icon
icon = self.filteraction.icon
#User can remove all filters from here.
button = QtWidgets.QPushButton(icon,"Remove All Filters")
button.clicked.connect(self.RemoveFilters)
self.removebutton = button
self.layout.addWidget(self.removebutton)
#Configure the remove and filter buttons
self.configureDialog()
#Enable/disable the removebutton, and add/remove icon for each
#filter button
def configureDialog(self) :
filtered = self.filteraction.getFilterState()
self.removebutton.setEnabled(filtered)
for button in self.filterbuttons :
button.configureDialog()
#Called by the Remove Filters button
def RemoveFilters(self) :
filteraction = self.filteraction
filteraction.removeAllFilters()
#We want to add the filter buttons in the order that
#they appear on the table. Each time the pref dialog is
#opened, remove the previous set, and then add them back in
#the right order.
def RemoveFilterWidgets(self) :
#All of the filter buttons are associated with this widget.
self.filterwidget.deleteLater()
#Sort filters in the order that they are visible in the table
def SortFilters(self) :
sorted = []
#Get the header text in visible order
visualorder = getModel().visibleColumnOrder()
#Add the filter for each header to the sorted list
for header in visualorder :
filter = getFilterByHeader(header)
if (filter != None) :
sorted.append(filter)
return(sorted)
#Save the user's preferences to their pref file.
def SaveChanges(self) :
prefs = getManager().getPrefs()
if (not 'filters' in prefs) :
prefs['filters'] = {}
for filter in getManager().getFilters() :
filtername = filter.getName()
prefs['filters'][filtername] = filter.saveFilter()
#Don't need this method since changes are applied immediately.
def applyChanges(self) :
pass
#Called when the dialog has been RE-opened.
def reset(self) :
#Remove the filter buttons
self.RemoveFilterWidgets()
#Add them back in visible column order
self.filterwidget = self.AddFilterWidgets()
#Add the new widget to the layout
self.layout.addWidget(self.filterwidget)
#Replace the removebutton
self.layout.addWidget(self.removebutton)
#Configure all of the buttons
self.configureDialog()
#Add a button for each available filter
def AddFilterWidgets(self) :
#Empty out the filter button list, since we keep creating
#new ones.
self.filterbuttons = []
#The filterbuttons are associated with another widget
widget = QtWidgets.QWidget()
filterlayout = QtWidgets.QHBoxLayout()
filterlayout.setSpacing(1)
widget.setLayout(filterlayout)
filterlayout.setAlignment(Qt.AlignCenter)
for filter in self.SortFilters() :
headeronly = filter.getProperty('headeronly')
if (headeronly == None or not headeronly) :
filterwidget = FilterButton(filter,widget)
filterlayout.addWidget(filterwidget,Qt.AlignCenter)
self.filterbuttons.append(filterwidget)
return(widget)
#A button that displays the filter menu.
class FilterButton(QtWidgets.QPushButton) :
def __init__(self,filter,parent,*args,**kwargs) :
filtername = filter.getName()
if (filtername == "timestamp") :
filtername = "timespan"
super(FilterButton,self).__init__(filtername,parent)
# self.setFixedWidth(15)
#return
self.filter = filter
self.parent = parent
layout = QtWidgets.QVBoxLayout()
self.setLayout(layout)
self.clicked.connect(
lambda checked : self.ShowFilterMenu(checked,filter))
self.icon = QtGui.QIcon("funnel--plus.png")
self.configureDialog()
#Create a filternmenu and display it.
def ShowFilterMenu(self,checked,filter) :
if (isinstance(filter,ExclusiveFilter)) :
menu = ExclusiveFilterMenu(filter)
else :
menu = FilterMenu(filter)
menu.exec_(self.mapToGlobal(self.parent.pos()))
# return(menu)
#If filter is applied, the filter button will have the filter icon
def configureDialog(self) :
filter = self.filter
filtered = filter.isFiltered()
if (filtered) :
self.setIcon(self.icon)
else :
self.setIcon(QtGui.QIcon());
class DefaultRowPrefs(PrefGroupBox) :
""" Allows user to limit then number of rows """
def __init__(self,parent,*args,**kwargs) :
super(DefaultRowPrefs,self).__init__("Display Rows",parent)
self.parent = parent
#Each filter will have a button that will need to be configured
self.filterbuttons = []
#Create an exclusive button group.
        #...only one of the radiobuttons can be selected at a time
#Have to have buttongroup as a member of the group box (self),
#or method will not recognize it.
self.buttongroup = QtWidgets.QButtonGroup()
#This set of prefs has two associated filters.
layout = QtWidgets.QHBoxLayout()
self.setLayout(layout)
self.layout = layout
layout.setSpacing(50)
self.setSizePolicy(QtWidgets.QSizePolicy.Maximum,QtWidgets.QSizePolicy.Maximum)
#Widget that contains a button for each filter
filterwidget = self.AddFilterWidgets()
layout.addWidget(filterwidget)
self.filterwidget = filterwidget
def AddFilterWidgets(self) :
#Empty out the filter button list, since we keep creating
#new ones.
self.filterbuttons = []
#The filterbuttons are associated with another widget
widget = QtWidgets.QWidget()
filterlayout = QtWidgets.QHBoxLayout()
filterlayout.setSpacing(1)
widget.setLayout(filterlayout)
filterlayout.setAlignment(Qt.AlignCenter)
nolimits = QtWidgets.QRadioButton('All')
filterlayout.addWidget(nolimits)
self.buttongroup.addButton(nolimits)
countfilter = getFilterByHeader('name')
filterwidget = CountLimit(countfilter,widget)
filterlayout.addWidget(filterwidget)
self.buttongroup.addButton(filterwidget.countlimit)
self.filterbuttons.append(filterwidget)
return(widget)
"""
for filter in self.SortFilters() :
print(filter)
break
headeronly = filter.getProperty('headeronly')
if (headeronly == None or not headeronly) :
filterwidget = FilterButton(filter,widget)
filterlayout.addWidget(filterwidget,Qt.AlignCenter)
self.filterbuttons.append(filterwidget)
return(widget)
"""
class CountLimit(QtWidgets.QWidget) :
def __init__(self,filter,parent,*args,**kwargs) :
super(CountLimit,self).__init__(parent)
self.filter = filter
layout = QtWidgets.QHBoxLayout()
layout.setSpacing(4)
self.setSizePolicy(QtWidgets.QSizePolicy.Maximum,QtWidgets.QSizePolicy.Preferred)
self.setLayout(layout)
#Limit to X alarms
countlimit = QtWidgets.QRadioButton("Limit to")
layout.addWidget(countlimit)
self.countlimit = countlimit
countval = QtWidgets.QLineEdit()
countval.setAlignment(Qt.AlignRight)
countval.returnPressed.connect(self.trigger)
countval.setFixedWidth(45)
countvalidator = QtGui.QIntValidator()
countvalidator.setBottom(0)
countval.setValidator(countvalidator)
layout.addWidget(countval)
self.countval = countval
alarmlabel = QtWidgets.QLabel("Alarms")
alarmlabel.setSizePolicy(QtWidgets.QSizePolicy.Maximum,QtWidgets.QSizePolicy.Preferred)
layout.addWidget(alarmlabel)
def getRadioButton(self) :
return(self.countlimit)
def getMaxTextEdit(self) :
return(self.countval)
def configAllOption(self) :
return
def trigger(self,checked=None) :
#The sender will be the checkbox from which the signal came
sender = self.sender()
value = sender.text()
#Actually set the filter
self.filter.setFilter('max',value)
#Redetermine the value of the "all" option
self.configAllOption()
#Set the column header based on the new state
self.filter.setHeader()
#Section of prefdialog dealing with sorting preferences.
class SortPrefs(QtWidgets.QGroupBox) :
def __init__(self,parent,*args,**kwargs) :
super(SortPrefs,self).__init__("Default Sort",parent)
getManager().setGroupBoxStyle(self)
self.prefs = getManager().getPrefs()
layout = QtWidgets.QHBoxLayout()
self.setLayout(layout)
label = QtWidgets.QLabel("Sort By:")
label.setFixedWidth(50)
layout.addWidget(label)
layout.setSpacing(5)
#combo box with all headers
self.combo = QtWidgets.QComboBox()
layout.addWidget(self.combo)
#Create an exclusive button group.
#...two of the radiobuttons cannot be selected at once
#Have to have buttongroup as a member of the group box (self),
#or method will not recognize it.
self.buttongroup = QtWidgets.QButtonGroup()
#Sort ascending direction
sortacscending = QtWidgets.QRadioButton()
sortacscending.setIcon(QtGui.QIcon("sort-alphabet.png"))
sortacscending.setFixedWidth(50)
sortacscending.clicked.connect(self.SelectSortOrder)
layout.addWidget(sortacscending) #add the radiobutton to the layout!
#Add it to the buttongroup. Button group is NOT a widget
self.buttongroup.addButton(sortacscending)
self.sortacscending = sortacscending
#Now the descending option
sortdescending = QtWidgets.QRadioButton()
sortdescending.setIcon(QtGui.QIcon("sort-alphabet-descending.png"))
sortdescending.setFixedWidth(50)
sortdescending.clicked.connect(self.SelectSortOrder)
layout.addWidget(sortdescending)
self.buttongroup.addButton(sortdescending)
self.sortdescending = sortdescending
#Fill the combo box with the header options
self.FillCombo()
self.configureDialog()
self.combo.currentIndexChanged.connect(self.applyChanges)
#Fill the combo with the sortoptions
def FillCombo(self) :
combo = self.combo
#Clear the combo box out.
#We will fill it up in the visible order of the column headers
combo.clear()
#Get the headers (in visible order)
options = getModel().visibleColumnOrder()
#Add the current sort column as the combo box value
header = self.CurrentSortOption()
options.insert(0,header.lower())
#Add each option to the combo box
for option in options :
combo.addItem(option)
#Configure the section
def configureDialog(self) :
#What is the column currently being used to sort?
currentsort = getProxy().sortColumn()
#Ascending or descending?
currentorder = getProxy().sortOrder()
self.sort = currentorder
if (currentorder == 1) :
self.sortdescending.setChecked(True)
else :
self.sortacscending.setChecked(True)
#Which header is currently being used to sort?
def CurrentSortOption(self) :
sortcolumn = getProxy().sortColumn()
#return the header text for the column
sortoption = getModel().headerData(sortcolumn,Qt.Horizontal,Qt.DisplayRole)
return(sortoption)
#Get the column number that is being used to sort.
def GetSortColumn(self) :
sortby = self.combo.currentText().lower()
sortcolumn = getModel().getColumnIndex(sortby)
return(sortcolumn)
#Add this preference widget's properties to the
#managerprefs to be saved
def SaveChanges(self) :
prefs = getManager().getPrefs()
prefs['sort'] = {}
prefs['sort']['column'] = self.GetSortColumn()
prefs['sort']['order'] = self.sort
#Called when user selects a sort column from the combo box
def applyChanges(self) :
#Column number
sortcolumn = self.GetSortColumn()
#Sort order
sortorder = self.sort
#Request table to sort by column
if (sortcolumn != None and sortorder != None) :
getTable().sortByColumn(sortcolumn,sortorder)
#Called when the user selects a sort order radiobutton
def SelectSortOrder(self) :
sort = 0
button = self.buttongroup.checkedButton()
if (button == self.sortdescending) :
sort = 1
self.sort = sort
self.applyChanges()
#All preferences sections have a Reset method.
#For the "Sort Preferencses" section, we want to fill up the
#combo list in the same order as the columns
def reset(self) :
self.FillCombo()
self.configureDialog()
#The widget from which the user can decide which columns to display
#and which to hide
class DisplayPrefs(QtWidgets.QGroupBox) :
def __init__(self,parent, *args,**kwargs) :
super(DisplayPrefs,self).__init__("Display Columns",parent)
getManager().setGroupBoxStyle(self)
#Using a gridlayout, so will keep track of the rows.
self.row = None
self.showlist = []
self.prefs = getManager().getPrefs()
self.parent = parent
layout = QtWidgets.QGridLayout()
layout.setHorizontalSpacing(5)
layout.setVerticalSpacing(0)
layout.setColumnStretch(0,0)
self.setLayout(layout)
self.layout = layout
#Store the listwidgets for the Show and Hide options.
self.options = {}
#The "Show" option default is all of the columns in the table.
#But, user may have set "show/hide" preferences
self.DisplayOptions("Show")
#Hide listwidget is empty by default (unless user has set prefs)
self.DisplayOptions("Hide")
#Put the column headers in the correct boxes.
self.FillDisplayBoxes()
#All preferences sections have a Reset method.
#For the "Display Columns" section, we want to fill up the
#Show List Widget in the same order as the columns
def reset(self) :
self.FillDisplayBoxes()
#Fill the Show/Hide display boxes with the appropriate columns
def FillDisplayBoxes(self) :
#The current set of columns to show and hide
(showlist,hidelist) = self.GetDisplayOptions()
self.FillBox('show',showlist)
self.FillBox('hide',hidelist)
#Fill the box, in list order
def FillBox(self,option,list) :
#The display listwidget
optionlist = self.options[option]
#Clear out the display options
optionlist.clear()
#Add the items to the listwidget
for item in list :
QtWidgets.QListWidgetItem(item,optionlist)
#Get the list of properties available to show, in the
#current column order.
def GetDisplayOptions(self) :
#The horizontal header for our table model
horizheader = getTable().horizontalHeader()
showlist = []
hidelist = []
for col in range(getModel().columnCount(0)) :
#The "visualindex" is the column index that the user
#is actually seeing. We want to move the visual index
#if the user changes the order in the "show" box.
visualindex = horizheader.visualIndex(col)
#Get the header text for this column
header = getModel().headerData(col,Qt.Horizontal,Qt.DisplayRole)
#if the section is NOT hidden, add it to the showlist.
#if hidden, add it to the hidelist
if (not getTable().horizontalHeader().isSectionHidden(col)) :
showlist.insert(visualindex,header.lower())
else :
hidelist.insert(visualindex,header.lower())
#return the results.
return(showlist,hidelist)
#This is a row with a label (Show/Hide) and an associated listbox
def DisplayOptions(self,text) :
row = nextRow(self)
#The label, in column 0
label = QtWidgets.QLabel(text)
self.layout.addWidget(label,row,0)
#The drag and drop listbox
dragwidget = DragWidget(self)
self.layout.addWidget(dragwidget,row,1)
#Assign the widget to the "options" dictionary
self.options[text.lower()] = dragwidget
#What properties are in the "Show" or "Hide" list widget,
#when "Apply" is pressed
def GetDisplayList(self,forwhat) :
list = []
options = self.options[forwhat]
for option in range(options.count()) :
item = options.item(option)
text = item.text()
list.append(text)
return(list)
#Add this preference widget's properties to the
#managerprefs to be saved
def SaveChanges(self) :
#Which properties to show, and which to hide.
prefs = getManager().getPrefs()
prefs['display'] = {}
prefs['display']['show'] = self.GetDisplayList("show")
prefs['display']['hide'] = self.GetDisplayList("hide")
#Called when the "Apply" button on the preferences dialog
#is pushed.
def applyChanges(self) :
#Which properties to show, and which to hide.
hidelist = self.GetDisplayList("hide")
showlist = self.GetDisplayList("show")
getModel().applyDisplayPrefs(showlist)
def configureDialog(self) :
pass
#The drag/drop listwidget.
class DragWidget(QtWidgets.QListWidget) :
def __init__(self,parent) :
super(DragWidget,self).__init__(parent)
#User can expand horizontally, but not vertically
self.setSizePolicy(QtWidgets.QSizePolicy.Expanding,
QtWidgets.QSizePolicy.Minimum)
#Want horizontal listwidgets.
self.setFlow(QtWidgets.QListView.Flow.LeftToRight)
#Configure the drag and drop
self.setDragEnabled(True)
self.setDragDropMode(QtWidgets.QAbstractItemView.DragDrop)
self.setDropIndicatorShown(True)
self.setDefaultDropAction(Qt.MoveAction)
self.viewport().setAcceptDrops(True)
#Allow user to select multiple items
self.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)
self.setResizeMode(QtWidgets.QListView.ResizeMode.Adjust)
#Spacing between items
#self.setSpacing(2)
self.setSpacing(getManager().prefSpacing())
#fix the height or the listwidgets are too tall.
self.setFixedHeight(50)
#Override the widget's sizeHint. Want to make sure that
#it is at least big enough to hold all of the column headers.
def sizeHint(self) :
num = self.count()
#extra = num * 20
extra = num * getManager().prefMultiplier()
size = QtCore.QSize()
height =1
width = super(QtWidgets.QListWidget,self).sizeHint().width()
if (num > 0) :
width = width + extra
size.setHeight(height)
size.setWidth(width)
return(size)
```
|
{
"source": "JeffersonLab/jaws",
"score": 3
}
|
#### File: client/common/__init__.py
```python
import time
def get_row_header(headers, timestamp):
ts = time.ctime(timestamp[1] / 1000)
user = ''
producer = ''
host = ''
if headers is not None:
lookup = dict(headers)
bytez = lookup.get('user', b'')
user = bytez.decode()
bytez = lookup.get('producer', b'')
producer = bytez.decode()
bytez = lookup.get('host', b'')
host = bytez.decode()
return [ts, user, host, producer]
def delivery_report(err, msg):
if err is not None:
print('Message delivery failed: {}'.format(err))
else:
print('Message delivered')
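# --- Illustrative usage sketch (not part of the original module) ---
# The header tuples and the timestamp below are hypothetical sample values.
if __name__ == '__main__':
    sample_headers = [('user', b'alice'), ('producer', b'set-alarm.py'), ('host', b'ops-console')]
    sample_timestamp = (0, 1600000000000)  # Kafka-style (type, epoch-millis) pair
    print(get_row_header(sample_headers, sample_timestamp))
    # -> ['Sun Sep 13 ...', 'alice', 'ops-console', 'set-alarm.py']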
```
#### File: scripts/client/set-override.py
```python
import os
import pwd
import types
import click
import time
from confluent_kafka import SerializingProducer
from confluent_kafka.schema_registry import SchemaRegistryClient
from jlab_jaws.avro.entities import AlarmOverrideUnion, LatchedOverride, FilteredOverride, MaskedOverride, \
DisabledOverride, OnDelayedOverride, OffDelayedOverride, ShelvedOverride, AlarmOverrideKey, OverriddenAlarmType, \
ShelvedReason
from jlab_jaws.avro.serde import AlarmOverrideKeySerde, AlarmOverrideUnionSerde
from common import delivery_report
bootstrap_servers = os.environ.get('BOOTSTRAP_SERVERS', 'localhost:9092')
sr_conf = {'url': os.environ.get('SCHEMA_REGISTRY', 'http://localhost:8081')}
schema_registry_client = SchemaRegistryClient(sr_conf)
key_serializer = AlarmOverrideKeySerde.serializer(schema_registry_client)
value_serializer = AlarmOverrideUnionSerde.serializer(schema_registry_client)
producer_conf = {'bootstrap.servers': bootstrap_servers,
'key.serializer': key_serializer,
'value.serializer': value_serializer}
producer = SerializingProducer(producer_conf)
topic = 'alarm-overrides'
hdrs = [('user', pwd.getpwuid(os.getuid()).pw_name),
        ('producer', 'set-override.py'),
        ('host', os.uname().nodename)]
def send() :
producer.produce(topic=topic, value=params.value, key=params.key, headers=hdrs, on_delivery=delivery_report)
producer.flush()
@click.command()
@click.option('--override', type=click.Choice(OverriddenAlarmType._member_names_), help="The type of override")
@click.option('--unset', is_flag=True, help="Remove the override")
@click.option('--expirationseconds', type=int, help="The number of seconds until the shelved status expires, None for indefinite")
@click.option('--reason', type=click.Choice(ShelvedReason._member_names_), help="The explanation for why this alarm has been shelved")
@click.option('--oneshot', is_flag=True, help="Whether shelving is one-shot or continuous")
@click.option('--comments', help="Operator explanation for why suppressed")
@click.option('--filtername', help="Name of filter rule associated with this override")
@click.argument('name')
def cli(override, unset, expirationseconds, reason, oneshot, comments, filtername, name):
global params
params = types.SimpleNamespace()
if override is None:
raise click.ClickException("--override is required")
params.key = AlarmOverrideKey(name, OverriddenAlarmType[override])
if expirationseconds is not None:
timestamp_seconds = time.time() + expirationseconds;
timestamp_millis = int(timestamp_seconds * 1000);
if unset:
params.value = None
else:
if override == "Shelved":
if reason is None:
raise click.ClickException("--reason is required")
if expirationseconds is None:
raise click.ClickException("--expirationseconds is required")
msg = ShelvedOverride(timestamp_millis, comments, ShelvedReason[reason], oneshot)
elif override == "OnDelayed":
if expirationseconds is None:
raise click.ClickException("--expirationseconds is required")
msg = OnDelayedOverride(timestamp_millis)
elif override == "OffDelayed":
if expirationseconds is None:
raise click.ClickException("--expirationseconds is required")
msg = OffDelayedOverride(timestamp_millis)
elif override == "Disabled":
msg = DisabledOverride(comments)
elif override == "Filtered":
if filtername is None:
raise click.ClickException("--filtername is required")
msg = FilteredOverride(filtername)
elif override == "Masked":
msg = MaskedOverride()
else: # assume Latched
msg = LatchedOverride()
params.value = AlarmOverrideUnion(msg)
print(params.value)
send()
cli()
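# Hypothetical invocation examples (alarm name and option values are illustrative,
# the override names come from the branches handled above):
#   python set-override.py --override Disabled --comments "under maintenance" alarm1
#   python set-override.py --override Latched --unset alarm1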
```
|
{
"source": "JeffersonLab/kafka-alarm-scripts",
"score": 2
}
|
#### File: jaws_scripts/broker/delete_topics.py
```python
import json
import os
import pkgutil
from confluent_kafka.admin import AdminClient
def delete_topics() -> None:
"""
Delete JAWS Kafka topics
"""
bootstrap_servers = os.environ.get('BOOTSTRAP_SERVERS', 'localhost:9092')
admin_client = AdminClient({'bootstrap.servers': bootstrap_servers})
conf = pkgutil.get_data("jaws_libp", "avro/topics.json")
topics = json.loads(conf)
results = admin_client.delete_topics(topics, operation_timeout=15)
for topic, future in results.items():
try:
future.result() # The result itself is None
print(f"Topic {topic} deleted")
except Exception as e:
print(f"Failed to delete topic {topic}: {e}")
if __name__ == "__main__":
delete_topics()
```
#### File: jaws_scripts/broker/list_topics.py
```python
import os
from confluent_kafka.admin import AdminClient
def list_topics() -> None:
"""
List Kafka topics
"""
bootstrap_servers = os.environ.get('BOOTSTRAP_SERVERS', 'localhost:9092')
admin_client = AdminClient({'bootstrap.servers': bootstrap_servers})
meta = admin_client.list_topics(timeout=10)
print(f"{len(meta.topics)} topics:")
for topic in iter(meta.topics.values()):
if topic.error is not None:
errstr = f": {topic.error}"
else:
errstr = ""
print(f" \"{topic}\" with {len(topic.partitions)} partition(s){errstr}")
if __name__ == "__main__":
list_topics()
```
#### File: jaws_scripts/broker/show_consumer_groups.py
```python
import os
from confluent_kafka.admin import AdminClient
def show_consumer_groups() -> None:
"""
Show Kafka consumer groups
"""
bootstrap_servers = os.environ.get('BOOTSTRAP_SERVERS', 'localhost:9092')
admin_client = AdminClient({'bootstrap.servers': bootstrap_servers})
groups = admin_client.list_groups(timeout=10)
print(f"{len(groups)} consumer groups:")
for group in groups:
if group.error is not None:
errstr = f": {group.error}"
else:
errstr = ""
print(
f" \"{group}\" mems: {len(group.members)}, pro: {group.protocol}, pro_type: {group.protocol_type}{errstr}")
for member in group.members:
print(f"id {member.id} client_id: {member.client_id} client_host: {member.client_host}")
if __name__ == "__main__":
show_consumer_groups()
```
#### File: jaws_scripts/client/list_effective_alarms.py
```python
import click
from jaws_libp.clients import EffectiveAlarmConsumer
# pylint: disable=missing-function-docstring,no-value-for-parameter
@click.command()
@click.option('--monitor', is_flag=True, help="Monitor indefinitely")
@click.option('--nometa', is_flag=True, help="Exclude audit headers and timestamp")
@click.option('--export', is_flag=True, help="Dump records in AVRO JSON format")
def list_effective_alarms(monitor, nometa, export):
consumer = EffectiveAlarmConsumer('list_effective_alarms.py')
consumer.consume_then_done(monitor, nometa, export)
def click_main() -> None:
list_effective_alarms()
if __name__ == "__main__":
click_main()
```
#### File: jaws_scripts/client/set_category.py
```python
import click
from jaws_libp.clients import CategoryProducer
# pylint: disable=missing-function-docstring,no-value-for-parameter
@click.command()
@click.option('--file', is_flag=True,
help="Imports a file of key=value pairs (one per line) where the key is category name and value is "
"empty string")
@click.option('--unset', is_flag=True, help="Remove the category")
@click.argument('name')
def set_category(file, unset, name) -> None:
producer = CategoryProducer('set_category.py')
key = name
if file:
producer.import_records(name)
else:
if unset:
value = None
else:
value = ""
producer.send(key, value)
def click_main() -> None:
set_category()
if __name__ == "__main__":
click_main()
```
#### File: kafka-alarm-scripts/tests/test_location.py
```python
from click.testing import CliRunner
from jaws_libp.avro.serde import LocationSerde
from jaws_libp.entities import AlarmLocation
from jaws_scripts.client.list_locations import list_locations
from jaws_scripts.client.set_location import set_location
def test_location():
location1_name = "LOCATION1"
location1 = AlarmLocation(None)
runner = CliRunner()
try:
# Set
result = runner.invoke(set_location, [location1_name])
assert result.exit_code == 0
# Get
result = runner.invoke(list_locations, ['--export'])
assert result.exit_code == 0
location_serde = LocationSerde(None)
assert result.output == location1_name + '=' + location_serde.to_json(location1) + '\n'
finally:
# Clear
result = runner.invoke(set_location, [location1_name, '--unset'])
assert result.exit_code == 0
def test_location_with_parent():
location1_name = "LOCATION1"
parent = "LOCATION2"
location1 = AlarmLocation(parent)
runner = CliRunner()
try:
# Set
result = runner.invoke(set_location, [location1_name, '--parent', parent])
assert result.exit_code == 0
# Get
result = runner.invoke(list_locations, ['--export'])
assert result.exit_code == 0
location_serde = LocationSerde(None)
assert result.output == location1_name + '=' + location_serde.to_json(location1) + '\n'
finally:
# Clear
result = runner.invoke(set_location, [location1_name, '--unset'])
assert result.exit_code == 0
```
|
{
"source": "jefferson-lam/ecole",
"score": 3
}
|
#### File: src/ecole/typing.py
```python
from typing import TypeVar, Tuple, Dict, Iterator, Any, overload, Protocol
import ecole
Action = TypeVar("Action")
ActionSet = TypeVar("ActionSet")
class Dynamics(Protocol[Action, ActionSet]):
"""Dynamics are raw environments.
The class is a bare :py:class:`ecole.environment.Environment` without rewards,
observations, and other utlilities.
It defines the state transitions of a Markov Decision Process, that is the series of steps and
possible actions of the environment.
"""
def set_dynamics_random_state(
self, model: ecole.scip.Model, rng: ecole.RandomGenerator
) -> None:
"""Set the random state of the episode.
This method is called by :py:meth:`~ecole.environment.Environment.reset` to
set all the random elements of the dynamics for the upcoming episode.
The random generator is kept between episodes in order to sample different episodes.
Parameters
----------
model:
The SCIP model that will be used through the episode.
rng:
The random generator used by the environment from which random numbers can be extracted.
"""
...
def reset_dynamics(self, model: ecole.scip.Model) -> Tuple[bool, ActionSet]:
"""Start a new episode.
This method brings the environment to a new initial state, *i.e.* starts a new
episode.
The method can be called at any point in time.
Parameters
----------
model:
The SCIP model that will be used through the episode.
Returns
-------
done:
A boolean flag indicating whether the current state is terminal.
If this is true, the episode is finished, and :meth:`step_dynamics` cannot be called.
action_set:
An optional subset of accepted action in the next transition.
For some environment, this may change at every transition.
"""
...
def step_dynamics(self, model: ecole.scip.Model, action: Action) -> Tuple[bool, ActionSet]:
"""Transition from one state to another.
This method takes the user action to transition from the current state to the
next.
The method **cannot** be called if the dynamics has not been reset since its
instantiation or is in a terminal state.
Parameters
----------
action:
The action to take in as part of the Markov Decision Process.
If an action set has been given in the latest call (including calls to
:meth:`reset_dynamics`), then the action **must** be in that set.
Returns
-------
done:
A boolean flag indicating whether the current state is terminal.
If this is true, the episode is finished, and this method cannot be called
until :meth:`reset_dynamics` has been called.
action_set:
An optional subset of accepted action in the next transition.
For some environment, this may change at every transition.
"""
...
Data = TypeVar("Data")
class DataFunction(Protocol[Data]):
"""The parent class of all function extracting data from the environment.
Data functions are a generic alias for :py:class:`~ecole.typing.ObservationFunction`,
:py:class:`~ecole.typing.RewardFunction`, and :py:class:`~ecole.typing.InformationFunction`
with different data types, such as float for rewards.
Having a similar interface between them makes it easier to combine them in various ways, such
as creating :py:class:`~ecole.typing.ObservationFunction` or :py:class:`~ecole.typing.InformationFunction`
from a dictionary of :py:class:`~ecole.typing.RewardFunction`.
This class is meant to represent a function of the whole state trajectory/history.
However, because it is not feasible to keep all the previous states in memory, this equivalent
implementation as a class lets the object store information from one transition to another.
See Also
--------
RewardFunction
"""
def before_reset(self, model: ecole.scip.Model) -> None:
"""Reset internal data at the start of episodes.
The method is called on new episodes :py:meth:`~ecole.environment.Environment.reset` right before
the MDP is actually reset, that is right before the environment calls
:py:meth:`~ecole.typing.Dynamics.reset_dynamics`.
It is usually used to reset the internal data.
Parameters
----------
model:
The :py:class:`~ecole.scip.Model`, model defining the current state of the solver.
"""
...
def extract(self, model: ecole.scip.Model, done: bool) -> Data:
"""Extract the data on the given state.
Extract the data after transitionning on the new state given by ``model``.
The function is reponsible for keeping track of relevant information from previous states.
This can safely be done in this method as it will only be called *once per state* *i.e.*,
this method is not a getter and can have side effects.
Parameters
----------
model:
The :py:class:`~ecole.scip.Model`, model defining the current state of the solver.
done:
A flag indicating whether the state is terminal (as decided by the environment).
Returns
-------
:
The return is passed to the user by the environment.
"""
...
def _set_docstring(doc):
"""Decorator to dynamically set docstring."""
def decorator(func):
func.__doc__ = doc
return func
return decorator
Observation = TypeVar("Observation")
class ObservationFunction(DataFunction[Observation], Protocol[Observation]):
"""Class repsonsible for extracting observations.
Observation functions are objects given to the :py:class:`~ecole.environment.Environment` to
extract the observations used to take the next action.
This class presents the interface expected to define a valid observation function.
It is not necessary to inherit from this class, as observation functions are defined by
`structural subtyping <https://mypy.readthedocs.io/en/stable/protocols.html>`_.
It exists to support Python type hints.
See Also
--------
DataFunction :
Observation functions are equivalent to the generic data function, that is, a function to
extract an arbitrary type of data.
"""
@_set_docstring(DataFunction.before_reset.__doc__)
def before_reset(self, model: ecole.scip.Model) -> None:
...
@_set_docstring(DataFunction.extract.__doc__.replace("data", "observation"))
def extract(self, model: ecole.scip.Model, done: bool) -> Observation:
...
class RewardFunction(DataFunction[float], Protocol):
"""Class responsible for extracting rewards.
Reward functions are objects given to the :py:class:`~ecole.environment.Environment`
to extract the reward used for learning.
This class presents the interface expected to define a valid reward function.
It is not necessary to inherit from this class, as reward functions are defined by
`structural subtyping <https://mypy.readthedocs.io/en/stable/protocols.html>`_.
It exists to support Python type hints.
Note
----
Rewards, or rather reward offset, are also extracted on :py:meth:`~ecole.environment.Environment.reset`.
This has no use for learning (since no action has been taken), but is useful when using the cumulative
reward sum as a metric.
See Also
--------
DataFunction :
Reward function are a specific type of generic data function where the data extracted are reward
of type ``float``.
"""
@_set_docstring(DataFunction.before_reset.__doc__)
def before_reset(self, model: ecole.scip.Model) -> None:
...
@_set_docstring(DataFunction.extract.__doc__.replace("data", "reward"))
def extract(self, model: ecole.scip.Model, done: bool) -> float:
...
Information = TypeVar("Information")
class InformationFunction(DataFunction[Dict[str, Information]], Protocol[Information]):
"""Class repsonsible for extracting the the information dictionnary.
Information functions are objects given to the :py:class:`~ecole.environment.Environment` to
extract the additional information about the environment.
A common pattern is use additional :py:class:`ecole.typing.RewardFunction` and
:py:class:`ecole.typing.ObservationFunction` to easily create information functions.
This class presents the interface expected to define a valid information function.
It is not necessary to inherit from this class, as information functions are defined by
`structural subtyping <https://mypy.readthedocs.io/en/stable/protocols.html>`_.
It exists to support Python type hints.
See Also
--------
DataFunction :
Information function are a specific type of generic data function where the data extracted
are a dictionary of string to any type.
"""
@_set_docstring(DataFunction.before_reset.__doc__)
def before_reset(self, model: ecole.scip.Model) -> None:
...
@_set_docstring(DataFunction.extract.__doc__.replace("data", "information"))
def extract(self, model: ecole.scip.Model, done: bool) -> Dict[str, Information]:
...
class InstanceGenerator(Protocol):
"""A class to generate generate and iteratate over random problem instance.
The class combines a :py:class:`~ecole.RandomGenerator` with the static function :py:meth:`generate_instance`
to provide iterating capabilities.
"""
@staticmethod
def generate_instance(
*args: Any, rng: ecole.RandomGenerator, **kwargs: Any
) -> ecole.scip.Model:
"""Generate a problem instance using the random generator for any source of randomness."""
...
@overload
def __init__(self, *args: Any, rng: ecole.RandomGenerator, **kwargs: Any) -> None:
"""Create an iterator with the given parameters and a copy of the random state."""
...
def __next__(self) -> ecole.scip.Model:
"""Generate a problem instance using the random generator of the class."""
...
def __iter__(self) -> Iterator[ecole.scip.Model]:
"""Return itself as an iterator."""
...
def seed(self, int) -> None:
"""Seed the random generator of the class."""
...
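# --- Illustrative sketch (not part of the original module) ---
# A minimal reward function satisfying the RewardFunction protocol through
# structural subtyping; the class name and its counting logic are hypothetical.
class EpisodeLengthReward:
    """Return +1 on every transition, so the cumulative reward equals the episode length."""
    def before_reset(self, model) -> None:
        # nothing to reset for this simple example
        pass
    def extract(self, model, done: bool) -> float:
        return 1.0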
```
|
{
"source": "jeffersonlizar/bitpayclient",
"score": 2
}
|
#### File: jeffersonlizar/bitpayclient/bitpay_client.py
```python
import requests
from bitpay.client import Client
from bitpay.exceptions import *
from django.conf import settings
from apps.web.models import WebConfigModel
class BitpayClient(Client):
def __init__(self, api_uri="https://bitpay.com", insecure=False, pem='', tokens={}, bitpay_data=None):
web = WebConfigModel.objects.get()
bitpay_url = settings.BITPAY_URL
if web.bitpay_mode == WebConfigModel.TEST:
bitpay_url = settings.BITPAY_TEST_URL
bitpay = bitpay_data
self.tokens = ({
'merchant': bitpay.token_merchant,
'payroll': bitpay.token_payroll
})
self.pem = bitpay.pem
self.client_id = bitpay.client_id
self.uri = bitpay_url
self.verify = not (insecure)
self.user_agent = 'bitpay-python'
def get_rates(self):
uri = settings.BITPAY_URL + "/rates"
try:
response = requests.get(uri)
except Exception as pro:
raise BitPayConnectionError(pro.args)
if response.ok:
return response.json()['data']
self.response_error(response)
def get_rate(self, currency):
uri = settings.BITPAY_URL + "/rates/" + currency
try:
response = requests.get(uri)
except Exception as pro:
raise BitPayConnectionError(pro.args)
if response.ok:
return response.json()['data']
self.response_error(response)
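# Hypothetical usage sketch (assumes a configured Django project with a WebConfigModel row
# and a bitpay_data object exposing token_merchant, token_payroll, pem and client_id):
#   client = BitpayClient(bitpay_data=my_bitpay_config)
#   print(client.get_rate('USD'))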
```
|
{
"source": "jeffersonlizar/ShipIt",
"score": 3
}
|
#### File: ShipIt/shipitchile/quotation_request.py
```python
from .exceptions import AttributeNotValidException
class QuotationRequest:
valid_properties = [
'length',
'width',
'height',
'weight',
'destiny',
'courrier_for_client',
'is_payable',
'commune_id',
]
def __init__(self, data):
self.data = {
'commune_id': None,
'courrier_for_client': None,
'destiny': "Domicilio",
'height': 0,
# "is_payable": False,
'length': 0,
'weight': 0,
'width': 0
}
for key in data:
if key not in self.valid_properties:
raise AttributeNotValidException(key)
self.data[key] = data[key]
def to_shipit_format(self):
self.data['address_attributes'] = {
'commune_id': self.data['commune_id']
}
del self.data['commune_id']
package = {
'package': self.data
}
return package
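# --- Illustrative usage sketch (not part of the original module); the values are hypothetical ---
# Run from within the shipitchile package, since the module uses a relative import:
#   quotation = QuotationRequest({'length': 30, 'width': 20, 'height': 10,
#                                 'weight': 2, 'commune_id': 317})
#   quotation.to_shipit_format()
#   # -> {'package': {..., 'destiny': 'Domicilio',
#   #                 'address_attributes': {'commune_id': 317}}}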
```
|
{
"source": "Jefferson-Lopes/ElectricCircuits",
"score": 3
}
|
#### File: ElectricCircuits/Jupyter notebooks/Circuitos Eletricos I - Semana 11.1.py
```python
from IPython.core.display import HTML
HTML("""
<style>
.output_png {
display: table-cell;
text-align: center;
vertical-align: middle;
}
</style>
""")
# # *Circuitos Elétricos I - Week 11*
# ### The Laplace integral
#
# Let $f(t)$ be a function defined on the interval $0\leq t \leq \infty$, with $t$ and $f(t)$ real. Then the function $F(s)$, defined by the Laplace integral
#
# $$\large
# \begin{equation}
# F(s)=\mathcal{L}\{f(t)\}=\int_{0}^{\infty} f(t) e^{-s t} dt,\;\; s \in \mathbb{C},
# \end{equation}
# $$
#
# is known as the Laplace transform of $f(t)$.
#
# #### The complex exponential
#
# Since $s = \sigma + j\omega$, we have
#
# $$
# e^{-s t} = e^{-(\sigma + j\omega) t} = e^{-\sigma t}e^{-j\omega t} = e^{-\sigma t} [\cos(\omega t) + j\sin(\omega t)]
# $$
#
# $$
# \begin{align}
# \mathcal{L}\{f(t)\}&=\int_{0}^{\infty} f(t) e^{-\sigma t} [\cos(\omega t) + j\sin(\omega t)] dt\\
# \mathcal{L}\{f(t)\}&=\int_{0}^{\infty} f(t) e^{-\sigma t} \cos(\omega t) dt + j\int_{0}^{\infty} f(t) e^{-\sigma t}\sin(\omega t) dt\\
# \mathcal{L}\{f(t)\}&=\int_{0}^{\infty} \left[\frac{f(t)}{e^{\sigma t}}\right] \cos(\omega t) dt + j\int_{0}^{\infty} \left[\frac{f(t)}{e^{\sigma t}}\right]\sin(\omega t) dt
# \end{align}
# $$
#
# **Existence theorem:** if $f(t)$ is piecewise continuous for $t$ on the interval $[a,\infty)$ and is of exponential order $\sigma_0$, then the Laplace integral converges for $\Re{(s)}>\sigma_0$.
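#
# **Quick worked example (added for illustration):** for the unit step $u(t)$,
#
# $$
# \mathcal{L}\{u(t)\}=\int_{0}^{\infty} e^{-s t} dt = \left.-\frac{e^{-st}}{s}\right|_{0}^{\infty} = \frac{1}{s},\;\; \Re{(s)}>0,
# $$
#
# which matches the symbolic result obtained for `sp.Heaviside(t)` further below.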
# +
import matplotlib.pyplot as plt
import numpy as np
import sympy as sp
from utils import round_expr, symdisp, symplot
# temp workaround
import warnings
from matplotlib import MatplotlibDeprecationWarning
warnings.filterwarnings('ignore', category=MatplotlibDeprecationWarning)
# +
sp.init_printing()
plt.rcParams['figure.figsize'] = 6, 4
plt.rcParams['legend.fontsize'] = 13
plt.rcParams['lines.linewidth'] = 2
plt.rcParams['axes.grid'] = False
# -
# #### Defining some symbolic variables of interest
t = sp.symbols('t', real=True)
s = sp.symbols('s')
a = sp.symbols('a', real=True, positive=True)
omega = sp.symbols('omega', real=True)
# ## The Laplace transform in Sympy
# +
# transformada de Laplace
def L(f,t,s):
return sp.laplace_transform(f, t, s, noconds=True)
# transformada inversa de Laplace
def invL(F,s,t):
return sp.inverse_laplace_transform(F, s, t, noconds=True)
# -
help(sp.laplace_transform)
# ## The unit step function
#
# #### Time domain
# +
f = sp.Heaviside(t) # função degrau unitário
symdisp('f(t) =', f)
# -
# plota função no domínio do tempo
intervalo = np.arange(-4, 4, 0.01)
symplot(t, f, intervalo, 'u(t)')
# #### Laplace domain
# +
# calcula a transformada de Laplace de u(t)
F = L(f,t,s)
symdisp('F(s) =', F)
# +
f = sp.Heaviside(t-2) # função degrau unitário em t=2
symdisp('f(t) =', f)
# -
# plota função no domínio do tempo
intervalo = np.arange(-4, 4, 0.01)
symplot(t, f, intervalo, 'u(t-2)')
# +
F = L(f,t,s)
symdisp('F(s) =', F)
# +
u1 = sp.Heaviside(t) # função degrau unitário em t=0
u2 = sp.Heaviside(t-2) # função degrau unitário em t=2
# plota função no domínio do tempo
intervalo = np.arange(-4, 4, 0.01)
symplot(t, u1-u2, intervalo, 'u(t)-u(t-2)')
# +
G = L(u1-u2,t,s)
symdisp('G(s) =', G)
# -
# ## The unit impulse function
#
# #### Time domain
# +
f = sp.DiracDelta(t)
symdisp('f(t) =', f)
# -
# #### Laplace domain
# +
# calcula a transformada de Laplace de δ(t)
F = L(f,t,s)
symdisp('F(s) =', F)
# -
# ## The exponential function
#
# #### Time domain
# +
f = sp.exp(-a*t)
symdisp('f(t) =', f)
# -
# plota função no domínio do tempo
intervalo = np.arange(-1, 4, 0.01)
symplot(t, f.subs({a:2}), intervalo, 'f(t)')
# #### Laplace domain
# +
# calcula a transformada de Laplace de f(t)
F = L(f,t,s)
symdisp('F(s) =', F)
# -
# ## The damped cosine function
#
# #### Time domain
# +
g = sp.exp(-a*t)*sp.cos(omega*t)
symdisp('g(t) =', g)
# -
# plota função no domínio do tempo
intervalo = np.arange(-1, 4, 0.01)
symplot(t, g.subs({a:2, omega:10}), intervalo, 'g(t)')
# +
G = L(g,t,s)
symdisp('G(s) =', G)
# -
# ## Underdamped response of a second-order circuit
# #### Time domain
# +
B1, B2 = sp.symbols('B1, B2', real=True)
h = sp.exp(-a*t)*(B1*sp.cos(omega*t) + B2*sp.sin(omega*t))
symdisp('h(t) =', h)
# -
# #### Laplace domain
# +
H = L(h,t,s)
symdisp('H(s) =', H)
# +
h1 = invL(H,s,t)
symdisp('h_1(t) =', h1)
# -
# ## Generate your own table of transforms
# +
func = [1,
t,
sp.exp(-a*t),
t*sp.exp(-a*t),
t**2*sp.exp(-a*t),
sp.sin(omega*t),
sp.cos(omega*t),
1 - sp.exp(-a*t),
sp.exp(-a*t)*sp.sin(omega*t),
sp.exp(-a*t)*sp.cos(omega*t),
]
func
symdisp('f(t) =', func)
# +
Fs = [L(f,t,s) for f in func]
symdisp('F(s) =', Fs)
# -
F = sp.Function('f')(t)
F
L(sp.diff(t**2*sp.exp(-a*t),t),t,s)
```
|
{
"source": "jeffersonm03/Busca_rapida_mercado_livre",
"score": 3
}
|
#### File: jeffersonm03/Busca_rapida_mercado_livre/mercado_livre.py
```python
import requests
from bs4 import BeautifulSoup
import re
import os
import time
RED = "\033[1;31m"
BLUE = "\033[1;34m"
CYAN = "\033[1;36m"
GREEN = "\033[0;32m"
RESET = "\033[0;0m"
BOLD = "\033[;1m"
REVERSE = "\033[;7m"
url_base = 'https://lista.mercadolivre.com.br/'
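# Search URLs are built by appending the query (spaces replaced by hyphens) to url_base,
# e.g. a hypothetical search for "placa de video" requests
# https://lista.mercadolivre.com.br/placa-de-video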
def buscarProduto():
produto_nome = input('Qual produto você deseja procurar? ' + RED)
print(RESET)
print('Buncando resultados...')
time.sleep(1)
os.system('clear')
print('Resultados de: ' + RED + produto_nome + RESET)
for i in range(0, len(produto_nome)):
produto_nome = re.sub(' ', '-', produto_nome)
response = requests.get(url_base + produto_nome)
site = BeautifulSoup(response.text, 'html.parser')
produtos = site.findAll('div', attrs={'class': 'andes-card'})
for produto in produtos:
print('\n')
titulo = produto.find('h2', attrs={'class': 'ui-search-item__title'})
link = produto.find('a', attrs={'class': 'ui-search-link'})
simbolo = produto.find('span', attrs={'class': 'price-tag-symbol'})
preco = produto.find('span', attrs={'class': 'price-tag-fraction'})
separador = produto.find('span', attrs={'class': 'price-tag-decimal-separator'})
cents = produto.find('span', attrs={'class': 'price-tag-cents'})
if(simbolo and preco and separador and cents):
titulo = titulo.text
link = link['href']
valor = simbolo.text + preco.text + separador.text + cents.text
print(BOLD + 'Produto: ', BLUE + titulo + RESET)
print(BOLD + 'Link do produto: ', CYAN + link + RESET)
print(BOLD + 'Preço do produto: ', GREEN + valor + RESET)
elif(simbolo and preco):
titulo = titulo.text
link = link['href']
valor = simbolo.text + preco.text
print(BOLD + 'Produto: ', BLUE + titulo + RESET)
print(BOLD + 'Link do produto: ', CYAN + link + RESET)
print(BOLD + 'Preço do produto: ', GREEN + valor + RESET)
else:
titulo = titulo.text
link = link['href']
print(BOLD + 'Produto: ', BLUE + titulo + RESET)
print(BOLD + 'Link do produto: ', CYAN + link + RESET)
print(BOLD + 'Preço do produto: ', RED + 'NaN' + RESET)
print('\n')
while True:
buscarProduto()
a = input('Pressione '+ RED +'Enter'+ RESET +' para uma nova busca! Ou '+ RED +'Digite'+ RESET +' qualquer coisa para finalizar: \n')
os.system('clear')
if(a != ''):
break
```
|
{
"source": "jeffersonm03/HandMouseMove",
"score": 3
}
|
#### File: HandMouseMove/controlarMouseMao-Pynput/HandMouseControl.py
```python
import cv2
import time
import numpy as np
from HandTrackingModule import handDetector
import math
import os
from pynput.mouse import Button, Controller
import tkinter as tk
root = tk.Tk()
#################################################
screen_width = root.winfo_screenwidth()
screen_height = root.winfo_screenheight()
#################################################
mouse = Controller()
####################################
wCam, hCam = 640, 480
####################################
cap = cv2.VideoCapture(0)
cap.set(3, wCam)
cap.set(4, hCam)
fps, pTime = 0, 0
detector = handDetector(detecConfidence=0.4)
finalizar, cond = False, 0
def attMouse(lmList, cond):
if len(lmList) != 0:
x1, y1 = lmList[4][1], lmList[4][2] # ponta do dedo polegar
x2, y2 = lmList[8][1], lmList[8][2] # ponta do dedo indicador
x3, y3 = lmList[20][1], lmList[20][2] # ponta do dedo midinho
cx, cy = (x1+x2)//2 , (y1+y2)//2
cx, cy = cx/640, cy/480 # pega o percentual de 0 a 1 dos centros
# clamp so we never send a coordinate larger than the screen size, e.g. 1.1
if cx > 1:
cx = 1
if cx < 0:
cx = 0
if cy > 1:
cy = 1
if cy < 0:
cy = 0
length1, length2 = math.hypot(x2-x1,y2-y1), math.hypot(x3-x1,y3-y1) # (distancia do dedão ao indicador), (distancia do dedao ao midinho)
if length1<35 and cond == 0: #condição para executar o comando só uma vez ("pressionar")
mouse.press(Button.left)
cond = 1
elif length1>35 and cond == 1: #condição para executar o comando só uma vez ("soltar")
mouse.release(Button.left)
cond = 0
mouse.position = (screen_width*(1 - cx), screen_height*cy) # att posição do mouse
if length2>300 : # verifica se a mão está "aberta" se a distancia do dedão ao midinho ultrapassou 300
os.system('clear')
print("Programa Finalizado!!")
return (True, cond)
else:
return (False, cond)
else:
return (False, cond)
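# Main loop (behaviour as implemented above): read a frame, locate the hand landmarks,
# move/click the mouse from the thumb-index pinch, and stop once attMouse reports an
# open hand (thumb-to-pinky distance above the threshold).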
while True:
success, img = cap.read()
img = detector.findHands(img, draw=False)
lmList = detector.findPosition(img, draw=False)
finalizar, cond = attMouse(lmList, cond)
if finalizar:
break
cv2.waitKey(1)
time.sleep(0.02)
```
|
{
"source": "jeffersonmourak/DotCommands",
"score": 3
}
|
#### File: dot_commands/dot_commands/actions.py
```python
from dot_commands import setupCommands, fileAnalizer
import os
registerd = ["setup", "info"]
def _prettyTable(data, total):
maxSize = 0
postWordSize = 7
for line in data:
if len(line["label"]) > maxSize:
maxSize = len(line["label"])
data = sorted(data, key=lambda k: k['count'])[::-1]
for index,line in enumerate(data):
percentOffset = (maxSize - len(line["label"])) + postWordSize
percent = float((100.0*line["count"]) / total)
print str(index + 1) + "º " + line["label"] + " files ",
print "-" * percentOffset,
print("%.2f%% - %d" % (percent, line["count"]))
def setup():
setupCommands.list(os.getcwd())
def info():
_dir = os.getcwd()
print "Project main language:"
print fileAnalizer.getProjectMainType(_dir)
print "\nProject Data: "
files, total = fileAnalizer.listFiles(_dir, True)
_prettyTable(files,total)
# rule of three used in _prettyTable above:
# if n files correspond to 100%, then a count of x corresponds to y = 100*x/n percent
```
#### File: cores/python/action_functions.py
```python
import subprocess
import os
from dot_commands import commands
def makeCommand(string, array):
packages = " ".join(array)
string = string + " " + packages
return string.split(" ")
def pip(packages):
commands.runCommand(makeCommand("pip install", packages))
```
#### File: cores/python/loader.py
```python
import actions
from dot_commands import dotFile
def start(currentPath, arguments):
action = arguments[0]
arguments = arguments[1:]
try:
trigger = getattr(actions, action)
trigger(arguments)
except AttributeError:
dotFile.findAndRun(action,currentPath)
```
#### File: DotCommands/dot_commands/install.py
```python
import os
import subprocess
path = os.getcwd()
def updatePermissions(filename):
command = ["chmod", "+x", filename]
try:
command = subprocess.Popen(command, stdout=subprocess.PIPE)
output = command.communicate()[0]
print output
except OSError as e:
print e,
def generateBin():
commands = ["info", "install", "setup"]
if not os.path.isdir(os.path.join(path, "bin")):
os.makedirs(path+"/bin")
for action in commands:
filename = open(path+"/bin/."+action, "w")
filename.write("python " + path + "/main.py " + action + " $*")
filename.close()
print "updating permissions for " + action
updatePermissions(path+"/bin/."+action)
def updatePath():
home = os.environ["HOME"]
bashprofile = open(os.path.join(home,".bash_profile"), 'a')
bashprofile.write("\nexport PATH=\"" + path + "/bin:$PATH\"")
bashprofile.close()
print "Generating binaries"
generateBin()
print "Updating Path"
updatePath()
```
|
{
"source": "jeffersonmourak/UrgentCode",
"score": 2
}
|
#### File: UrgentCode/webapp/actions.py
```python
from django.shortcuts import render_to_response,redirect
from django.http import HttpResponse
from django.core.context_processors import csrf
from forum import usersControl,ForumsList
from security import securityControl
from webapp import models
def isNotEmpty(s):
return bool(s and s.strip())
def reply(request,user,forum):
users = usersControl.users(request)
security = securityControl.Security(request,users)
if request.method == "POST":
if security.islogged():
answer = request.POST.get("answer",False)
loggedUser = users.logged()
loggedUser_id = users.id(loggedUser.username)
forum_id = ForumsList.forumId(forum,user)
if not answer == False and isNotEmpty(answer):
answers = models.Answers(user_id=loggedUser_id,forum_id=forum_id,message=answer)
answers.save()
loggedUser.addPoint(5)
return redirect("/forum/" + user + "/" + forum)
else:
render_to_response("forum/404.html")
else:
render_to_response("forum/404.html")
def like(request,user,forum,answer):
users = usersControl.users(request)
security = securityControl.Security(request,users)
if security.islogged():
loggedUser = users.logged()
user_id = users.id(loggedUser.username)
likesModel = models.Likes
likes = likesModel.objects.filter(user_id=user_id, answer_id=answer)
if len(likes) == 0:
like = models.Likes(user_id=user_id,answer_id=answer)
like.save()
return redirect("/forum/" + user + "/" + forum)
else:
render_to_response("forum/404.html")
def unlike(request,user,forum,answer):
users = usersControl.users(request)
security = securityControl.Security(request,users)
if security.islogged():
loggedUser = users.logged()
loggedUser_id = users.id(loggedUser.username)
likesModel = models.Likes
likes = likesModel.objects.filter(user_id=loggedUser_id, answer_id=answer)
likes[0].delete()
return redirect("/forum/" + user + "/" + forum)
else:
render_to_response("forum/404.html")
```
#### File: UrgentCode/webapp/views.py
```python
from django.shortcuts import render_to_response
from django.http import HttpResponse
def index(request):
return render_to_response("index.html")
```
|
{
"source": "JeffersonOliveira/Exercises--OO2-with-Python3",
"score": 3
}
|
#### File: Exercises--OO2-with-Python3/Exercises/folhaDePagamento.py
```python
class FolhaDePagamento:
@staticmethod
def log():
return f'Isso é um log qualquer.'
#folha = FolhaDePagamento()
#print(folha.log())
print(FolhaDePagamento.log())
```
#### File: Exercises--OO2-with-Python3/Exercises/pessoa_02.py
```python
class Pessoa:
tamanho_cpf = 11
def __init__(self, nome, cpf):
self.__nome = nome
self.__cpf = cpf
@property
def cpf(self):
return self.__cpf
def valida_cpf(self):
return True if len(self.cpf) == __class__.tamanho_cpf else False
pe2 = Pessoa('Jess','0000000000')
print(pe2.valida_cpf())
pe = Pessoa('Jeff','00000000001')
print(pe.valida_cpf())
```
|
{
"source": "JeffersonOliveira/Exercises--OO_Fundamentals_with_Python",
"score": 4
}
|
#### File: JeffersonOliveira/Exercises--OO_Fundamentals_with_Python/conta.py
```python
class Conta:
def __init__(self, numero, titular, saldo, limite=1000.0):
print('Construindo um objeto ... {}'.format(self))
self.__numero = numero
self.__titular = titular
self.__saldo = saldo
self.__limite = limite
self.__codigo_banco = '001'
@property
def numero(self):
return self.__numero
@property
def titular(self):
return self.__titular
@property
def saldo(self):
return self.__saldo
@property
def limite(self):
return self.__limite
@property
def codigo_banco(self):
return self.__codigo_banco
@limite.setter
def limite(self, novo_limite):
self.__limite = novo_limite
def obtem_extrato(self):
print('Saldo de {} do titular {}'.format(self.__saldo, self.__titular))
def deposita(self, valor):
self.__saldo += valor
def __pode_sacar(self,valor):
valor_disponivel_para_saque = self.__saldo + self.__limite
return valor <= valor_disponivel_para_saque
def saca(self, valor):
if (self.__pode_sacar(valor)):
self.__saldo -= valor
else:
print('O valor {} passou da soma do saldo e limite. Saque não realizado'.format(valor))
def transfere(self, valor, destino):
self.saca(valor)
destino.deposita(valor)
@staticmethod
def codigos_bancos():
return {'BB':'001','Caixa':'104','Bradesco':'237'}
if __name__ == '__main__':
conta1 = Conta(1, 'Jefferson', 2500.0)
print(conta1.__dict__)
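# A further usage sketch (hypothetical values), continuing the demo above:
#   conta2 = Conta(2, 'Maria', 1000.0)
#   conta1.transfere(500.0, conta2)
#   conta1.obtem_extrato()   # Saldo de 2000.0 do titular Jefferson
#   conta2.obtem_extrato()   # Saldo de 1500.0 do titular Maria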
```
#### File: Exercises--OO_Fundamentals_with_Python/Exercises/datas.py
```python
class Data:
def __init__(self,dia,mes,ano):
self.dia = dia
self.mes = mes
self.ano = ano
def formatada(self):
print('{}/{}/{}'.format(self.dia,self.mes, self.ano))
'''
def formatada(self):
print(f'{self.dia:02d}/{self.mes:02d}/{self.ano}')
def formatada(self):
print(f'{self.dia:0>2}/{self.mes:0>2}/{self.ano}')
'''
```
#### File: Exercises--OO_Fundamentals_with_Python/Exercises/quadrilatero.py
```python
class Quadrilatero:
def __init__(self, lado1, lado2):
self.__lado1 = lado1
self.__lado2 = lado2
def retangulo():
pass
def retangulo():
pass
def retangulo():
pass
```
|
{
"source": "JeffersonQin/deep-learning",
"score": 3
}
|
#### File: d2l/utils/d2lhelper.py
```python
import numpy as np
import random
import torch
import torchvision
import torch.nn as nn
import time
from torch.utils import data
from d2l import torch as d2l
from matplotlib import pyplot as plt
from IPython import display
#################### Data Pipeline ####################
# Wrap the Fashion-MNIST loaders and add optional Resize support
def load_data_fashion_mnist(batch_size, dataloader_worker_count, resize=None):
trans = [torchvision.transforms.ToTensor()]
if resize:
trans.insert(0, torchvision.transforms.Resize(resize))
trans = torchvision.transforms.Compose(trans)
mnist_train = torchvision.datasets.FashionMNIST(
root="../data", train=True, transform=trans, download=True)
mnist_test = torchvision.datasets.FashionMNIST(
root="../data", train=False, transform=trans, download=True)
return (data.DataLoader(mnist_train, batch_size, shuffle=True, num_workers=dataloader_worker_count),
data.DataLoader(mnist_test, batch_size, shuffle=False, num_workers=dataloader_worker_count))
def load_data_iter(features, labels, batch_size):
'''
return a data iterator with mini-batch feature
'''
num = len(features)
indices = list(range(num))
# randomize
random.shuffle(indices)
for i in range(0, num, batch_size):
# use min to prevent crossing border
batch_indices = torch.tensor(indices[i : min(i + batch_size, num)])
yield features[batch_indices], labels[batch_indices]
#################### Test & Train ####################
def evaluate_accuracy(net, data_iter, device=torch.device('cpu')):
'''
evaluate accuracy of a model with given dataset
'''
net.eval() # switch to evaluation mode for Dropout, BatchNorm etc. layers
if not device: # 查询第一个参数所在的第一个设备
device = list(net.collect_params().values())[0].list_ctx()[0]
acc_sum, n = torch.tensor([0], dtype=torch.float32, device=device), 0
for X, y in data_iter:
# Copy to device
X, y = X.to(device), y.to(device)
with torch.no_grad():
y = y.long()
acc_sum += torch.sum((torch.argmax(net(X), dim=1) == y))
n += y.shape[0]
return acc_sum.item() / n
def train_cnn_ch6(net, train_iter, test_iter, num_epochs, lr, device):
def init_weights(m):
if type(m) == nn.Linear or type(m) == nn.Conv2d:
nn.init.xavier_uniform_(m.weight)
# init params
net.apply(init_weights)
# copy to device
print('training on', device)
net.to(device)
# define optimizer & loss function
optimizer = torch.optim.SGD(net.parameters(), lr=lr)
loss = nn.CrossEntropyLoss()
animator = Animator(xlabel='epoch', xlim=[1, num_epochs],
legend=['train loss', 'train acc', 'test acc'])
timer = Timer()
num_batches = len(train_iter)
for epoch in range(num_epochs):
# train loss, train acc sum, sample count
metric = d2l.Accumulator(3)
# switch to train mode
net.train()
for i, (X, y) in enumerate(train_iter):
timer.start()
optimizer.zero_grad()
X, y = X.to(device), y.to(device)
y_hat = net(X)
l = loss(y_hat, y)
l.backward()
optimizer.step()
with torch.no_grad():
y = y.long()
acc_sum = torch.sum((torch.argmax(net(X), dim=1) == y))
n = y.shape[0]
metric.add(l * X.shape[0], acc_sum, n)
timer.stop()
train_l = metric[0] / metric[2]
train_acc = metric[1] / metric[2]
if (i + 1) % (num_batches // 5) == 0 or i == num_batches - 1:
animator.add(epoch + (i + 1) / num_batches,
(train_l, train_acc, None))
test_acc = evaluate_accuracy(net, test_iter, device)
animator.add(epoch + 1, (None, None, test_acc))
print(f'loss {train_l:.3f}, train acc {train_acc:.3f}, '
f'test acc {test_acc:.3f}')
print(f'{metric[2] * num_epochs / timer.sum():.1f} examples/sec '
f'on {str(device)}')
#################### Plotting ####################
# from d2l
def set_axes(axes, xlabel, ylabel, xlim, ylim, xscale, yscale, legend):
"""A utility function to set matplotlib axes"""
axes.set_xlabel(xlabel)
axes.set_ylabel(ylabel)
axes.set_xscale(xscale)
axes.set_yscale(yscale)
axes.set_xlim(xlim)
axes.set_ylim(ylim)
if legend: axes.legend(legend)
axes.grid()
# from d2l
class Animator(object):
def __init__(self, xlabel=None, ylabel=None, legend=[], xlim=None,
ylim=None, xscale='linear', yscale='linear', fmts=None,
nrows=1, ncols=1, figsize=(3.5, 2.5)):
"""Incrementally plot multiple lines."""
self.fig, self.axes = plt.subplots(nrows, ncols, figsize=figsize)
if nrows * ncols == 1: self.axes = [self.axes,]
# use a lambda to capture arguments
self.config_axes = lambda : set_axes(
self.axes[0], xlabel, ylabel, xlim, ylim, xscale, yscale, legend)
self.X, self.Y, self.fmts = None, None, fmts
def add(self, x, y):
"""Add multiple data points into the figure."""
if not hasattr(y, "__len__"): y = [y]
n = len(y)
if not hasattr(x, "__len__"): x = [x] * n
if not self.X: self.X = [[] for _ in range(n)]
if not self.Y: self.Y = [[] for _ in range(n)]
if not self.fmts: self.fmts = ['-'] * n
for i, (a, b) in enumerate(zip(x, y)):
if a is not None and b is not None:
self.X[i].append(a)
self.Y[i].append(b)
self.axes[0].cla()
for x, y, fmt in zip(self.X, self.Y, self.fmts):
self.axes[0].plot(x, y, fmt)
self.config_axes()
display.display(self.fig)
display.clear_output(wait=True)
#################### Accumulator ####################
# from d2l
class Accumulator(object):
"""
Sum a list of numbers over time
from: https://github.com/dsgiitr/d2l-pytorch/blob/master/d2l/base.py
"""
def __init__(self, n):
self.data = [0.0] * n
def add(self, *args):
self.data = [a + b for a, b in zip(self.data, args)]
def reset(self):
self.data = [0] * len(self.data)
def __getitem__(self, i):
return self.data[i]
#################### Timer ####################
# from d2l
class Timer(object):
"""Record multiple running times."""
def __init__(self):
self.times = []
self.start()
def start(self):
"""Start the timer"""
self.start_time = time.time()
def stop(self):
"""Stop the timer and record the time in a list"""
self.times.append(time.time() - self.start_time)
return self.times[-1]
def avg(self):
"""Return the average time"""
return sum(self.times)/len(self.times)
def sum(self):
"""Return the sum of time"""
return sum(self.times)
def cumsum(self):
"""Return the accumuated times"""
return np.array(self.times).cumsum().tolist()
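# --- Illustrative sketch (not part of the original module) ---
# Minimal smoke test for the bookkeeping helpers above; no dataset or GPU is required.
if __name__ == '__main__':
    timer = Timer()
    metric = Accumulator(2)  # e.g. running value sum and sample count
    for step in range(3):
        metric.add(0.5 * step, 1)
    timer.stop()
    print(f'mean value {metric[0] / metric[1]:.3f} over {int(metric[1])} samples '
          f'in {timer.sum():.4f}s')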
```
|
{
"source": "JeffersonQin/lightnovel_epub",
"score": 3
}
|
#### File: lightnovel_epub/utils/downloader.py
```python
import sys
import requests
import os
import unicodedata
import re
import click
import traceback
from utils import echo
def _download_file(url, dir, headers):
echo.push_subroutine(sys._getframe().f_code.co_name)
echo.clog(f'start downloading: {url} => {dir}')
ret = 0
try:
# start and block request
r = requests.get(url, stream=True, headers=headers, timeout=3000)
# obtain content length
length = int(r.headers['content-length'])
echo.clog(f'file size: {size_description(length)}')
if os.path.exists(dir) and os.path.getsize(dir) == length:
echo.clog(f'file already exists {dir}')
else:
# start writing
f = open(dir, 'wb+')
# show in progressbar
with click.progressbar(label="Downloading from remote: ", length=length) as bar:
for chunk in r.iter_content(chunk_size = 512):
if chunk:
f.write(chunk)
bar.update(len(chunk))
echo.csuccess('Download Complete.')
f.close()
except Exception as err:
echo.cerr(f'Error: {repr(err)}')
traceback.print_exc()
ret = 1
finally:
echo.pop_subroutine()
return ret
def download_file(url, dir, headers, trial=5):
fail_count = 0
while True:
ret = _download_file(url, dir, headers)
if ret == 0:
return
if fail_count < trial:
fail_count += 1
echo.cerr(f'Download failed, Trial {fail_count}/{trial}')
else:
echo.cexit('Download failed. Exceeded trial limit.')
def _download_webpage(url, headers, encoding):
'''
Download webpage from url.
:param url: url to download
'''
echo.push_subroutine(sys._getframe().f_code.co_name)
echo.clog(f'start downloading: {url} => memory')
# download
try:
return requests.get(url=url, headers=headers).content.decode(encoding)
except Exception as e:
echo.cerr(f'error: {repr(e)}')
traceback.print_exc()
return -1
finally:
echo.pop_subroutine()
def download_webpage(url, headers, encoding='utf-8', trial=5):
'''
Download webpage from url.
:param url: url to download
:param trial: number of trials
'''
fail_count = 0
while True:
ret = _download_webpage(url, headers, encoding)
if ret != -1:
return ret
if fail_count < trial:
fail_count += 1
echo.cerr(f'Download failed, Trial {fail_count}/{trial}')
else:
echo.cexit('Download failed. Exceeded trial limit.')
def size_description(size):
'''
Taken and modified from https://blog.csdn.net/wskzgz/article/details/99293181
'''
def strofsize(integer, remainder, level):
if integer >= 1024:
remainder = integer % 1024
integer //= 1024
level += 1
return strofsize(integer, remainder, level)
else:
return integer, remainder, level
units = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
integer, remainder, level = strofsize(size, 0, 0)
if level + 1 > len(units):
level = -1
return ( '{}.{:>03d} {}'.format(integer, remainder, units[level]) )
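# --- Illustrative sketch (not part of the original module) ---
if __name__ == '__main__':
    # size_description is pure, so it can be exercised without any network access
    print(size_description(1536))  # -> 1.512 KB
    # A hypothetical download call (URL and headers are illustrative) would look like:
    #   download_webpage('https://example.com/', headers={'User-Agent': 'Mozilla/5.0'})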
```
|
{
"source": "JeffersonQin/YuzuMarker",
"score": 3
}
|
#### File: YuzuMarker.IPS/utils/cv.py
```python
import numpy as np
def reverse(src):
return np.zeros(src.shape, dtype=src.dtype) + 255 - src
def minAreaNonRotatedRectangle(points):
y_1 = points[:, 0, 0].min()
y_2 = points[:, 0, 0].max()
x_1 = points[:, 0, 1].min()
x_2 = points[:, 0, 1].max()
return ((int((y_1 + y_2) / 2), int((x_1 + x_2) / 2)),
(int(x_2 - x_1), int(y_2 - y_1)), -90)
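# --- Illustrative sketch (not part of the original module) ---
if __name__ == '__main__':
    # contour points in OpenCV's (N, 1, 2) layout; the values are hypothetical
    pts = np.array([[[10, 20]], [[50, 20]], [[50, 80]], [[10, 80]]])
    print(minAreaNonRotatedRectangle(pts))  # -> ((30, 50), (60, 40), -90)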
```
#### File: YuzuMarker/YuzuMarker.IPS/YuzuIPS.py
```python
from fastapi import FastAPI
from typing import Optional
import uvicorn
from matplotlib import pyplot as plt
import cv2
import numpy as np
from utils import cv
from utils import peak
from utils import helper
host = "127.0.0.1"
port = 1029
app = FastAPI()
@app.get('/')
def read_root():
return {"Hello": "World"}
@app.get('/detect_peak_color_bgr')
def detect_peak_color_bgr(src: str,
mask: str,
thres: Optional[float] = 0.1,
min_dist: Optional[int] = 1,
thres_abs: Optional[bool] = False,
preferred_r: Optional[int] = 255,
preferred_g: Optional[int] = 255,
preferred_b: Optional[int] = 255):
try:
img = cv2.imread(src)
mask = cv2.imread(mask, cv2.IMREAD_GRAYSCALE)
mask = cv2.threshold(mask, 0, 255, cv2.THRESH_BINARY)[1]
preferred_color_bgr = [preferred_b, preferred_g, preferred_r]
ret = []
for i in range(3):
hist = cv2.calcHist([img], [i], mask, [256], [0, 256])
peaks = peak.indexes(hist.squeeze(), thres, min_dist, thres_abs)
if peaks.shape[0] == 0:
peaks = np.array([int(np.argmax(hist.squeeze()))])
ret.append(int(peaks[np.argmin(np.abs(peaks - preferred_color_bgr[i]))]))
return helper.get_server_success_message({
'b': ret[0],
'g': ret[1],
'r': ret[2]
})
except Exception as e:
return helper.get_server_exception_message(e)
@app.get('/detect_max_color_bgr')
def detect_max_color_bgr(src: str, mask: str):
try:
img = cv2.imread(src)
mask = cv2.imread(mask, cv2.IMREAD_GRAYSCALE)
mask = cv2.threshold(mask, 0, 255, cv2.THRESH_BINARY)[1]
ret = []
for i in range(3):
hist = cv2.calcHist([img], [i], mask, [256], [0, 256])
ret.append(int(np.argmax(hist.squeeze())))
return helper.get_server_success_message({
'b': ret[0],
'g': ret[1],
'r': ret[2]
})
except Exception as e:
return helper.get_server_exception_message(e)
@app.get('/detect_text_naive')
def detect_text_naive(src: str,
text_direction: Optional[str] = 'v',
text_color: Optional[str] = 'b',
rotated_output: Optional[bool] = True,
ed_iteration: Optional[int] = 3,
area_threshold: Optional[int] = 500,
kernel_ratio: Optional[float] = 0.005):
img = cv2.imread(src)
plt.imshow(img)
plt.show()
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_OTSU + cv2.THRESH_BINARY)
if text_color == 'b':
binary = cv.reverse(binary)
kernel_size_x = 1
kernel_size_y = 1
if text_direction == 'v':
kernel_size_y = int(img.shape[1] * kernel_ratio)
else:
kernel_size_x = int(img.shape[1] * kernel_ratio)
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (kernel_size_x, kernel_size_y))
dilation = cv2.dilate(binary, kernel, iterations=ed_iteration)
erosion = cv2.erode(dilation, kernel, iterations=ed_iteration)
plt.imshow(erosion, cmap=plt.cm.gray)
contours, hierarchy = cv2.findContours(erosion, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
region = []
for i in range(len(contours)):
cnt = contours[i]
# compute the area of this contour
area = cv2.contourArea(cnt)
# skip contours whose area is too small
if (area < area_threshold):
continue
if rotated_output:
# find the minimum-area rectangle, which may be rotated
rect = cv2.minAreaRect(cnt)
else:
rect = cv.minAreaNonRotatedRectangle(cnt)
# box holds the coordinates of the four corner points
box = cv2.boxPoints(rect)
box = np.int0(box)
region.append(box)
green_box = cv2.drawContours(img, region, -1, (0, 255, 0), 2)
plt.imshow(green_box)
plt.show()
if __name__ == '__main__':
uvicorn.run(app, host=host, port=port)
```
|
{
"source": "Jefferson-S-Rodrigues/AjudaContinhas",
"score": 2
}
|
#### File: back/aritmetica/views.py
```python
from django.shortcuts import render
from random import randint, choice
from math import floor
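# The 'contas' session entry stores [a, b, operation (0 = multiplication, 1 = division with remainder), consecutive errors, correct answers, total attempts].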
def continhas(request):
trocar = True
acertou = None
pessoal = None
resto = None
qnt_erros = 0
acertos = 0
total = 0
if request.method == 'POST':
if 'contas' in request.session:
a = request.session['contas'][0]
b = request.session['contas'][1]
o = request.session['contas'][2]
qnt_erros = request.session['contas'][3]
acertos = request.session['contas'][4]
total = request.session['contas'][5]
del (request.session['contas'])
else:
a = None
b = None
o = None
acertos = 0
total = 0
total += 1
if o == 0:
r = a * b
res = 0
else:
r = floor(a / b)
res = a % b
pessoal = int(request.POST['resposta'])
resto = int(request.POST.get('resto', 0))
if pessoal == r and (o == 0 or resto == res):
trocar = True
acertou = 1
qnt_erros = 0
acertos += 1
else:
trocar = False
acertou = 0
qnt_erros += 1
elif 'contas' in request.session:
del (request.session['contas'])
if trocar or qnt_erros > 3:
o = randint(0, 1)
if o == 0:
a = randint(1000, 10000)
b = randint(0, 1000)
else:
a = randint(1000, 10000)
b = randint(1, 12)
request.session['contas'] = [a, b, o, qnt_erros, acertos, total]
perc = floor(acertos / total * 100) if total > 0 else '-'
return render(
request, 'continhas.html',
{
'a': a,
'b': b,
'o': o,
'acertou': acertou,
'pessoal': pessoal,
'resto': resto,
'perc': perc,
'total': total
}
)
def continhasB(request):
trocar = True
acertou = None
pessoal = None
qnt_erros = 0
acertos = 0
total = 0
if request.method == 'POST':
if 'contasB' in request.session:
expressao = request.session['contasB'][0]
qnt_erros = request.session['contasB'][1]
acertos = request.session['contasB'][2]
total = request.session['contasB'][3]
del (request.session['contasB'])
else:
expressao = None
acertos = 0
total = 0
total += 1
r = eval(expressao)
pessoal = int(request.POST['resposta'])
if pessoal == r:
trocar = True
acertou = 1
qnt_erros = 0
acertos += 1
else:
trocar = False
acertou = 0
qnt_erros += 1
elif 'contasB' in request.session:
del (request.session['contasB'])
if trocar or qnt_erros > 3:
l = 1
m = 20
contas = ['+','-','*','+','*']
ff = randint(0, 1)
expressao = str(randint(l, m))
expressao += choice(contas)
expressao += str(randint(l, m))
expressao += choice(contas)
expressao += str(randint(l, m))
expressao += choice(contas)
expressao += str(randint(l, m))
if ff > 0:
expressao += choice(contas)
expressao += str(randint(l, m))
request.session['contasB'] = [expressao, qnt_erros, acertos, total]
perc = floor(acertos / total * 100) if total > 0 else '-'
return render(
request, 'continhasB.html',
{
'expressao': expressao.replace('*', 'x'),
'acertou': acertou,
'pessoal': pessoal,
'perc': perc,
'total': total
}
)
```
|
{
"source": "jeffersonvenancio/therightpark",
"score": 3
}
|
#### File: python/car/controller.py
```python
import json
from flask import Blueprint, request, session
from google.appengine.api import search
from car.model import Car
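# Minimal REST endpoints for listing, fetching by id, and creating Car entities on Google App Engine.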
cars = Blueprint('cars', __name__)
@cars.route('/', methods=['GET'])
def get_all():
cars = [u.to_dict() for u in Car.query().fetch()]
return json.dumps(cars)
@cars.route('/<int:car_id>', methods=['GET'])
def get_by_id(car_id):
car = Car.get_by_id(car_id).to_dict()
return json.dumps(car)
@cars.route('/', methods=['POST'], strict_slashes=False)
def add():
plate = request.form['plate']
rfid = request.form['rfid']
pref = request.form['pref']
car = Car(plate=plate, rfid=rfid, pref=False)
car.put()
return '', 204
```
|
{
"source": "jeffersonvivanco/Algorithms",
"score": 2
}
|
#### File: dijkstra_algorithm/maps/maps_exception.py
```python
class MapsException(Exception):
def __init__(self, *args: object, **kwargs: object) -> None:
super().__init__(*args, **kwargs)
```
#### File: python/guessingWordProblem/guess_word_helper.py
```python
from functools import total_ordering
import random
guess_word_helper_var = {}
# This class is to create letter objects.
# A Letter object represents a letter in the word the user has to guess.
# A Letter object consists of two properties, the letter as a string and the index as a number
# The index property is the index the letter is located in the word the user has to guess
# For example, if the word is fuzzy, a Letter object representing f will have f as its letter and 0 as its index
# The index is used in the algorithm to determine if a user guessed the correct letter and if so, we show them the
# correct index of the letter they guessed in the word to help them in their next guess
@total_ordering
class Letter():
def __init__(self, letter, index):
self.letter = letter
self.index = index
def __str__(self):
return 'letter: ' + self.letter + ' index: ' + str(self.index)
def __eq__(self, other):
return self.letter == other.letter
def __lt__(self, other):
return self.letter < other.letter
# guess function which returns which letters in your guess you got correct
def guess(s):
word_to_solve = guess_word_helper_var['word']
word_to_solve_letter_object = guess_word_helper_var['letter_object']
s_arr = sorted([l for l in s]) # sorts guess word so it can compare with the word to solve which is also sorted
s_arr_new = ['_' for i in range(0, 5)] # creates an array of '_' elements so it can form a string later of the letters the user guessed correctly
is_equal = True # flag used to speed up the process of returning whether the guess was right or not
# Below: indeces are set to 0 so it can compare the first letters of the user's guess and the word to solve
word_to_solve_index = 0
s_arr_index = 0
# Below: flag that if it does not get set to False in the while loop, it will terminate the while loop
# We need this flag to tell the while loop to stop comparing letters.
# If an index is checked, it is set to False, preventing the loop from terminating until all letters have been compared
ready_to_break = True
# Below: this flag is used to determine if the guess word equals the word to solve
# We need this in case all the letters in the user's guess are correct, just not the right word
# This flag is used to inform the user about why their guess is correct
word_not_equal = s == word_to_solve
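# Merge-style scan: the guess and the target word are both kept as sorted letter sequences, and two indices advance in tandem; every match is written into s_arr_new at the letter's original position in the target word.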
while True: # loop that does the comparison of the letters in the user's guess and the word to solve
ready_to_break = True
if s_arr[s_arr_index] == word_to_solve_letter_object[word_to_solve_index].letter:
s_arr_new[word_to_solve_letter_object[word_to_solve_index].index] = word_to_solve_letter_object[word_to_solve_index].letter
if word_to_solve_index < 4:
ready_to_break = False
word_to_solve_index += 1
if s_arr_index < 4:
ready_to_break = False
s_arr_index += 1
elif s_arr[s_arr_index] > word_to_solve_letter_object[word_to_solve_index].letter:
if word_to_solve_index < 4:
ready_to_break = False
word_to_solve_index += 1
is_equal = False
elif s_arr[s_arr_index] < word_to_solve_letter_object[word_to_solve_index].letter:
if s_arr_index < 4:
ready_to_break = False
s_arr_index += 1
is_equal = False
else:
is_equal = False
if ready_to_break:
break
if is_equal: # if True, the user's guess was correct, just return the word to solve
return word_to_solve
else:
s_arr_new_str = ''.join(s_arr_new)
if s_arr_new_str == word_to_solve and not word_not_equal:
print("You didn't guess the word correctly, however all the letters in your word were correct just not in the\n right order, so we'll count it.")
return ''.join(s_arr_new)
# Returns a random word from a file provided in project
def get_word_to_solve():
word_to_solve_letter_object = []
# Below: Read 5 letter words from file, pick a word for the user to guess
# Below: Note: There are 5757 5 letter words in file_letter_words.txt. To get a random word first we pick a random number
# between 1 and 5757
random_word_line = random.randint(1, 5757)
# Then we read the file and stop at random_word_line and store that word
line_counter_start = 1
word_to_solve = 'fuzzy' # incase it fails to get the word
with open('/Users/jeffersonvivanco/Documents/Algorithms/python/guessingWordProblem/five_letter_words.txt',
'rt') as f:
for line in f:
if random_word_line == line_counter_start:
word_to_solve = line.strip() # picking a word
line_counter_start += 1
# making an array of letter objects of the word picked
# we save index so later the guess function can determine if the word is correct
curr_index = 0
for l in word_to_solve:
letter = Letter(l, curr_index)
word_to_solve_letter_object.append(letter)
curr_index += 1
word_to_solve_letter_object = sorted(word_to_solve_letter_object) # sorting list of list objects so we can compare with the string in the guess function
guess_word_helper_var['letter_object'] = word_to_solve_letter_object
guess_word_helper_var['word'] = word_to_solve
def get_word():
return guess_word_helper_var['word']
```
#### File: Algorithms/python/LinkedList.py
```python
class Node():
def __init__(self, number, next = None):
self.number = number
self.next = next
def _set_next_(self, next):
self.next = next
def _set_data_(self, data):
self.data = data
def _print_node_(self):
if self.next:
return 'Node number: ' + str(self.number) + ' , number of next element: ' + str(self.next.number)
else:
return 'Node number: ' + str(self.number) + ' , number of next element: None'
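# Minimal singly linked list keyed on Node.number: append, iterative delete-by-key, recursive delete-by-key, and a string dump of the chain.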
class LinkedList():
def __init__(self, node):
self.head = node
self.next = None
def _add_node(self, node):
temp = self.head
while temp.next is not None:
temp = temp.next
temp.next = node
def _delete_node(self, key):
temp = self.head
if temp is None:
return None
while temp.number != key and temp.next is not None:
follower = temp
temp = temp.next
if temp.number != key:
return None
if temp.number == key and temp is self.head:
deleted_number = temp.number
self.head = temp.next
return deleted_number
if temp.number == key and temp is not self.head:
deleted_number = temp.number
follower.next = temp.next
return deleted_number
return None
# delete node recursive method
def _delete_node_rec(self, key):
if self.head is None:
return None
else:
self.head = self._delete_node_rec_helper(self.head, key)
def _delete_node_rec_helper(self, L, key):
if L is None:
return None
if L.number == key:
return L.next
else:
L._set_next_(self._delete_node_rec_helper(L.next, key))
return L
def _print_list_(self):
temp = self.head
str_rep = str(temp.number)
while temp.next is not None:
temp = temp.next
str_rep += ' -> ' + str(temp.number)
return str_rep
head = LinkedList(Node(1))
head._add_node(Node(2))
head._add_node(Node(3))
head._add_node(Node(4))
head._add_node(Node(5))
head._add_node(Node(6))
head._add_node(Node(7))
head._delete_node_rec(1)
print('Deleting number ' + str(1))
print(head._print_list_())
```
|
{
"source": "jeffersonvivanco/LeetCode",
"score": 4
}
|
#### File: LeetCode/python/adding_two_numbers.py
```python
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
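# Grade-school addition on two non-negative integers stored as linked lists of digits, least significant digit first, carrying between nodes.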
def addTwoNumbers(l1, l2):
l1_temp = l1
l2_temp = l2
res = None
res_temp = None
carry_over = 0
while l1_temp != None or l2_temp != None:
total = carry_over
if l1_temp != None:
total += l1_temp.val
l1_temp = l1_temp.next
if l2_temp != None:
total += l2_temp.val
l2_temp = l2_temp.next
carry_over = 0
if total >= 10:
total = total % 10
carry_over = 1
if res_temp == None:
res_temp = ListNode(total)
res= res_temp
else:
res_temp.next = ListNode(total)
res_temp = res_temp.next
if carry_over == 1:
res_temp.next = ListNode(carry_over)
return res
import unittest
class AddTwoNumbersTest(unittest.TestCase):
def test_add_two_numbers(self):
l1 = ListNode(2)
l1.next = ListNode(4)
l1.next.next = ListNode(3)
l2 = ListNode(5)
l2.next = ListNode(6)
l2.next.next = ListNode(4)
res = addTwoNumbers(l1, l2)
while res != None:
print(res.val)
res = res.next
unittest.main()
```
#### File: LeetCode/python/longest_substring.py
```python
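# Sliding-window scan: `letters` maps each character to the index where it was last seen, `count` tracks the length of the current repeat-free window, and the window start jumps past a repeated character whenever it falls inside the window.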
def length_of_longest_substring(s):
letters = {}
maxz = 0
count = 0
start_index = 0
for l in range(0, len(s)):
if s[l] not in letters:
letters[s[l]] = l
count += 1
else:
maxz = max(count, maxz)
# check if repeated letter is part of current longest substring
if (l - count) > (letters[s[l]] + 1):
count += 1
letters[s[l]] = l
continue
# get index of repeated letter
r = letters[s[l]]
# setting new count
count -= (r - start_index) + 1
count += 1
# setting start index of current longest substring
# we set it to the next letter, next to the repeated letter
start_index = r + 1
# del repeated letter
del letters[s[l]]
# adding new letter
letters[s[l]] = l
maxz = max(maxz, count)
return maxz
print(length_of_longest_substring("aabaab!bb"))
```
#### File: LeetCode/python/median_of_two_sorted_arrays.py
```python
import math
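# Merges the two sorted inputs into one index -> value mapping, then reads the middle element (or the mean of the two middle elements) off the merged sequence; O(m + n) rather than the O(log(m + n)) partition approach.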
def findMedianSortedArrays(nums1, nums2):
nums = {}
index = 0
nums1_it = iter(nums1)
nums2_it = iter(nums2)
n1 = None
n2 = None
while True:
if n1 == None:
n1 = next(nums1_it, None)
if n2 == None:
n2 = next(nums2_it, None)
if n1 != None and n2 != None:
if n1 < n2:
nums[index] = n1
n1 = None
else:
nums[index] = n2
n2 = None
index += 1
continue
if n1 != None:
nums[index] = n1
index += 1
n1 = None
continue
if n2 != None:
nums[index] = n2
index += 1
n2 = None
continue
if n1 == None and n2 == None:
break
if (len(nums1) + len(nums2)) % 2 == 0:
median = even_median(nums1, nums2, nums)
return median
else:
median = odd_median(nums1, nums2, nums)
return median
def even_median(arr1, arr2, nums):
index1 = int((len(arr1) + len(arr2))/ 2)
index2 = index1 - 1
return (nums[index1] + nums[index2]) / 2
def odd_median(arr1, arr2, nums):
index = int(math.floor((len(arr1) + len(arr2) )/ 2))
return nums[index]
import unittest
class FindMedianSortedArraysTest(unittest.TestCase):
def test_find_median_sorted_arrays_1(self):
nums1 = [1, 3]
nums2 = [2]
median = findMedianSortedArrays(nums1, nums2)
self.assertEqual(median, 2.0)
def test_find_median_sorted_arrays_2(self):
nums1 = []
nums2 = [1]
median = findMedianSortedArrays(nums1, nums2)
self.assertEqual(median, 1)
def test_find_median_sorted_arrays_3(self):
nums1 = [3]
nums2 = [-2, -1]
median = findMedianSortedArrays(nums1, nums2)
self.assertEqual(median, -1.0)
def test_find_median_sorted_arrays_4(self):
nums1 = [1, 2]
nums2 = [3, 4]
median = findMedianSortedArrays(nums1, nums2)
self.assertEqual(median, 2.5)
unittest.main()
```
|
{
"source": "JeffersonXie/IEFP",
"score": 3
}
|
#### File: JeffersonXie/IEFP/custom_datasets_2.py
```python
import os
import torch
import torch.utils.data as data
from PIL import Image
from typing import Any, Callable, cast, Dict, List, Optional, Tuple
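# Pair-list dataset: each line of data_list is expected to contain '<image_a> <image_b> <pair_label>', with image paths given relative to data_root.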
def default_loader(path):
return Image.open(path).convert('RGB')
class Dataset_floder(data.Dataset):
def __init__(self, data_root, data_list, transform = None, loader=default_loader):
with open (data_list) as f:
lines=f.readlines()
imgs=[]
for line in lines:
cls = line.split()
img_a_name = cls.pop(0)
img_b_name = cls.pop(0)
pair_label = cls.pop(0)
if os.path.isfile(os.path.join(data_root, img_a_name)) and os.path.isfile(os.path.join(data_root, img_b_name)):
# imgs.append((img_a_name, img_b_name, tuple([int(v) for v in cls])))
imgs.append((img_a_name, img_b_name, int(pair_label)))
self.data_root = data_root
self.imgs = imgs
self.transform = transform
self.loader = loader
def __getitem__(self, index):
# def __getitem__(self, index: int) -> Tuple[Any, Any, Any]:
img_a_name, img_b_name, label = self.imgs[index]
img_a = self.loader(os.path.join(self.data_root, img_a_name))
img_b = self.loader(os.path.join(self.data_root, img_b_name))
if self.transform is not None:
img_a = self.transform(img_a)
img_b = self.transform(img_b)
return (img_a, img_b, label)
def __len__(self):
return len(self.imgs)
```
#### File: JeffersonXie/IEFP/custom_datasets.py
```python
import os, re
from torchvision.datasets import ImageFolder
from utils import path2age
class ImageFolderWithAges(ImageFolder):
"""Custom dataset that includes face image age. Extends
torchvision.datasets.ImageFolder
"""
def __init__(self, pat, pos, *args, **kwargs):
super().__init__(*args, **kwargs)
self.pat = pat
self.pos = pos
# override the __getitem__ method. this is the method that dataloader calls
def __getitem__(self, index):
# this is what ImageFolder normally returns
original_tuple = super(ImageFolderWithAges, self).__getitem__(index)
age = path2age(self.imgs[index][0], self.pat, self.pos)
# make a new tuple that includes original and the path
tuple_with_path = (original_tuple + (age,))
return tuple_with_path
class ImageFolderWithAgeGroup(ImageFolder):
"""Custom dataset that includes face image age group, categorized by [0-12, 13-18, 19-25,
26-35, 36-45, 46-55, 56-65, >= 66]. Extends torchvision.datasets.ImageFolder
"""
def __init__(self, pat, pos, cutoffs, *args, **kwargs):
super().__init__(*args, **kwargs)
self.pat = pat
self.pos = pos
self.cutoffs = cutoffs
# override the __getitem__ method. this is the method that dataloader calls
def __getitem__(self, index):
# this is what ImageFolder normally returns
original_tuple = super(ImageFolderWithAgeGroup, self).__getitem__(index)
age = path2age(self.imgs[index][0], self.pat, self.pos)
# make a new tuple that includes original and the age group
# tuple_with_path = (original_tuple + (self.find_group(age),))
tuple_with_path = original_tuple + (self.find_group(age),)
return tuple_with_path
def find_group(self, age):
group = 0
for cut in self.cutoffs:
if age > cut:
group += 1
return group
class ImageFolderWithAgeGroup_2(ImageFolder):
"""Custom dataset that includes face image age group, categorized by [0-12, 13-18, 19-25,
26-35, 36-45, 46-55, 56-65, >= 66]. Extends torchvision.datasets.ImageFolder
"""
def __init__(self, pat, pos, cutoffs, *args, **kwargs):
super().__init__(*args, **kwargs)
self.pat = pat
self.pos = pos
self.cutoffs = cutoffs
# override the __getitem__ method. this is the method that dataloader calls
def __getitem__(self, index):
# this is what ImageFolder normally returns
original_tuple = super(ImageFolderWithAgeGroup_2, self).__getitem__(index)
age = path2age(self.imgs[index][0], self.pat, self.pos)
# make a new tuple that includes original and the age group
img_path_comp = self.imgs[index][0].split('/')
img_path_comp = img_path_comp[6:]
##### this function is specially modified for FG-NET dataset
tmp_img_path = '/'.join(img_path_comp)
tuple_with_path = original_tuple + (self.find_group(age), tmp_img_path)
return tuple_with_path
def find_group(self, age):
group = 0
for cut in self.cutoffs:
if age > cut:
group += 1
return group
```
#### File: JeffersonXie/IEFP/fr_metrics.py
```python
import torch.nn as nn
import torch
import torchvision
import torch.optim as optim
import torch.nn.functional as F
from torchvision import datasets, models, transforms
from torch.nn import Parameter
import os
import numpy as np
import torchvision.models as models
import math
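# Both heads below follow the margin-based softmax recipe: L2-normalise features and class weights, add a margin to the target-class logit (angular margin for ArcFace, cosine margin for CosFace), then rescale by s.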
class ArcMarginProduct(nn.Module):
### arcface ###
def __init__(self, out_feature=10575, in_feature=512, s=64.0, m=0.50, easy_margin=False):
super(ArcMarginProduct, self).__init__()
self.in_feature = in_feature
self.out_feature = out_feature
self.s = s
self.m = m
self.weight = nn.Parameter(torch.Tensor(out_feature, in_feature))
nn.init.xavier_uniform_(self.weight)
self.easy_margin = easy_margin
self.cos_m = math.cos(m)
self.sin_m = math.sin(m)
# make the function cos(theta+m) monotonic decreasing while theta in [0°,180°]
self.th = math.cos(math.pi - m)
self.mm = math.sin(math.pi - m) * m
def forward(self, x, label):
# cos(theta)
cosine_theta = F.linear(F.normalize(x), F.normalize(self.weight))
cosine_theta = cosine_theta.clamp(-1,1) # for numerical stability
# cos(theta + m)
sine_theta = torch.sqrt(1.0 - torch.pow(cosine_theta, 2))
cosine_theta_m = cosine_theta * self.cos_m - sine_theta * self.sin_m
if self.easy_margin:
cosine_theta_m = torch.where(cosine_theta > 0, cosine_theta_m, cosine_theta)
else:
cosine_theta_m = torch.where((cosine_theta - self.th) > 0, cosine_theta_m, cosine_theta - self.mm)
#one_hot = torch.zeros(cosine.size(), device='cuda' if torch.cuda.is_available() else 'cpu')
one_hot = torch.zeros_like(cosine_theta)
one_hot.scatter_(1, label.view(-1, 1), 1)
output = (one_hot * cosine_theta_m) + ((1.0 - one_hot) * cosine_theta)
output = output * self.s
return output
class AddMarginProduct(nn.Module):
### cosface ###
def __init__(self, out_features=10575, in_features=512, s=64.0, m=0.35):
super(AddMarginProduct, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.s = s
self.m = m
self.weight = nn.Parameter(torch.FloatTensor(out_features, in_features))
nn.init.xavier_uniform_(self.weight)
def forward(self, input, label):
# --------------------------- cos(theta) & phi(theta) ---------------------------
cosine = F.linear(F.normalize(input), F.normalize(self.weight))
phi = cosine - self.m
# --------------------------- convert label to one-hot ---------------------------
one_hot = torch.zeros_like(cosine)
one_hot.scatter_(1, label.view(-1, 1), 1)
# print(one_hot.size())
# os._exit(0)
# -------------torch.where(out_i = {x_i if condition_i else y_i) -------------
output = (one_hot * phi) + ((1.0 - one_hot) * cosine)
output *= self.s
return output
```
#### File: JeffersonXie/IEFP/fr_models.py
```python
import torch
import os
from torch import Tensor
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
from fr_metrics import ArcMarginProduct, AddMarginProduct
from mi_neural_estimators import CLUB, CLUBSample
from iresnet_plus_spu import iresnet50_spu
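# init_weights dispatches on the layer class name: Kaiming-normal for Conv2d, unit weight / zero bias for BatchNorm2d, and small normal init for Linear layers.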
def init_weights(m):
classname = m.__class__.__name__
if classname.find('Conv2d') != -1:
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif classname.find('BatchNorm2d') != -1:
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif classname.find('Linear') != -1:
nn.init.normal_(m.weight, 0, 0.01)
if m.bias is not None:
nn.init.zeros_(m.bias)
class fr_model_1(nn.Module):
def __init__(self, n_cls, args):
super().__init__()
face_id_loss = args.face_id_loss
self.backbone_id = iresnet50_spu(dropout=0, num_features=args.feature_dim, spu_scale=args.spu_scale, fp16=args.fp16)
if face_id_loss == 'cosface':
self.margin_fc = AddMarginProduct(n_cls, args.feature_dim, args.scale, args.margin)
elif face_id_loss == 'arcface':
self.margin_fc = ArcMarginProduct(n_cls, args.feature_dim, args.scale, args.margin)
# initialize
if not args.pretrained:
self.backbone_id.apply(init_weights)
self.margin_fc.apply(init_weights)
def forward(self, xs, ys=None, emb=False):
# 512-D embedding
embs_id = self.backbone_id(xs)
if emb:
return F.normalize(embs_id)
id_logits = self.margin_fc(embs_id, ys)
return id_logits
```
#### File: JeffersonXie/IEFP/train.py
```python
import sys
import argparse
import random
import warnings
import os
from itertools import chain
import math
import copy
import time
import numpy as np
from fr_ae_mi_models import fr_ae_mi_model_1
from custom_datasets import ImageFolderWithAgeGroup
from custom_datasets_2 import Dataset_floder
from meta import age_cutoffs, ms1mv3, FGNET, correct_CACD_VS, correct_CALFW
from meta import MORPH2_probe_3000, MORPH2_gallery_3000, MORPH2_probe_10000, MORPH2_gallery_10000
from utils import multi_accuracies, accuracy_percent, calculate_roc_auc_eer, argmax_mae
from pre_process import image_test_1crop_transform, image_test_2crops_transform
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
import torch.nn.functional as F
from torch.nn.utils import clip_grad_norm_
from torch.cuda.amp import GradScaler
parser = argparse.ArgumentParser(description='PyTorch age-invariant face recognition')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
metavar='LR', help='initial learning rate', dest='lr')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,metavar='W',
help='weight decay (default: 1e-4)', dest='weight_decay')
parser.add_argument('--batch-size', default=512, type=int)
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('--epochs', default=42, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--schedule', default=[20, 30, 40, 50], nargs='+', type=int,
help='learning rate schedule (when to drop lr by 10x)')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model')
parser.add_argument('--output_dir', type=str, default='ms1mv3_iresnet50_frspu_ae_mi',
help="output directory of our model (in ../snapshot directory)")
parser.add_argument('--seed', default=None, type=int,
help='seed for initializing training.')
parser.add_argument('--gpu', default=None, type=int,
help='GPU id to use.')
parser.add_argument('--world-size', default=-1, type=int,
help='number of nodes for distributed training')
parser.add_argument('--rank', default=-1, type=int,
help='node rank for distributed training')
parser.add_argument('--dist-url', default='tcp://localhost:12345', type=str,
help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str,
help='distributed backend')
parser.add_argument('--multiprocessing-distributed', action='store_true',
help='Use multi-processing distributed training to launch '
'N processes per node, which has N GPUs. This is the '
'fastest way to use PyTorch for either single node or '
'multi node data parallel training')
parser.add_argument('--feature_dim', default=512, type=int)
parser.add_argument('--spu_scale', default=1, type=int)
parser.add_argument('--fp16', action='store_true')
# options for pure face recognition
parser.add_argument('--face-id-loss', default='cosface', type=str, choices=['cosface', 'arcface'])
parser.add_argument('--scale', default=64.0, type=float, metavar='M',
help='scaler in arcface or cosface')
parser.add_argument('--margin', default=0.35, type=float, metavar='M',
help='angular margin in arcface or cosine margin in cosface')
parser.add_argument('--lambda1', default=0.001, type=float)
parser.add_argument('--cos', action='store_true',
help='use cosine lr schedule')
def main():
args = parser.parse_args()
if args.seed is not None:
random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
warnings.warn('You have chosen to seed training. '
'This will turn on the CUDNN deterministic setting, '
'which can slow down your training considerably! '
'You may see unexpected behavior when restarting '
'from checkpoints.')
if args.gpu is not None:
warnings.warn('You have chosen a specific GPU. This will completely '
'disable data parallelism.')
if args.dist_url == "env://" and args.world_size == -1:
args.world_size = int(os.environ["WORLD_SIZE"])
args.distributed = args.world_size > 1 or args.multiprocessing_distributed
ngpus_per_node = torch.cuda.device_count()
if args.multiprocessing_distributed:
# Since we have ngpus_per_node processes per node, the total world_size
# needs to be adjusted accordingly
args.world_size = ngpus_per_node * args.world_size
# Use torch.multiprocessing.spawn to launch distributed processes: the
# main_worker process function
mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
else:
# Simply call main_worker function
main_worker(args.gpu, ngpus_per_node, args)
def main_worker(gpu, ngpus_per_node, args):
args.gpu = gpu
if args.gpu is not None:
print("Use GPU: {} for training".format(args.gpu))
if args.distributed:
if args.dist_url == "env://" and args.rank == -1:
args.rank = int(os.environ["RANK"])
if args.multiprocessing_distributed:
# For multiprocessing distributed training, rank needs to be the
# global rank among all the processes
args.rank = args.rank * ngpus_per_node + gpu
dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
# create model
model = fr_ae_mi_model_1(n_cls=93431, args=args)
dict_pretrained_ae_model = torch.load('./pretrained_age_estimation_models/ms1mv3_AgeDB_iresnet50_ae_1.pth')
dict_new = model.state_dict().copy()
pretrained_list = list(dict_pretrained_ae_model.keys())
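# Load the pretrained age-estimation checkpoint into this model, remapping 'backbone.*' keys to 'backbone_ae.*' so they initialise the age-estimation branch.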
for i in range(len(pretrained_list)):
tmp_name = pretrained_list[i]
tmp_splits = tmp_name.split('.')
if 'backbone' in tmp_name:
tmp_splits[0] = tmp_splits[0] + '_ae'
new_tmp_name = '.'.join(tmp_splits)
dict_new[new_tmp_name] = dict_pretrained_ae_model[tmp_name]
model.load_state_dict(dict_new)
if not torch.cuda.is_available():
print('using CPU, this will be slow')
elif args.distributed:
# For multiprocessing distributed, DistributedDataParallel constructor
# should always set the single device scope, otherwise,
# DistributedDataParallel will use all available devices.
if args.gpu is not None:
torch.cuda.set_device(args.gpu)
model.cuda(args.gpu)
# When using a single GPU per process and per
# DistributedDataParallel, we need to divide the batch size
# ourselves based on the total number of GPUs we have
args.batch_size = int(args.batch_size / ngpus_per_node)
args.workers = int((args.workers + ngpus_per_node - 1) / ngpus_per_node)
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], find_unused_parameters=True)
else:
model.cuda()
# DistributedDataParallel will divide and allocate batch_size to all
# available GPUs if device_ids are not set
model = torch.nn.parallel.DistributedDataParallel(model)
elif args.gpu is not None:
torch.cuda.set_device(args.gpu)
model = model.cuda(args.gpu)
else:
# DataParallel will divide and allocate batch_size to all available GPUs
model = torch.nn.DataParallel(model).cuda()
# define optimizers
optimizer_1 = torch.optim.SGD(
[{'params':model.module.backbone_id.parameters()},
{'params':model.module.margin_fc.parameters()}],
lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
optimizer_2 = torch.optim.SGD(model.module.clubmi.parameters(),
lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
cudnn.benchmark = True
# Data loading code
train_ds = ImageFolderWithAgeGroup(ms1mv3['pat'], ms1mv3['pos'], age_cutoffs, ms1mv3['file_root'],
transform=transforms.Compose([
transforms.Resize(112),
transforms.RandomHorizontalFlip(p=0.5),
transforms.ToTensor(),
transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
]))
test_2ds_FGNET = [ImageFolderWithAgeGroup(FGNET['pat'], FGNET['pos'], age_cutoffs, FGNET['file_root'],
transform=image_test_2crops_transform(resize_size=112, crop_size=112,
mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])[i]) for i in range(2)]
test_2ds_CACD_VS = [Dataset_floder(correct_CACD_VS['file_root'], correct_CACD_VS['pair_lists_root'],
transform=image_test_2crops_transform(resize_size=112, crop_size=112,
mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])[i]) for i in range(2)]
test_2ds_CALFW = [Dataset_floder(correct_CALFW['file_root'], correct_CALFW['pair_lists_root'],
transform=image_test_2crops_transform(resize_size=112, crop_size=112,
mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])[i]) for i in range(2)]
test_2ds_morph2_probe_3000 = [ImageFolderWithAgeGroup(MORPH2_probe_3000['pat'],
MORPH2_probe_3000['pos'], age_cutoffs, MORPH2_probe_3000['file_root'],
transform=image_test_2crops_transform(resize_size=112, crop_size=112,
mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])[i]) for i in range(2)]
test_2ds_morph2_gallery_3000 = [ImageFolderWithAgeGroup(MORPH2_gallery_3000['pat'],
MORPH2_gallery_3000['pos'], age_cutoffs, MORPH2_gallery_3000['file_root'],
transform=image_test_2crops_transform(resize_size=112, crop_size=112,
mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])[i]) for i in range(2)]
test_2ds_morph2_probe_10000 = [ImageFolderWithAgeGroup(MORPH2_probe_10000['pat'],
MORPH2_probe_10000['pos'], age_cutoffs, MORPH2_probe_10000['file_root'],
transform=image_test_2crops_transform(resize_size=112, crop_size=112,
mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])[i]) for i in range(2)]
test_2ds_morph2_gallery_10000 = [ImageFolderWithAgeGroup(MORPH2_gallery_10000['pat'],
MORPH2_gallery_10000['pos'], age_cutoffs, MORPH2_gallery_10000['file_root'],
transform=image_test_2crops_transform(resize_size=112, crop_size=112,
mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])[i]) for i in range(2)]
if args.distributed:
train_sampler = torch.utils.data.distributed.DistributedSampler(train_ds)
else:
train_sampler = None
train_ld = torch.utils.data.DataLoader(
train_ds, shuffle=(train_sampler is None), batch_size=args.batch_size,
num_workers=args.workers, pin_memory=True, sampler=train_sampler)
test_2ld_FGNET = [torch.utils.data.DataLoader(
test_2ds_FGNET[i], shuffle=False, batch_size=args.batch_size,
num_workers=args.workers, pin_memory=True) for i in range(2)]
test_2ld_CACD_VS = [torch.utils.data.DataLoader(
test_2ds_CACD_VS[i], shuffle=False, batch_size=args.batch_size,
num_workers=args.workers, pin_memory=True) for i in range(2)]
test_2ld_CALFW = [torch.utils.data.DataLoader(
test_2ds_CALFW[i], shuffle=False, batch_size=args.batch_size,
num_workers=args.workers, pin_memory=True) for i in range(2)]
test_2ld_morph2_probe_3000 = [torch.utils.data.DataLoader(
test_2ds_morph2_probe_3000[i], shuffle=False, batch_size=args.batch_size,
num_workers=args.workers, pin_memory=True) for i in range(2)]
test_2ld_morph2_gallery_3000 = [torch.utils.data.DataLoader(
test_2ds_morph2_gallery_3000[i], shuffle=False, batch_size=args.batch_size,
num_workers=args.workers, pin_memory=True) for i in range(2)]
test_2ld_morph2_probe_10000 = [torch.utils.data.DataLoader(
test_2ds_morph2_probe_10000[i], shuffle=False, batch_size=args.batch_size,
num_workers=args.workers, pin_memory=True) for i in range(2)]
test_2ld_morph2_gallery_10000 = [torch.utils.data.DataLoader(
test_2ds_morph2_gallery_10000[i], shuffle=False, batch_size=args.batch_size,
num_workers=args.workers, pin_memory=True) for i in range(2)]
if not os.path.exists('snapshot/' + args.output_dir):
os.makedirs('snapshot/' + args.output_dir, exist_ok=True)
log_file = 'snapshot/' + args.output_dir + '/log_train.csv'
save_path = 'saved_models/' + args.output_dir
if not os.path.exists(save_path):
os.makedirs(save_path, exist_ok=True)
with open(log_file, 'a+') as f:
f.write('\n')
f.write('start recording\n')
grad_scaler = GradScaler()
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
train_sampler.set_epoch(epoch)
adjust_learning_rate(optimizer_1, epoch, args)
adjust_learning_rate(optimizer_2, epoch, args)
# train for one epoch
train(grad_scaler, train_ld, model, optimizer_1, optimizer_2, epoch, log_file, args)
if not args.multiprocessing_distributed or (args.multiprocessing_distributed
and args.rank % ngpus_per_node == 0):
filename = 'ms1mv3_iresnet50_IEFP.pth'
file_path = os.path.join(save_path, filename)
torch.save(model.state_dict(), file_path)
# evaluate on test set
test_fi_ae_FGNET(test_2ld_FGNET, model, log_file, args, crop_number=2, crops_mix_mode='concatenate')
test_fi_ae_FGNET(test_2ld_FGNET, model, log_file, args, crop_number=2, crops_mix_mode='add')
test_face_verification(test_2ld_CALFW, model, 'CALFW', log_file, args, crop_number=2, crops_mix_mode='concatenate')
test_face_verification(test_2ld_CALFW, model, 'CALFW', log_file, args, crop_number=2, crops_mix_mode='add')
test_face_identification_morph2(test_2ld_morph2_probe_3000, test_2ld_morph2_gallery_3000,
test_2ld_morph2_probe_10000, test_2ld_morph2_gallery_10000,
model, log_file, args, crop_number=2, crops_mix_mode='concatenate')
test_face_identification_morph2(test_2ld_morph2_probe_3000, test_2ld_morph2_gallery_3000,
test_2ld_morph2_probe_10000, test_2ld_morph2_gallery_10000,
model, log_file, args, crop_number=2, crops_mix_mode='add')
test_face_verification(test_2ld_CACD_VS, model, 'CACD_VS', log_file, args, crop_number=2, crops_mix_mode='concatenate')
test_face_verification(test_2ld_CACD_VS, model, 'CACD_VS', log_file, args, crop_number=2, crops_mix_mode='add')
with open(log_file,'a+') as f:
f.write('log_over\n')
dist.destroy_process_group()
def train(grad_scaler, train_loader, model, optimizer_1, optimizer_2, current_epoch, log_file, args):
fr_loss = AverageMeter('face_recognition_loss',':.4f')
fr_acc1 = AverageMeter('face_recognition_accuracy_rank1', ':.3f')
ae_loss = AverageMeter('age_estimation_loss', ':.4f')
mi_est = AverageMeter('mi_estimation', ':.4f')
loglikeli = AverageMeter('loglikelihood of CLUB-net', ':.4f')
argmax_MAE = AverageMeter('argmax_MAE', ':.2f')
model.module.backbone_ae.train(False)
model.module.age_estimator.train(False)
model.module.backbone_id.train(True)
model.module.margin_fc.train(True)
model.module.clubmi.train(True)
set_train_mode_2(model, False)
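# Alternating update per batch: step 1 trains only the CLUB mutual-information estimator by maximising its log-likelihood (optimizer_2); step 2 trains the recognition backbone and margin head by minimising the face-recognition loss plus the scaled MI estimate (optimizer_1).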
for i, (xs, ys, real_age_range) in enumerate(train_loader):
if args.gpu is not None and torch.cuda.is_available():
xs = xs.cuda(args.gpu, non_blocking=True)
ys = ys.cuda(args.gpu, non_blocking=True)
real_age_range = real_age_range.cuda(args.gpu, non_blocking=True)
real_batch_size = len(ys)
# maximize the loglikelihood and update the CLUBnet
set_train_mode_1(model, False)
_, _, _, loglikelihood = model(xs, ys)
total_loss = -0.1 * loglikelihood
if args.fp16:
grad_scaler.scale(total_loss).backward()
grad_scaler.unscale_(optimizer_2)
clip_grad_norm_(model.parameters(), max_norm=5, norm_type=2)
grad_scaler.step(optimizer_2)
grad_scaler.update()
else:
total_loss.backward()
clip_grad_norm_(model.parameters(), max_norm=5, norm_type=2)
optimizer_2.step()
loglikeli.update(loglikelihood.item(), real_batch_size)
optimizer_2.zero_grad()
# minimize the fr loss function and the mi estimation
set_train_mode_1(model, True)
id_logits, age_logits, mi_estimation, _ = model(xs, ys)
FR_criterion = nn.CrossEntropyLoss()
AE_criterion = nn.CrossEntropyLoss()
FR_Loss = FR_criterion(id_logits, ys)
AE_Loss = AE_criterion(age_logits, real_age_range)
FR_Acc = multi_accuracies(id_logits, ys, topk=(1,))
batch_mae = argmax_mae(age_logits.cpu(), real_age_range.cpu())
total_loss = FR_Loss + mi_estimation * args.lambda1
if args.fp16:
grad_scaler.scale(total_loss).backward()
grad_scaler.unscale_(optimizer_1)
clip_grad_norm_(model.parameters(), max_norm=5, norm_type=2)
grad_scaler.step(optimizer_1)
grad_scaler.update()
else:
total_loss.backward()
clip_grad_norm_(model.parameters(), max_norm=5, norm_type=2)
optimizer_1.step()
fr_loss.update(FR_Loss.item(), real_batch_size)
ae_loss.update(AE_Loss.item(), real_batch_size)
mi_est.update(mi_estimation.item(), real_batch_size)
argmax_MAE.update(batch_mae.item(), real_batch_size)
fr_acc1.update(FR_Acc[0].item(), real_batch_size)
optimizer_1.zero_grad()
with open(log_file,'a+') as f:
f.write('Train--- {:d}/{:d}, fr_loss:{:.4f}, ae_loss:{:.4f}, mi_estimation:{:.4f}, loglikelihood:{:.4f}, '\
'fr_acces:{:.2f}%, argmax_MAE:{:.2f}\n'.format(
current_epoch, args.epochs, fr_loss.avg, ae_loss.avg, mi_est.avg, loglikeli.avg,
fr_acc1.avg, argmax_MAE.avg))
def test_fi_ae_FGNET(test_loaders, model, log_file, args, crop_number=2, crops_mix_mode='concatenate'):
mi_est = AverageMeter('mi_estimation', ':.2f')
loglikeli = AverageMeter('loglikelihood of CLUB-net', ':.2f')
argmax_MAE = AverageMeter('argmax_MAE', ':.2f')
model.train(False)
with torch.no_grad():
loader_list = [iter(test_loaders[i]) for i in range(crop_number)]
for j in range(len(loader_list[0])):
for k in range(crop_number):
xs, ys, real_age_range = next(loader_list[k])
if args.gpu is not None and torch.cuda.is_available():
xs = xs.cuda(args.gpu, non_blocking=True)
bs_xs_embedding, bs_age_logits, bs_mi_estimation, bs_loglikelihood = model(xs, emb=True)
if k == 0:
mixed_bs_xs_embedding = bs_xs_embedding
mixed_bs_age_probs = F.softmax(bs_age_logits, dim=1)
mixed_bs_mi_estimation = bs_mi_estimation
mixed_bs_loglikelihood = bs_loglikelihood
else:
if crops_mix_mode == 'concatenate':
mixed_bs_xs_embedding = torch.cat((mixed_bs_xs_embedding, bs_xs_embedding),dim=1)
else:
mixed_bs_xs_embedding = torch.add(mixed_bs_xs_embedding, bs_xs_embedding)
mixed_bs_age_probs += F.softmax(bs_age_logits, dim=1)
mixed_bs_mi_estimation += bs_mi_estimation
mixed_bs_loglikelihood += bs_loglikelihood
mixed_bs_age_probs = mixed_bs_age_probs / float(crop_number)
mixed_bs_mi_estimation = mixed_bs_mi_estimation / float(crop_number)
mixed_bs_loglikelihood = mixed_bs_loglikelihood / float(crop_number)
real_batch_size = len(ys)
bs_argmax_MAE = argmax_mae(mixed_bs_age_probs.cpu(), real_age_range)
argmax_MAE.update(bs_argmax_MAE.item(), real_batch_size)
mi_est.update(mixed_bs_mi_estimation.item(), real_batch_size)
loglikeli.update(mixed_bs_loglikelihood.item(), real_batch_size)
if j == 0:
total_xs_embedding = mixed_bs_xs_embedding
gt_labels = ys
else:
total_xs_embedding = torch.cat((total_xs_embedding, mixed_bs_xs_embedding), dim=0)
gt_labels = torch.cat((gt_labels, ys), dim=0)
total_xs_embedding = F.normalize(total_xs_embedding.cpu(), p=2, dim=1)
##############
## calculate the face identification rank-1 accuracy under the LOPO protocol
logits = total_xs_embedding.mm(total_xs_embedding.t())
mask = 1.0 - torch.eye(logits.size(0))
logits = logits.mul(mask)
_, pred_pos = logits.topk(1, dim=1, largest=True, sorted=True)
pred_pos = pred_pos.squeeze()
pred_labels = torch.zeros_like(gt_labels)
for i in range(len(gt_labels)):
pred_labels[i] = gt_labels[pred_pos[i]]
fr_acc_LOPO = accuracy_percent(pred_labels, gt_labels)
##############
##############
## calculate the face identification rank-1 to rank-10 accuracies under the MegaFace-style face identification setting
full_indexes = torch.arange(len(gt_labels))
for i in range(len(gt_labels)):
tmp_id = gt_labels[i]
eq_mask = torch.eq(gt_labels, tmp_id)
eq_mask[i] = False
eq_indexes = full_indexes[eq_mask]
ne_mask = torch.ne(gt_labels, tmp_id)
ne_indexes = full_indexes[ne_mask]
tmp_probe_set = torch.index_select(total_xs_embedding, 0, eq_indexes)
tmp_gallery_set = torch.index_select(total_xs_embedding, 0, ne_indexes)
tmp_gallery_set = torch.cat((tmp_gallery_set, total_xs_embedding[i,:].view(1,-1)), dim=0)
true_tmp_gallery_labels = torch.cat((gt_labels[ne_mask], torch.tensor([tmp_id])), dim=0).view(1, -1)
true_tmp_gallery_labels = true_tmp_gallery_labels.expand(tmp_probe_set.size(0), -1)
tmp_logits = tmp_probe_set.mm(tmp_gallery_set.t())
_, pred_poses = tmp_logits.topk(10, dim=1, largest=True, sorted=True)
pred_tmp_gallery_labels_ranks = torch.zeros_like(pred_poses)
for j in range(tmp_logits.size(0)):
pred_tmp_gallery_labels_ranks[j,:] = true_tmp_gallery_labels[j,:].index_select(0, pred_poses[j,:])
tmp_probe_labels_ranks = torch.full_like(pred_tmp_gallery_labels_ranks, tmp_id)
if i == 0:
# total_probe_labels_ranks = copy.deepcopy(tmp_probe_labels_ranks)
# total_pred_gallery_labels_ranks = copy.deepcopy(pred_tmp_gallery_labels_ranks)
total_probe_labels_ranks = tmp_probe_labels_ranks
total_pred_gallery_labels_ranks = pred_tmp_gallery_labels_ranks
else:
total_probe_labels_ranks = torch.cat((total_probe_labels_ranks, tmp_probe_labels_ranks),dim=0)
total_pred_gallery_labels_ranks = torch.cat((total_pred_gallery_labels_ranks,pred_tmp_gallery_labels_ranks),dim=0)
correct = total_probe_labels_ranks.eq(total_pred_gallery_labels_ranks)
id_acces = []
for k in range(1,11):
correct_k = correct[:,0:k].reshape(-1).float().sum(0, keepdim=True)
id_acces.append(correct_k.mul_(100.0 / total_probe_labels_ranks.size(0)).item())
##############
with open(log_file, 'a+') as f:
f.write('Test on FGNET dataset, --{} mix-- megaface_id_acc:{:.2f}%, LOPO_id_acc:{:.2f}%, '\
'argmax_MAE:{:.2f}, mi_estimation:{:.2f}, loglikelihood:{:.2f}\n'.format(
crops_mix_mode, id_acces[0], fr_acc_LOPO, argmax_MAE.avg, mi_est.avg, loglikeli.avg))
def test_face_verification(test_loaders, model, test_set_name, log_file, args, crop_number=2, crops_mix_mode='concatenate'):
model.train(False)
with torch.no_grad():
loader_list = [iter(test_loaders[i]) for i in range(crop_number)]
for j in range(len(loader_list[0])):
for k in range(crop_number):
xs_a, xs_b, mb_lbls = next(loader_list[k])
if args.gpu is not None and torch.cuda.is_available():
xs_a = xs_a.cuda(args.gpu, non_blocking=True)
xs_b = xs_b.cuda(args.gpu, non_blocking=True)
xs_a_embedding, _, _, _ = model(xs_a, emb=True)
xs_b_embedding, _, _, _ = model(xs_b, emb=True)
if k == 0:
concatenate_xs_a_embedding = xs_a_embedding
concatenate_xs_b_emdedding = xs_b_embedding
else:
if crops_mix_mode == 'concatenate':
concatenate_xs_a_embedding = torch.cat((concatenate_xs_a_embedding, xs_a_embedding), dim=1)
concatenate_xs_b_emdedding = torch.cat((concatenate_xs_b_emdedding, xs_b_embedding), dim=1)
else:
concatenate_xs_a_embedding = torch.add(concatenate_xs_a_embedding, xs_a_embedding)
concatenate_xs_b_emdedding = torch.add(concatenate_xs_b_emdedding, xs_b_embedding)
if j == 0:
total_xs_a_embedding = concatenate_xs_a_embedding
total_xs_b_embedding = concatenate_xs_b_emdedding
gt_labels = mb_lbls
else:
total_xs_a_embedding = torch.cat((total_xs_a_embedding, concatenate_xs_a_embedding), dim=0)
total_xs_b_embedding = torch.cat((total_xs_b_embedding, concatenate_xs_b_emdedding), dim=0)
gt_labels = torch.cat((gt_labels, mb_lbls), dim=0)
total_xs_a_embedding = F.normalize(total_xs_a_embedding.cpu(), p=2, dim=1)
total_xs_b_embedding = F.normalize(total_xs_b_embedding.cpu(), p=2, dim=1)
total_xs_a_embedding = total_xs_a_embedding.numpy()
total_xs_b_embedding = total_xs_b_embedding.numpy()
gt_labels = gt_labels.numpy().astype(bool)
thresholds = np.arange(-1, 1.01, 0.01)
_, _, auc, eer, verif_acc, _ = calculate_roc_auc_eer(
thresholds,
total_xs_a_embedding,
total_xs_b_embedding,
gt_labels,
nrof_folds = 10)
with open(log_file, 'a+') as f:
f.write('Test on {} dataset --{} mix-- AUC:{:.2f}%, EER:{:.2f}%, Verif_accuracy:{:.2f}%\n'.format(
test_set_name, crops_mix_mode, auc*100.0, eer*100.0, verif_acc*100.0))
def test_face_identification_morph2(test_loaders_probe_3000, test_loaders_gallery_3000,
test_loaders_probe_10000, test_loaders_gallery_10000,
model, log_file, args, crop_number=2, crops_mix_mode='concatenate'):
model.train(False)
with torch.no_grad():
test_loaders_probe_3000_list = [iter(test_loaders_probe_3000[i]) for i in range(crop_number)]
for j in range(len(test_loaders_probe_3000_list[0])):
for k in range(crop_number):
xs, ys, _ = next(test_loaders_probe_3000_list[k])
if args.gpu is not None and torch.cuda.is_available():
xs = xs.cuda(args.gpu, non_blocking=True)
bs_xs_embedding, _, _, _ = model(xs, emb=True)
if k == 0:
mixed_bs_xs_embedding = bs_xs_embedding
else:
if crops_mix_mode == 'concatenate':
mixed_bs_xs_embedding = torch.cat(
(mixed_bs_xs_embedding, bs_xs_embedding),dim=1)
else:
mixed_bs_xs_embedding = torch.add(
mixed_bs_xs_embedding, bs_xs_embedding)
if j == 0:
total_xs_embedding = mixed_bs_xs_embedding
gt_labels = ys
else:
total_xs_embedding = torch.cat((total_xs_embedding, mixed_bs_xs_embedding), dim=0)
gt_labels = torch.cat((gt_labels, ys), dim=0)
total_xs_embedding_probe_3000 = F.normalize(total_xs_embedding.cpu(), p=2, dim=1)
gt_labels_probe_3000 = gt_labels
########################
test_loaders_gallery_3000_list = [iter(test_loaders_gallery_3000[i]) for i in range(crop_number)]
for j in range(len(test_loaders_gallery_3000_list[0])):
for k in range(crop_number):
xs, ys, _ = next(test_loaders_gallery_3000_list[k])
if args.gpu is not None and torch.cuda.is_available():
xs = xs.cuda(args.gpu, non_blocking=True)
bs_xs_embedding, _, _, _ = model(xs, emb=True)
if k == 0:
mixed_bs_xs_embedding = bs_xs_embedding
else:
if crops_mix_mode == 'concatenate':
mixed_bs_xs_embedding = torch.cat(
(mixed_bs_xs_embedding, bs_xs_embedding),dim=1)
else:
mixed_bs_xs_embedding = torch.add(
mixed_bs_xs_embedding, bs_xs_embedding)
if j == 0:
total_xs_embedding = mixed_bs_xs_embedding
gt_labels = ys
else:
total_xs_embedding = torch.cat((total_xs_embedding, mixed_bs_xs_embedding), dim=0)
gt_labels = torch.cat((gt_labels, ys), dim=0)
total_xs_embedding_gallery_3000 = F.normalize(total_xs_embedding.cpu(), p=2, dim=1)
gt_labels_gallery_3000 = gt_labels
########################
test_loaders_probe_10000_list = [iter(test_loaders_probe_10000[i]) for i in range(crop_number)]
for j in range(len(test_loaders_probe_10000_list[0])):
for k in range(crop_number):
xs, ys, _ = next(test_loaders_probe_10000_list[k])
if args.gpu is not None and torch.cuda.is_available():
xs = xs.cuda(args.gpu, non_blocking=True)
bs_xs_embedding, _, _, _ = model(xs, emb=True)
if k == 0:
mixed_bs_xs_embedding = bs_xs_embedding
else:
if crops_mix_mode == 'concatenate':
mixed_bs_xs_embedding = torch.cat(
(mixed_bs_xs_embedding, bs_xs_embedding),dim=1)
else:
mixed_bs_xs_embedding = torch.add(
mixed_bs_xs_embedding, bs_xs_embedding)
if j == 0:
total_xs_embedding = mixed_bs_xs_embedding
gt_labels = ys
else:
total_xs_embedding = torch.cat((total_xs_embedding, mixed_bs_xs_embedding), dim=0)
gt_labels = torch.cat((gt_labels, ys), dim=0)
total_xs_embedding_probe_10000 = F.normalize(total_xs_embedding.cpu(), p=2, dim=1)
gt_labels_probe_10000 = gt_labels
########################
test_loaders_gallery_10000_list = [iter(test_loaders_gallery_10000[i]) for i in range(crop_number)]
for j in range(len(test_loaders_gallery_10000_list[0])):
for k in range(crop_number):
xs, ys, _ = next(test_loaders_gallery_10000_list[k])
if args.gpu is not None and torch.cuda.is_available():
xs = xs.cuda(args.gpu, non_blocking=True)
bs_xs_embedding, _, _, _ = model(xs, emb=True)
if k == 0:
mixed_bs_xs_embedding = bs_xs_embedding
else:
if crops_mix_mode == 'concatenate':
mixed_bs_xs_embedding = torch.cat(
(mixed_bs_xs_embedding, bs_xs_embedding),dim=1)
else:
mixed_bs_xs_embedding = torch.add(
mixed_bs_xs_embedding, bs_xs_embedding)
if j == 0:
total_xs_embedding = mixed_bs_xs_embedding
gt_labels = ys
else:
total_xs_embedding = torch.cat((total_xs_embedding, mixed_bs_xs_embedding), dim=0)
gt_labels = torch.cat((gt_labels, ys), dim=0)
total_xs_embedding_gallery_10000 = F.normalize(total_xs_embedding.cpu(), p=2, dim=1)
gt_labels_gallery_10000 = gt_labels
########################
## calculate the face identification rank-1 accuracy
logits_3000 = total_xs_embedding_probe_3000.mm(total_xs_embedding_gallery_3000.t())
_, pred_pos_3000 = logits_3000.topk(1, dim=1, largest=True, sorted=True)
pred_pos_3000 = pred_pos_3000.squeeze()
pred_labels_3000 = torch.zeros_like(gt_labels_gallery_3000)
for i in range(len(pred_labels_3000)):
pred_labels_3000[i] = gt_labels_gallery_3000[pred_pos_3000[i]]
setting3000_id_rank1_acc = accuracy_percent(gt_labels_probe_3000, pred_labels_3000)
logits_10000 = total_xs_embedding_probe_10000.mm(total_xs_embedding_gallery_10000.t())
_, pred_pos_10000 = logits_10000.topk(1, dim=1, largest=True, sorted=True)
pred_pos_10000 = pred_pos_10000.squeeze()
pred_labels_10000 = torch.zeros_like(gt_labels_gallery_10000)
for i in range(len(pred_labels_10000)):
pred_labels_10000[i] = gt_labels_gallery_10000[pred_pos_10000[i]]
setting10000_id_rank1_acc = accuracy_percent(gt_labels_probe_10000, pred_labels_10000)
#############
with open(log_file, 'a+') as f:
f.write('Test on MORPH2 dataset --{} mix-- setting3000_id_rank1_acc:{:.2f}%, '\
'setting10000_id_rank1_acc:{:.2f}%\n'.format(
crops_mix_mode, setting3000_id_rank1_acc, setting10000_id_rank1_acc))
def set_train_mode_1(model, state):
set_grads(model.module.backbone_id, state)
set_grads(model.module.margin_fc, state)
set_grads(model.module.clubmi, not state)
def set_train_mode_2(model, state):
set_grads(model.module.backbone_ae, state)
set_grads(model.module.age_estimator, state)
def set_grads(module, state):
for para in module.parameters():
para.requires_grad = state
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=':f'):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
return fmtstr.format(**self.__dict__)
class ProgressMeter(object):
def __init__(self, num_batches, meters, prefix=""):
self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
self.meters = meters
self.prefix = prefix
def display(self, batch):
entries = [self.prefix + self.batch_fmtstr.format(batch)]
entries += [str(meter) for meter in self.meters]
print('\t'.join(entries))
def _get_batch_fmtstr(self, num_batches):
num_digits = len(str(num_batches // 1))
fmt = '{:' + str(num_digits) + 'd}'
return '[' + fmt + '/' + fmt.format(num_batches) + ']'
def adjust_learning_rate(optimizer, epoch, args):
"""Decay the learning rate based on schedule"""
lr = args.lr
if args.cos: # cosine lr schedule
lr *= 0.5 * (1. + math.cos(math.pi * epoch / args.epochs))
else: # stepwise lr schedule
for milestone in args.schedule:
lr *= 0.1 if epoch >= milestone else 1.
for param_group in optimizer.param_groups:
param_group['lr'] = lr
if __name__ == '__main__':
main()
```
|
{
"source": "Jeffery1874/sample-code",
"score": 3
}
|
#### File: python/pytest/helpers.py
```python
import os
def ensure_dir(directory):
if not os.path.exists(directory):
os.makedirs(directory)
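# Saves a screenshot named after the calling test and writes the device logcat to a matching log file.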
def take_screenhot_and_logcat(driver, device_logger, calling_request):
logcat_dir = device_logger.logcat_dir
screenshot_dir = device_logger.screenshot_dir
driver.save_screenshot(os.path.join(screenshot_dir, calling_request + ".png"))
logcat_data = driver.get_log('logcat')
with open(os.path.join(logcat_dir, calling_request + "_logcat.log"), 'wb') as logcat_file:
for data in logcat_data:
data_string = str(data['timestamp']) + ": " + str(data['message'])
logcat_file.write((data_string + '\n').encode("UTF-8"))
```
|
{
"source": "jefferycline1/Metallicity_Stack_Commons",
"score": 2
}
|
#### File: Metallicity_Stack_Commons/Metallicity_Stack_Commons/logging.py
```python
import sys
from os.path import join
from platform import uname
from getpass import getuser
from socket import gethostname
from requests import get
import logging
log_format = '%(asctime)s %(levelname)7s - %(module)21s %(funcName)23s : %(message)s'
formatter = logging.Formatter(log_format, "%H:%M:%S")
file_formatter = logging.Formatter(log_format, "%H:%M:%S")
class LogClass:
"""
Main class to log information to stdout and ASCII logfile
Note: This code is identical to the one used in ReQUIAM:
https://github.com/ualibraries/ReQUIAM
To use:
log = LogClass(log_dir, logfile).get_logger()
:param log_dir: Relative path for exported logfile directory
:param logfile: Filename for exported log file
"""
def __init__(self, log_dir: str, logfile: str):
self.LOG_FILENAME = join(log_dir, logfile)
def get_logger(self) -> logging.Logger:
file_log_level = logging.DEBUG # This is for file logging
log = logging.getLogger("main_logger")
if not log.handlers:
log.setLevel(file_log_level)
sh = logging.StreamHandler(sys.stdout)
sh.setLevel(logging.INFO) # Only at INFO level
sh.setFormatter(formatter)
log.addHandler(sh)
fh = logging.FileHandler(self.LOG_FILENAME)
fh.setLevel(file_log_level)
fh.setFormatter(file_formatter)
log.addHandler(fh)
log.handler_set = True
log.propagate = False
return log
def log_stdout() -> logging.Logger:
"""
Returns stdout logging object
"""
log_level = logging.INFO
log = logging.getLogger("stdout_logger")
if not log.handlers:
log.setLevel(log_level)
sh = logging.StreamHandler(sys.stdout)
sh.setFormatter(formatter)
log.addHandler(sh)
log.handler_set = True
log.propagate = False
return log
def get_user_hostname() -> dict:
"""
Retrieve user, hostname, IP, and OS configuration
:return: Dictionary with 'user' 'hostname' and 'ip' keys
"""
sys_info = dict()
sys_info['user'] = getuser()
sys_info['hostname'] = gethostname()
sys_info['ip'] = get('https://api.ipify.org').text
os_name = uname()
sys_info['os'] = f"{os_name[0]} {os_name[2]} {os_name[3]}"
return sys_info
def log_verbose(log: logging.Logger,
message: str, verbose: bool = False):
"""
Log message depending on verbosity
:param log: logging.Logger object
:param message: Message
:param verbose: Write verbose message to stdout. Default: file only
"""
if verbose:
log.info(message) # Write to stdout
else:
log.debug(message) # Write only to file via debug level
```
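The class and helpers above are meant to be combined: `LogClass` builds a logger that writes DEBUG-and-above to a file while echoing INFO-and-above to stdout, and `log_verbose` routes a message to one level or the other. A minimal usage sketch follows; the `logs/` directory and the messages are assumptions for illustration, not part of the original module.
```python
# Minimal sketch combining LogClass and log_verbose (directory and messages are assumed).
from os import makedirs

makedirs('logs', exist_ok=True)                         # hypothetical output directory
log = LogClass('logs', 'example.log').get_logger()

log.info("session info: %s", get_user_hostname())       # stdout + file
log_verbose(log, "detailed step", verbose=False)         # DEBUG -> file only
log_verbose(log, "user-facing step", verbose=True)       # INFO  -> stdout and file
```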
#### File: Metallicity_Stack_Commons/tests/test_attenuation.py
```python
import numpy as np
from os.path import join
from Metallicity_Stack_Commons.analysis import attenuation
from Metallicity_Stack_Commons import line_name_short
from chun_codes import random_pdf
import pytest
def test_compute_EBV():
# Test Error handling when list is provided
with pytest.raises(TypeError):
attenuation.compute_EBV([0.45, 0.38], verbose=True)
# Test Error handling when source is incorrect
with pytest.raises(KeyError):
attenuation.compute_EBV(0.45, source='test', verbose=True)
dx = 0.05 # This is for randomization
offsets = [0, -0.01, 0.01]
Balmer_list = [attenuation.HgHb_CaseB, attenuation.HdHb_CaseB]
source_list = ['HgHb', 'HdHb']
zip_data = zip(Balmer_list, source_list)
for value, source in zip_data:
for zero in [False, True]:
for offset in offsets:
# Test float input
Balmer = value + offset
EBV = attenuation.compute_EBV(Balmer, source=source,
zero_neg=zero, verbose=True)
assert isinstance(EBV, float)
if offset == 0:
assert EBV == 0
if offset < 0:
assert EBV > 0
if offset > 0:
if not zero:
assert EBV < 0
else:
assert EBV == 0
# Test numpy array with single record
EBV = attenuation.compute_EBV(np.array([Balmer]),
source=source, zero_neg=zero,
verbose=True)
assert isinstance(EBV, (np.ndarray, np.generic))
if offset == 0:
assert EBV == 0
if offset < 0:
assert EBV > 0
if offset > 0:
if not zero:
assert EBV < 0
else:
assert EBV == 0
# Test EBV distribution case
values = [value, value - dx, value + dx]
Balmer_dist = random_pdf(values, [dx] * len(values), seed_i=1,
n_iter=5000)
if not zero:
EBV_dist = attenuation.compute_EBV(Balmer_dist, source=source,
zero_neg=zero, verbose=True)
else:
EBV_dist, EBV_peak = attenuation.compute_EBV(Balmer_dist,
source=source,
zero_neg=zero,
verbose=True)
'''
# For writing initial file
npz_outfile = join('tests_data', f'EBV_dist_{source}_{zero}.npz')
print(f"Writing : {npz_outfile}")
np.savez(npz_outfile, EBV_dist=EBV_dist)
'''
assert isinstance(EBV_dist, (np.ndarray, np.generic))
# Read in reference data
npz_infile = join('tests_data', f'EBV_dist_{source}_{zero}.npz')
print(f"Reading : {npz_infile}")
npz_reference = np.load(npz_infile)
# assert np.array_equal(EBV_dist, npz_reference['EBV_dist'])
def test_compute_A():
for EBV in [0.0, 0.25]:
A_dict = attenuation.compute_A(EBV, verbose=True)
assert isinstance(A_dict, dict)
for key in A_dict.keys():
if EBV == 0:
assert A_dict[key] == 0.0
else:
assert A_dict[key] > 0
def test_line_ratio_atten():
ratio = 2.0
for EBV in [0.0, 0.25]:
# [OII]/H-beta
ratio_atten = attenuation.line_ratio_atten(ratio, EBV,
line_name_short['OII'],
line_name_short['HB'],
verbose=True)
assert isinstance(ratio_atten, float)
if EBV == 0:
assert ratio_atten == ratio
else:
assert ratio_atten > ratio
# [OIII]/[OII]
ratio_atten = attenuation.line_ratio_atten(ratio, EBV,
line_name_short['OIII'],
line_name_short['OII'],
verbose=True)
assert isinstance(ratio_atten, float)
if EBV == 0:
assert ratio_atten == ratio
else:
assert ratio_atten < ratio
def test_Hb_SFR():
logSFR = attenuation.Hb_SFR(41.0, 0.25, verbose=True)
assert isinstance(logSFR, float)
```
|
{
"source": "jefferydalton/programmingchallanges",
"score": 3
}
|
#### File: 1/python/1.py
```python
def wordcount(content):
return len(str.split(content))
with open('../../../common/shakespeare.txt','rt') as fh:
print('Words:', wordcount(fh.read()))
```
|
{
"source": "jeffery-do/Vizdoombot",
"score": 3
}
|
#### File: Vizdoombot/bot/reward.py
```python
from vizdoom import GameVariable
class RewardCalculator():
    def __init__(self):
        self.running_total = 0
        # Previous-state trackers; call reset(game) before the first calc_reward()
        self.prev_killcount = 0
        self.prev_health = 0
        self.prev_ammo = 0
def calc_reward(self, game):
# Assume Action Performed
cur_reward = -1
# Kills
cur_killcount = game.get_game_variable(GameVariable.KILLCOUNT)
new_kills = cur_killcount - self.prev_killcount
if new_kills > 0:
print("KILLED ITTTTTTT")
print("KILLED ITTTTTTT")
print("KILLED ITTTTTTT")
print("KILLED ITTTTTTT")
print("KILLED ITTTTTTT")
print("KILLED ITTTTTTT")
print("KILLED ITTTTTTT")
print("KILLED ITTTTTTT")
print("KILLED ITTTTTTT")
cur_reward += 1000 * new_kills
# Health
cur_health = game.get_game_variable(GameVariable.HEALTH)
diff_health = cur_health - self.prev_health
if diff_health > 0:
cur_reward += 10 * diff_health
elif diff_health < 0:
cur_reward += 20 * diff_health
# Ammo
cur_ammo = game.get_game_variable(GameVariable.SELECTED_WEAPON_AMMO)
diff_ammo = cur_ammo - self.prev_ammo
if diff_ammo > 0:
cur_reward += 10 * diff_ammo
elif diff_ammo < 0:
cur_reward += 100 * diff_ammo
# Store This State
self.prev_killcount = cur_killcount
self.prev_health = cur_health
self.prev_ammo = cur_ammo
# Return Running Total
self.running_total += cur_reward
return cur_reward
def get_total_reward(self):
return self.running_total
def reset(self, game):
self.prev_killcount = game.get_game_variable(GameVariable.KILLCOUNT)
self.prev_health = game.get_game_variable(GameVariable.HEALTH)
self.prev_ammo = game.get_game_variable(GameVariable.SELECTED_WEAPON_AMMO)
self.running_total = 0
```
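A short sketch of how `RewardCalculator` might be driven from an episode loop: `reset(game)` seeds the previous-state trackers, then `calc_reward(game)` is called once per action. The config path and action list below are assumptions, not part of this repository.
```python
# Hypothetical episode loop for RewardCalculator (config path and actions are assumed).
from vizdoom import DoomGame

game = DoomGame()
game.load_config("basic.cfg")          # assumed scenario config
game.init()

actions = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]   # assumed 3-button action space
calculator = RewardCalculator()
calculator.reset(game)                 # seed prev_* trackers before the first calc_reward()
while not game.is_episode_finished():
    game.make_action(actions[0])
    reward = calculator.calc_reward(game)
print("shaped episode reward:", calculator.get_total_reward())
game.close()
```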
#### File: examples/python/learning_tf.py
```python
from __future__ import division
from __future__ import print_function
from vizdoom import *
import itertools as it
import pickle
from random import sample, randint, random
from time import time, sleep
import numpy as np
import skimage.color, skimage.transform
import tensorflow as tf
from tqdm import trange
# Q-learning settings
learning_rate = 0.00025
# learning_rate = 0.0001
discount_factor = 0.99
epochs = 20
learning_steps_per_epoch = 2000
replay_memory_size = 10000
# NN learning settings
batch_size = 64
# Training regime
test_episodes_per_epoch = 100
# Other parameters
frame_repeat = 12
resolution = (30, 45)
episodes_to_watch = 10
model_savefile = "/tmp/model.ckpt"
save_model = True
load_model = False
skip_learning = False
# Configuration file path
config_file_path = "../../examples/config/simpler_basic.cfg"
# config_file_path = "../../examples/config/rocket_basic.cfg"
# config_file_path = "../../examples/config/basic.cfg"
# Converts and down-samples the input image
def preprocess(img):
img = skimage.transform.resize(img, resolution)
img = img.astype(np.float32)
return img
class ReplayMemory:
def __init__(self, capacity):
channels = 1
state_shape = (capacity, resolution[0], resolution[1], channels)
self.s1 = np.zeros(state_shape, dtype=np.float32)
self.s2 = np.zeros(state_shape, dtype=np.float32)
self.a = np.zeros(capacity, dtype=np.int32)
self.r = np.zeros(capacity, dtype=np.float32)
self.isterminal = np.zeros(capacity, dtype=np.float32)
self.capacity = capacity
self.size = 0
self.pos = 0
def add_transition(self, s1, action, s2, isterminal, reward):
self.s1[self.pos, :, :, 0] = s1
self.a[self.pos] = action
if not isterminal:
self.s2[self.pos, :, :, 0] = s2
self.isterminal[self.pos] = isterminal
self.r[self.pos] = reward
self.pos = (self.pos + 1) % self.capacity
self.size = min(self.size + 1, self.capacity)
def get_sample(self, sample_size):
i = sample(range(0, self.size), sample_size)
return self.s1[i], self.a[i], self.s2[i], self.isterminal[i], self.r[i]
def create_network(session, available_actions_count):
# Create the input variables
s1_ = tf.placeholder(tf.float32, [None] + list(resolution) + [1], name="State")
a_ = tf.placeholder(tf.int32, [None], name="Action")
target_q_ = tf.placeholder(tf.float32, [None, available_actions_count], name="TargetQ")
# Add 2 convolutional layers with ReLu activation
conv1 = tf.contrib.layers.convolution2d(s1_, num_outputs=8, kernel_size=[6, 6], stride=[3, 3],
activation_fn=tf.nn.relu,
weights_initializer=tf.contrib.layers.xavier_initializer_conv2d(),
biases_initializer=tf.constant_initializer(0.1))
conv2 = tf.contrib.layers.convolution2d(conv1, num_outputs=8, kernel_size=[3, 3], stride=[2, 2],
activation_fn=tf.nn.relu,
weights_initializer=tf.contrib.layers.xavier_initializer_conv2d(),
biases_initializer=tf.constant_initializer(0.1))
conv2_flat = tf.contrib.layers.flatten(conv2)
fc1 = tf.contrib.layers.fully_connected(conv2_flat, num_outputs=128, activation_fn=tf.nn.relu,
weights_initializer=tf.contrib.layers.xavier_initializer(),
biases_initializer=tf.constant_initializer(0.1))
q = tf.contrib.layers.fully_connected(fc1, num_outputs=available_actions_count, activation_fn=None,
weights_initializer=tf.contrib.layers.xavier_initializer(),
biases_initializer=tf.constant_initializer(0.1))
best_a = tf.argmax(q, 1)
loss = tf.contrib.losses.mean_squared_error(q, target_q_)
optimizer = tf.train.RMSPropOptimizer(learning_rate)
# Update the parameters according to the computed gradient using RMSProp.
train_step = optimizer.minimize(loss)
def function_learn(s1, target_q):
feed_dict = {s1_: s1, target_q_: target_q}
l, _ = session.run([loss, train_step], feed_dict=feed_dict)
return l
def function_get_q_values(state):
return session.run(q, feed_dict={s1_: state})
def function_get_best_action(state):
return session.run(best_a, feed_dict={s1_: state})
def function_simple_get_best_action(state):
return function_get_best_action(state.reshape([1, resolution[0], resolution[1], 1]))[0]
return function_learn, function_get_q_values, function_simple_get_best_action
def learn_from_memory():
""" Learns from a single transition (making use of replay memory).
s2 is ignored if s2_isterminal """
# Get a random minibatch from the replay memory and learns from it.
if memory.size > batch_size:
s1, a, s2, isterminal, r = memory.get_sample(batch_size)
q2 = np.max(get_q_values(s2), axis=1)
target_q = get_q_values(s1)
# target differs from q only for the selected action. The following means:
        # target_Q(s,a) = r + gamma * max Q(s2,_) if not isterminal else r
target_q[np.arange(target_q.shape[0]), a] = r + discount_factor * (1 - isterminal) * q2
learn(s1, target_q)
def perform_learning_step(epoch):
""" Makes an action according to eps-greedy policy, observes the result
(next state, reward) and learns from the transition"""
def exploration_rate(epoch):
"""# Define exploration rate change over time"""
start_eps = 1.0
end_eps = 0.1
const_eps_epochs = 0.1 * epochs # 10% of learning time
eps_decay_epochs = 0.6 * epochs # 60% of learning time
if epoch < const_eps_epochs:
return start_eps
elif epoch < eps_decay_epochs:
# Linear decay
return start_eps - (epoch - const_eps_epochs) / \
(eps_decay_epochs - const_eps_epochs) * (start_eps - end_eps)
else:
return end_eps
s1 = preprocess(game.get_state().screen_buffer)
# With probability eps make a random action.
eps = exploration_rate(epoch)
if random() <= eps:
a = randint(0, len(actions) - 1)
else:
# Choose the best action according to the network.
a = get_best_action(s1)
reward = game.make_action(actions[a], frame_repeat)
isterminal = game.is_episode_finished()
s2 = preprocess(game.get_state().screen_buffer) if not isterminal else None
# Remember the transition that was just experienced.
memory.add_transition(s1, a, s2, isterminal, reward)
learn_from_memory()
# Creates and initializes ViZDoom environment.
def initialize_vizdoom(config_file_path):
print("Initializing doom...")
game = DoomGame()
game.load_config(config_file_path)
game.set_window_visible(False)
game.set_mode(Mode.PLAYER)
game.set_screen_format(ScreenFormat.GRAY8)
game.set_screen_resolution(ScreenResolution.RES_640X480)
game.init()
print("Doom initialized.")
return game
if __name__ == '__main__':
# Create Doom instance
game = initialize_vizdoom(config_file_path)
# Action = which buttons are pressed
n = game.get_available_buttons_size()
actions = [list(a) for a in it.product([0, 1], repeat=n)]
# Create replay memory which will store the transitions
memory = ReplayMemory(capacity=replay_memory_size)
session = tf.Session()
learn, get_q_values, get_best_action = create_network(session, len(actions))
saver = tf.train.Saver()
if load_model:
print("Loading model from: ", model_savefile)
saver.restore(session, model_savefile)
else:
init = tf.initialize_all_variables()
session.run(init)
print("Starting the training!")
time_start = time()
if not skip_learning:
for epoch in range(epochs):
print("\nEpoch %d\n-------" % (epoch + 1))
train_episodes_finished = 0
train_scores = []
print("Training...")
game.new_episode()
for learning_step in trange(learning_steps_per_epoch):
perform_learning_step(epoch)
if game.is_episode_finished():
score = game.get_total_reward()
train_scores.append(score)
game.new_episode()
train_episodes_finished += 1
print("%d training episodes played." % train_episodes_finished)
train_scores = np.array(train_scores)
print("Results: mean: %.1f±%.1f," % (train_scores.mean(), train_scores.std()), \
"min: %.1f," % train_scores.min(), "max: %.1f," % train_scores.max())
print("\nTesting...")
test_episode = []
test_scores = []
for test_episode in trange(test_episodes_per_epoch):
game.new_episode()
while not game.is_episode_finished():
state = preprocess(game.get_state().screen_buffer)
best_action_index = get_best_action(state)
game.make_action(actions[best_action_index], frame_repeat)
r = game.get_total_reward()
test_scores.append(r)
test_scores = np.array(test_scores)
print("Results: mean: %.1f±%.1f," % (
test_scores.mean(), test_scores.std()), "min: %.1f" % test_scores.min(),
"max: %.1f" % test_scores.max())
print("Saving the network weigths to:", model_savefile)
saver.save(session, model_savefile)
# pickle.dump(get_all_param_values(net), open('weights.dump', "wb"))
print("Total elapsed time: %.2f minutes" % ((time() - time_start) / 60.0))
game.close()
print("======================================")
print("Training finished. It's time to watch!")
# Reinitialize the game with window visible
game.set_window_visible(True)
game.set_mode(Mode.ASYNC_PLAYER)
game.init()
for _ in range(episodes_to_watch):
game.new_episode()
while not game.is_episode_finished():
state = preprocess(game.get_state().screen_buffer)
best_action_index = get_best_action(state)
# Instead of make_action(a, frame_repeat) in order to make the animation smooth
game.set_action(actions[best_action_index])
for _ in range(frame_repeat):
game.advance_action()
# Sleep between episodes
sleep(1.0)
score = game.get_total_reward()
print("Total score: ", score)
```
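The one-line target update in `learn_from_memory` can be hard to parse: only the column of the action that was actually taken is overwritten, and the bootstrap term is zeroed out for terminal transitions. The toy numpy example below (all numbers made up) shows the effect for a batch of two transitions.
```python
# Toy illustration of the target-Q row update from learn_from_memory() (values are made up).
import numpy as np

gamma = 0.99
q_s1 = np.array([[0.2, 0.5, 0.1],          # current Q(s1, .) for two transitions
                 [0.0, 0.3, 0.7]])
a = np.array([1, 2])                       # actions taken
r = np.array([1.0, -1.0])                  # rewards received
isterminal = np.array([0.0, 1.0])          # second transition ends the episode
q2_max = np.array([0.6, 0.9])              # max over actions of Q(s2, .)

target_q = q_s1.copy()
target_q[np.arange(2), a] = r + gamma * (1 - isterminal) * q2_max
print(target_q)    # row 0, column 1 -> 1.594; row 1, column 2 -> -1.0; rest unchanged
```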
|
{
"source": "jeffery-k/hangouts_payment_tracker",
"score": 3
}
|
#### File: jeffery-k/hangouts_payment_tracker/tracker.py
```python
import json
import argparse
import gspread
import time
from oauth2client.service_account import ServiceAccountCredentials
CONFIG_FILE_NAME = "cfg/config.json"
CONFIG = json.load(open(CONFIG_FILE_NAME))
# Wrapper class for gspread.models.Spreadsheet
class SheetData:
def __init__(self, data):
self.data = data
# Ensures most recent dates are in sheet
def updateDates(self):
cur_time = time.time()
# Returns a map of date ranges to arrays of users with vacant payments
def getVacancies(self):
pass
# Returns a map of date ranges to arrays of users with unverified payments
def getUnverified(self):
pass
# Set the user status for the current period as unverified
def submitPayment(self):
pass
#TODO
# Verifies a payment via payment code
def verifyPayment(self, code):
pass
# Returns row with provided time bounded by the row start and end date
def findRow(self, time):
pass
# Returns a list of rows in data
def getRows(self):
pass
# Returns value of entry in data given key (column name) and row index
def getEntry(self, row_index, key):
pass
# Sets the value of entry in data given key (column name) and row index
def setEntry(self, row_index, key, value):
pass
# Starts payment tracking service
def start(sheet_data):
pass
# Submits a payment for the calling user for the current period
def pay(sheet_data):
pass
# Registers the calling user into the dataset
def register(sheet_data):
pass
# Gets data from payment sheet
def getData():
credentials_file_name = CONFIG["credentialsFileName"]
scope = ['https://www.googleapis.com/auth/spreadsheets', "https://www.googleapis.com/auth/drive.file", "https://www.googleapis.com/auth/drive"]
creds = ServiceAccountCredentials.from_json_keyfile_name(credentials_file_name, scope)
client = gspread.authorize(creds)
sheet = client.open("Payments").sheet1
return SheetData(sheet)
# Gets a string representation of a date
def toString(date):
pass
# Gets a date from a string representation
def toDate(string):
pass
# Returns calling user
def getCaller():
pass
# Main method
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--start", action="store_true",
help="Starts payment tracking service")
parser.add_argument("--pay", action="store_true",
help="Submits a payment for the calling user for the current period")
parser.add_argument("--register", action="store_true",
help="Registers the calling user into the dataset")
sheet_data = getData()
args = parser.parse_args()
if args.start:
start(sheet_data)
elif args.pay:
pay(sheet_data)
elif args.register:
register(sheet_data)
else:
parser.print_help()
if __name__ == "__main__":
main()
```
|
{
"source": "jeffery-k/where_is_my_money",
"score": 3
}
|
#### File: where_is_my_money/src/dao.py
```python
import gspread
import time
from oauth2client.service_account import ServiceAccountCredentials
#TODO
class Database:
INITIAL_FUND = "initial_fund"
INITIAL_DATE = "initial_date"
INTEREST_RATE = "interest_rate"
INTEREST_TYPE = "interest_type"
DEPOSIT_INTERVAL = "deposit_interval"
DEPOSIT_ACCOUNT = "deposit_account"
def __init__(self, credentials):
self.credentials = credentials
#TODO grab data from spreadsheet
ting = 3
def get_fund_data(self):
return {
"AccountA": {
Database.INITIAL_FUND: "1000.00",
Database.INITIAL_DATE: str(time.time() - 8640000),
Database.INTEREST_RATE: "0.10",
Database.INTEREST_TYPE: "apr",
Database.DEPOSIT_INTERVAL: "86400",
Database.DEPOSIT_ACCOUNT: "AccountA"
}
}
```
|
{
"source": "JefferyLJ/ScriptEditor",
"score": 2
}
|
#### File: JefferyLJ/ScriptEditor/lexer.py
```python
from PyQt5.Qsci import QsciLexerCustom
from PyQt5.QtGui import QColor, QFont
import re
class Lexer(QsciLexerCustom):
def __init__(self, parent):
super(Lexer, self).__init__(parent)
# Default text settings
# ----------------------
self.setDefaultColor(QColor("#ff000000"))
self.setDefaultPaper(QColor("#ffffffff"))
self.setDefaultFont(QFont("Consolas", 14))
# Initialize colors per style
# ----------------------------
self.setColor(QColor("#ff000000"), 0) # Style 0: black
self.setColor(QColor("#ff7f0000"), 1) # Style 1: red
self.setColor(QColor("#ff0000bf"), 2) # Style 2: blue
# Initialize paper colors per style
# ----------------------------------
self.setPaper(QColor("#ffffffff"), 0) # Style 0: white
self.setPaper(QColor("#ffffffff"), 1) # Style 1: white
self.setPaper(QColor("#ffffffff"), 2) # Style 2: white
# Initialize fonts per style
# ---------------------------
self.setFont(QFont("Consolas", 14, weight=QFont.Bold), 0) # Style 0: 14pt bold
self.setFont(QFont("Consolas", 14, weight=QFont.Bold), 1) # Style 1: 14pt bold
self.setFont(QFont("Consolas", 14, weight=QFont.Bold), 2) # Style 2: 14pt bold
editor = self.parent()
editor.SendScintilla(editor.SCI_STYLESETHOTSPOT, 1, True)
editor.setHotspotUnderline(True)
        editor.SCN_HOTSPOTCLICK.connect(self.hotspot)
# editor.setHotspotForegroundColor(QColor("# ffcf4444"))
# editor.setHotspotBackgroundColor(QColor("# ffaaaaaa"))
# editor.SendScintilla(editor.SCI_SETHOTSPOTACTIVEBACK, True, 0xaaaaaa)
    def hotspot(self, position, modifier):
print(position)
print(modifier)
    def language(self):
        # Original body elided ("[...]") in the source; a QsciLexerCustom subclass
        # returns the name of the language it styles here.
        return ""
def description(self, style):
if style == 0:
return "myStyle_0"
elif style == 1:
return "myStyle_1"
elif style == 2:
return "myStyle_2"
###
return ""
def styleText(self, start, end):
# 1. Initialize the styling procedure
# ------------------------------------
self.startStyling(start)
# 2. Slice out a part from the text
# ----------------------------------
text = self.parent().text()[start:end]
# 3. Tokenize the text
# ---------------------
p = re.compile(r"\s+|\w+|\W")
token_list = [(token, len(bytearray(token, "utf-8"))) for token in p.findall(text)]
# -> 'token_list' is a list of tuples: (token_name, token_len)
# 4. Style the text in a loop
# ----------------------------
# self.setStyling(number_of_chars, style_nr)
#
for i, token in enumerate(token_list):
if token[0] in ["for", "while", "return", "int", "include"]:
# Red style
self.setStyling(token[1], 1)
elif token[0] in ["(", ")", "{", "}", "[", "]", "#"]:
# Blue style
self.setStyling(token[1], 2)
else:
# Default style
self.setStyling(token[1], 0)
###
###
```
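The lexer only does something once it is installed on a `QsciScintilla` widget, since `styleText` pulls its text from `self.parent()`. A minimal, assumed harness is sketched below; the sample text and window size are illustrative.
```python
# Assumed minimal harness: attach the custom Lexer to a QsciScintilla editor.
import sys
from PyQt5.QtWidgets import QApplication
from PyQt5.Qsci import QsciScintilla

app = QApplication(sys.argv)
editor = QsciScintilla()
editor.setText("#include <stdio.h>\nint main() { return 0; }")  # sample text
editor.setLexer(Lexer(editor))   # parent must be the editor so styleText() can read it
editor.resize(600, 400)
editor.show()
sys.exit(app.exec_())
```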
|
{
"source": "JefferyQ/Mechanic",
"score": 2
}
|
#### File: ui/windows/main.py
```python
from mechanic.ui.windows.base import BaseWindow
from mechanic.ui.tabs import *
class MechanicWindow(BaseWindow):
window_title = "Mechanic"
def __init__(self, *args, **kwargs):
self.toolbar.add(InstallTab)
self.toolbar.add(UpdatesTab)
self.toolbar.add(RegisterTab)
self.toolbar.add(SettingsTab)
```
#### File: lib/mechanic/update.py
```python
import time
import requests
from mechanic import env, logger
from mechanic.version import Version
from mechanic.storage import Storage
from mechanic.extension import Extension
class Update(object):
class ConnectionError(Exception): pass
@classmethod
def last_checked(cls):
return Storage.get('last_checked_at') or 0
@classmethod
def checked_recently(cls):
return cls.last_checked() > time.time() - env.updates_cache_interval
@classmethod
def all(cls, force=False, skip_patches=False):
if force or not cls.checked_recently():
updates = cls._fetch_updates()
else:
updates = cls._get_cached()
if skip_patches:
updates = filter(cls._filter_patch_updates, updates)
return updates
@classmethod
def _fetch_updates(cls):
logger.info("Fetching updates...")
try:
updates = [e for e in Extension.all() if e.should_update]
except requests.ConnectionError:
raise Update.ConnectionError
cls._set_cached(updates)
Storage.set('last_checked_at', time.time())
return updates
@classmethod
def _get_cached(cls):
logger.info("Fetching cached updates...")
cache = Storage.get('update_cache')
extensions = [Extension(name=name) for name, _ in cache.items()]
for extension in extensions:
if extension.is_installed and extension.is_configured:
extension.remote.version = Version(cache[extension.name])
return extensions
@classmethod
def _set_cached(cls, extensions):
cache = {e.filename : str(e.remote.version) for e in extensions}
Storage.set('update_cache', cache)
@classmethod
def _filter_patch_updates(cls, extension):
local = extension.version
remote = extension.remote.version
        return remote.major > local.major or remote.minor > local.minor
```
#### File: mechanic/github/downloader.py
```python
import os
import requests
import certifi
import tempfile
import fnmatch
from zipfile import ZipFile
ZIP_URL = "https://github.com/%(repository)s/archive/master.zip"
class GithubDownloader(object):
def __init__(self, repository, target):
self.repository = repository
self.target = target
self.zip = self.download(self.zip_url)
def download(self, url):
tmp_dir = tempfile.mkdtemp()
filepath = os.path.join(tmp_dir, os.path.basename(url))
with open(filepath, "wb") as download:
stream = requests.get(url, stream=True, verify=certifi.where())
chunks = stream.iter_content(chunk_size=8192)
for content in chunks:
self.download_chunk(download, content)
stream.close()
return filepath
def download_chunk(self, file, content):
file.write(content)
def extract(self):
"""Extract downloaded zip file and return extension path."""
destination = os.path.dirname(self.zip)
ZipFile(self.zip).extractall(destination)
os.remove(self.zip)
match = '*%s' % self.target
matches = []
for root, dirnames, _ in os.walk(destination):
for dirname in fnmatch.filter(dirnames, '*.roboFontExt'):
matches.append(os.path.join(root, dirname))
exact = fnmatch.filter(matches, match)
return (exact and exact[0]) or None
@property
def zip_url(self):
return ZIP_URL % {'repository': self.repository}
```
#### File: mechanic/observers/update.py
```python
from mechanic import logger
from mechanic.bus import Bus
from mechanic.threaded import Threaded
from mechanic.update import Update
from mechanic.observer import Observer
from mechanic.storage import Storage
class UpdateObserver(Observer):
"""Observe application launch to check for updates"""
def __init__(self, *events):
self.add('check_for_updates_in_thread', *events)
def check_for_updates_in_thread(self, info):
Threaded(self).check_for_updates()
def check_for_updates(self):
"""Open updates window unless ran in last hour"""
if self.should_check_for_updates():
try:
skip_patches = bool(Storage.get('ignore_patch_updates'))
updates = Update.all(force=True, skip_patches=skip_patches)
except Update.ConnectionError:
logger.info("Couldn't connect to the internet")
return
if updates:
logger.info("%d new updates found", len(updates))
Bus().emit("newUpdatesFound", updates)
else:
logger.info("No new updates found")
else:
logger.info("Skipping a check for new updates")
def should_check_for_updates(self):
return bool(Storage.get('check_on_startup')) and \
not Update.checked_recently()
```
#### File: ui/formatters/version.py
```python
from AppKit import NSFormatter, NSNull
class VersionFormatter(NSFormatter):
def stringForObjectValue_(self, obj):
if obj is None or isinstance(obj, NSNull):
return ''
return str(obj)
```
#### File: ui/tabs/register.py
```python
import os
import requests
from vanilla import *
from vanilla.dialogs import getFile
from mechanic import env
from mechanic.extension import Extension
from mechanic.registry import Registry
from mechanic.ui.fields.text_field import TextField
from mechanic.ui.text import Text
from mechanic.ui.tabs.base import BaseTab
class RegisterTab(BaseTab):
title = "Register"
image = "toolbarScriptOpen"
identifier = "register"
tab_size = (500, 240)
max_tab_size = (500, 240)
explanation = Text.string(text="Your name and the description of your extension will be based on the name/username and repository description on GitHub. Make sure these are set accordingly before registering your extension.", size=11)
def setup(self):
self.name = TextField((20, 20, -20),
"Name",
placeholder="My Extension")
self.filename = TextField((20, 60, -20),
"Filename",
placeholder="MyExtension.roboFontExt")
self.repository = TextField((20, 100, -20),
"Repository",
placeholder="username/MyExtension")
self.explanatory_text = TextBox((TextField.indent + 20, 135, -20, 50),
self.explanation)
self.import_button = Button((-250, -42, 80, 20),
"Import",
callback=self.get_extension)
self.register_button = Button((-160, -42, 140, 20),
"Register",
callback=self.register)
def activate(self):
self.set_default_button(self.register_button)
def get_extension(self, sender):
getFile(fileTypes=['roboFontExt'],
parentWindow=self.parent.w,
resultCallback=self.import_extension)
def import_extension(self, file):
extension = Extension(path=file[0])
if extension.bundle.bundleExists():
self.name.set(extension.bundle.name)
self.filename.set(extension.filename)
self.repository.set(extension.repository)
def register(self, sender):
self.progress = self.start_progress('Sending to registry server...')
try:
registry = Registry(env.default_registry)
response = registry.add(name=self.name.get(),
filename=self.filename.get(),
repository=self.repository.get())
self.progress.close()
response.raise_for_status()
self.show_notification_sheet('%s was added.' % self.name.get())
self.clear()
except requests.exceptions.HTTPError as e:
errors = response.json()['error']
if isinstance(errors, basestring): errors = [errors]
errors = map(lambda e: '%s.' % e.capitalize(), errors)
self.show_notification_sheet('\n'.join(errors), size=(300,len(errors)*22 + 60))
except requests.exceptions.ConnectionError:
self.progress.close()
self.show_connection_error_sheet()
def clear(self):
self.name.set('')
self.filename.set('')
self.repository.set('')
```
|
{
"source": "jefferyUstc/QuLab-DataManager",
"score": 2
}
|
#### File: QuLab-DataManager/datasearch/models.py
```python
from django.db import models
class Article(models.Model):
p_id = models.CharField(max_length=15, primary_key=True, verbose_name="PubMed ID")
article_name = models.CharField(max_length=255, verbose_name="Article")
journal = models.CharField(max_length=255)
pub_time = models.DateField(verbose_name="Published Time")
up_user = models.CharField(max_length=50, verbose_name="Upload User")
def __str__(self):
return self.article_name
class Annotation(models.Model):
control = models.BooleanField(verbose_name='Whether the experiment has control samples?')
sample = models.BooleanField(verbose_name='Whether each cell or data has sample information?')
gender = models.BooleanField(verbose_name='Whether the data has gender annotation?')
age = models.BooleanField(verbose_name='Whether the data has age annotation?')
clinical = models.BooleanField(verbose_name='Whether the data has clinical information annotation?')
summary = models.TextField(null=True)
article = models.OneToOneField(Article, on_delete=models.CASCADE)
def __str__(self):
return str(self.article.article_name)
class CellType(models.Model):
parent = models.ForeignKey("self", on_delete=models.CASCADE, null=True, blank=True, related_name='children', default=9)
type = models.CharField(max_length=100, unique=True)
def __str__(self):
return self.type
    @classmethod
    def get_cell_type_all(cls):
        # Manager access must go through the class, not an instance
        return cls.objects.get(type='All')
class Metadata(models.Model):
s_id = models.AutoField(primary_key=True)
s_summary = models.TextField(verbose_name='Summary', null=True)
article = models.ForeignKey(Article, on_delete=models.CASCADE)
def __str__(self):
return self.article.article_name
class Data(models.Model):
MAJOR_TYPE = [
('TCD', 'T cell development'),
('RDA', 'Retina and dark adaptation'),
('DN', 'Decidua NK'),
('AD', 'Autoimmune disease'),
('MI', 'Mitosis'),
('BT', 'Brain tumor'),
('OT', 'Others'),
]
ANI_MODEL = [
('DM', 'Disease Model'),
('AM', 'Animal Model'),
('MO', 'Model Organism'),
]
DATA_TYPE = [
('scRNA', 'Single-cell RNA'),
('scATAC', 'Single-cell ATAC'),
('bkRNA', 'Bulk RNA'),
('bkATAC', 'Bulk ATAC'),
('bkChip', 'Bulk Chip'),
]
id = models.AutoField(primary_key=True)
article = models.ForeignKey(Article, on_delete=models.CASCADE)
data_class = models.CharField(max_length=3, choices=MAJOR_TYPE)
species = models.CharField(max_length=50)
ani_model = models.CharField(max_length=2, choices=ANI_MODEL, verbose_name='Disease/Animal Model/Model Organism')
disease_organ = models.CharField(max_length=100, verbose_name='Disease Type/Organ Type')
cell_type = models.ManyToManyField(CellType, verbose_name='Cell Type')
data_type = models.CharField(max_length=50, verbose_name='Data Type')
tech_type = models.CharField(max_length=100, null=True, verbose_name='Sequencing Technology')
seq_platform = models.CharField(max_length=100, null=True, verbose_name='Sequencing Platform')
n_samples = models.CharField(max_length=50, null=True, verbose_name='Number of Cells or Samples')
data_format = models.CharField(max_length=20, verbose_name='Data Format')
def __str__(self):
return str(self.article.article_name)
```
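A hedged sketch of how these models relate through the ORM; the IDs and field values below are invented for illustration and assume a configured Django project with migrations applied.
```python
# Hypothetical ORM usage (all values are made up; assumes Django is set up).
article = Article.objects.create(
    p_id="12345678", article_name="Example study", journal="Example Journal",
    pub_time="2020-01-01", up_user="alice")
Annotation.objects.create(
    article=article, control=True, sample=True, gender=False,
    age=False, clinical=True, summary="toy annotation")
# reverse FK lookup: all datasets attached to this article with a given data type
scrna_data = article.data_set.filter(data_type="Single-cell RNA")
```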
|
{
"source": "JefferyWangSH/dos-solver",
"score": 3
}
|
#### File: dos-solver/src/grids.py
```python
import numpy as np
from momentum import LatticeMomentum
class FrequencyGrids:
"""
    Grids of real frequency (1d)
"""
def __init__(self, num_of_grids, freqency_range) -> None:
assert(isinstance(num_of_grids, int))
assert(isinstance(freqency_range, list))
assert(len(freqency_range) == 2)
assert(freqency_range[0] < freqency_range[1])
self._num_of_grids = num_of_grids
self._min_frequency = freqency_range[0]
self._max_frequency = freqency_range[1]
self._grids = np.linspace(self._min_frequency, self._max_frequency, self._num_of_grids)
def __getitem__(self, index) -> float:
assert(isinstance(index, int))
assert(index >= 0 and index < self.GridsNum())
return self.Grids()[index]
def Grids(self):
return self._grids
def Min(self) -> float:
return self._min_frequency
def Max(self) -> float:
return self._max_frequency
def GridsNum(self) -> int:
return self._num_of_grids
class MomentumGrids:
"""
Grids in two-dimensional 1st Brillouin zone
"""
def __init__(self, num_of_grids_per_length) -> None:
assert(isinstance(num_of_grids_per_length, int))
self._num_of_grids_per_length = num_of_grids_per_length
self._num_of_grids = self._num_of_grids_per_length**2
self._grids_interval_per_length = 2*np.pi / self._num_of_grids_per_length
self._momentum_grids = np.array( [self.Grid2Momentum(id) for id in range(self._num_of_grids)] )
def __getitem__(self, index) -> LatticeMomentum:
assert(isinstance(index, int))
assert(index >= 0 and index < self.GridsNum())
return self.MomentumGrids()[index]
def GridsNumPerLength(self) -> int:
return self._num_of_grids_per_length
def GridsNum(self) -> int:
return self._num_of_grids
def GridsInterval(self) -> float:
return self._grids_interval_per_length
def MomentumGrids(self):
return self._momentum_grids
"""
Convert grid index to specific 2d momentum k in the 1st BZ
"""
def Grid2Momentum(self, grid_id) -> LatticeMomentum:
assert(isinstance(grid_id, int))
assert(grid_id >= 0 and grid_id < self.GridsNum())
kx = grid_id // self.GridsNumPerLength()
ky = grid_id % self.GridsNumPerLength()
return LatticeMomentum( np.array([-np.pi, -np.pi]) + self.GridsInterval() * np.array([kx, ky]) )
```
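A quick sketch of the two grid classes above; the grid sizes and frequency window are illustrative values only.
```python
# Illustrative construction of the grids (sizes and ranges are made up).
freq = FrequencyGrids(200, [-5.0, 5.0])     # 200 frequency points on [-5, 5]
bz = MomentumGrids(16)                      # 16 x 16 momentum grid over the 1st BZ

print(freq.Min(), freq.Max(), freq.GridsNum())   # -5.0 5.0 200
k = bz.Grid2Momentum(0)                          # grid id 0 -> corner of the 1st BZ
print(k[0], k[1])                                # both equal to -pi
print(bz.GridsNum(), bz.GridsInterval())         # 256 points, spacing 2*pi/16
```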
#### File: dos-solver/src/model.py
```python
import numpy as np
from dos_params import DosParams
from grids import FrequencyGrids, MomentumGrids
class FreePropagator:
"""
Free Feynman propagator
"""
def __init__(self, frequency_grids, momentum_grids):
assert(isinstance(frequency_grids, FrequencyGrids))
assert(isinstance(momentum_grids, MomentumGrids))
self._freq_grids = frequency_grids
self._momentum_grids = momentum_grids
self._arrary2d_particle = np.zeros((self.MomentumDim(), self.FrequencyDim()), dtype=complex)
self._arrary2d_hole = np.zeros((self.MomentumDim(), self.FrequencyDim()), dtype=complex)
def SetFreeBand(self, free_band_func) -> None:
self._free_band = free_band_func
def compute(self, dos_params):
assert(isinstance(dos_params, DosParams))
        # use numpy broadcasting to accelerate the creation of the free propagator matrix
kx_trans = np.array(np.mat([ k[0] for k in self._momentum_grids.MomentumGrids() ]).transpose())
ky_trans = np.array(np.mat([ k[1] for k in self._momentum_grids.MomentumGrids() ]).transpose())
tmp_ek = self._free_band(kx_trans, ky_trans, dos_params)
tmp_omega = self._freq_grids.Grids() + dos_params.infinitesimal_imag * 1.0j
        # the plus sign here indicates that
        # there exists a particle-hole symmetry in our model of phase-disordered superconductivity.
self._arrary2d_hole = (tmp_omega + tmp_ek)**-1
self._arrary2d_particle = (tmp_omega - tmp_ek)**-1
def FrequencyDim(self) -> int:
return self._freq_grids.GridsNum()
def MomentumDim(self) -> int:
return self._momentum_grids.GridsNum()
def ParticleMat(self):
return self._arrary2d_particle
def HoleMat(self):
return self._arrary2d_hole
# TODO: overload the [][] operator
class Kernel:
"""
Kernel between self energy and free Feynman propagator.
"""
def __init__(self, momentum_grids):
assert(isinstance(momentum_grids, MomentumGrids))
self._momentum_grids = momentum_grids
self._array2d = np.zeros((self.Dim(), self.Dim()))
def SetKernel(self, kernel_func) -> None:
self._kernel = kernel_func
    # this step, the generation of the kernel, should be the most computationally
    # expensive part of the program for large lattice sizes.
def compute(self, dos_params) -> None:
assert(isinstance(dos_params, DosParams))
        # accelerate the construction of the kernel by using operations between arrays;
        # again, numpy broadcasting is used.
tmp_px = np.array([ k.data()[0] for k in self._momentum_grids.MomentumGrids() ])
tmp_py = np.array([ k.data()[1] for k in self._momentum_grids.MomentumGrids() ])
tmp_kx = np.array(np.mat(tmp_px).transpose())
tmp_ky = np.array(np.mat(tmp_py).transpose())
self._array2d = self._kernel(tmp_kx, tmp_ky, tmp_px, tmp_py, dos_params)
def Dim(self) -> int:
return self._momentum_grids.GridsNum()
def Mat(self):
return self._array2d
# class SelfEnergy:
# pass
class GreenFunc:
"""
    Retarded Green's function of the interacting system,
    evaluated by computing the self-energy correction using perturbation theory
"""
def __init__(self, kernel, free_propagator):
assert(isinstance(kernel, Kernel))
assert(isinstance(free_propagator, FreePropagator))
self._momentum_grids = free_propagator._momentum_grids
self._freq_grids = free_propagator._freq_grids
self._kernel = kernel
self._free_propagator = free_propagator
self._array2d_self_energy = np.zeros((self._free_propagator.MomentumDim(), self._free_propagator.FrequencyDim()), dtype=complex)
self._array2d_green_func = np.zeros((self._free_propagator.MomentumDim(), self._free_propagator.FrequencyDim()), dtype=complex)
def ComputeSelfEnergy(self) -> None:
self._array2d_self_energy = self._kernel.Mat().dot(self._free_propagator.HoleMat())
def ComupteGreenFunc(self) -> None:
self._array2d_green_func = self._free_propagator.ParticleMat() / (1-self._free_propagator.ParticleMat()*self._array2d_self_energy)
def SelfEnergyMat(self):
return self._array2d_self_energy
def GreenFuncMat(self):
return self._array2d_green_func
def FrequencyDim(self) -> int:
return self.GreenFuncMat().shape[1]
def MomentumDim(self) -> int:
return self.GreenFuncMat().shape[0]
# class PhaseDisorderedSC:
# pass
```
#### File: dos-solver/src/momentum.py
```python
import numpy as np
class LatticeMomentum:
"""
    Class for a 2d lattice momentum in the 1st BZ,
    to which periodic boundary conditions are applied.
"""
def __init__(self, lattice_momentum) -> None:
        assert isinstance(lattice_momentum, np.ndarray)
self._latice_momentum = lattice_momentum
self.wrap()
def __add__(self, lattice_momentum):
assert(isinstance(lattice_momentum, LatticeMomentum))
return LatticeMomentum(self.data() + lattice_momentum.data())
def __getitem__(self, index) -> float:
assert(isinstance(index, int))
assert(index == 0 or index == 1)
return self.data()[index]
    def data(self) -> np.ndarray:
return self._latice_momentum
def abs(self) -> float:
return (self[0]**2 + self[1]**2)**0.5
def energy(self, hopping, chemical_potential) -> float:
assert(isinstance(hopping, float))
assert(isinstance(chemical_potential, float))
# dispersion relation of free lattice model
return -2 * hopping * np.sum(np.cos(self._latice_momentum)) + chemical_potential
"""
Confine the lattice momentum in the 1st BZ, and wrap at the boundaries due to PBC
"""
def wrap(self):
self._latice_momentum = np.array( [self.data()[i] - 2*np.pi*((self.data()[i]+np.pi)//(2*np.pi)) for i in range(len(self.data()))] )
```
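A tiny sketch of the wrapping behaviour and the dispersion relation; the numbers are arbitrary.
```python
# Illustrative use of LatticeMomentum (numbers are arbitrary).
import numpy as np

k = LatticeMomentum(np.array([1.5 * np.pi, -0.2]))    # kx starts outside the 1st BZ
print(k[0], k[1])                                     # kx wrapped to -0.5*pi, ky unchanged
shifted = k + LatticeMomentum(np.array([np.pi, 0.0])) # addition re-wraps into the 1st BZ
print(shifted[0])                                     # 0.5*pi
print(k.energy(hopping=1.0, chemical_potential=0.0))  # -2t*(cos kx + cos ky) + mu
```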
|
{
"source": "jeffery-work/SpeechAlgorithms",
"score": 3
}
|
#### File: SpeechAlgorithms/KeyWordSpotting/make_csv.py
```python
import glob
import pandas as pd
import argparse
from sklearn.utils import shuffle
parser = argparse.ArgumentParser()
parser.add_argument('--target', '-t', type=str, default="./dataset/target", help="target folder")
parser.add_argument('--background', '-b', type=str, default="./dataset/background", help="background folder")
parser.add_argument('--csv', '-c', type=str, default="./dataset/data.csv", help="output csv")
def make_csv(target_folder, background_folder, csv_path):
target_pattern = target_folder + "/*.wav"
background_pattern = background_folder + "/*.wav"
target_list = glob.glob(target_pattern)
target_label = [1] * len(target_list)
background_list = glob.glob(background_pattern)
background_label = [0] * len(background_list)
target_df = pd.DataFrame({"path": target_list, "label":target_label})
background_df = pd.DataFrame({"path": background_list, "label": background_label})
data_df = pd.concat((target_df, background_df))
data_df = shuffle(data_df)
    data_df.to_csv(csv_path, index=False)
if __name__ == "__main__":
args = parser.parse_args()
    make_csv(args.target, args.background, args.csv)
print(__file__, "Finished")
```
|
{
"source": "jefferyxm/mmdetection",
"score": 2
}
|
#### File: core/anchor/adaptive_anchor_target.py
```python
import torch
from ..bbox import assign_and_sample, build_assigner, PseudoSampler, bbox2delta
from ..utils import multi_apply
import numpy as np
import numpy.random as npr
def adaptive_anchor_target(anchor_points_list,
valid_flag_list,
gt_bboxes_list,
img_metas,
target_means,
target_stds,
cfg,
gt_bboxes_ignore_list=None,
gt_labels_list=None,
label_channels=1,
sampling=True,
unmap_outputs=True,
stride_list = [4, 8, 16, 32, 64]):
"""Compute regression and classification targets for anchors.
Args:
anchor_points_list (list[list]): Multi level anchor points of each image.
valid_flag_list (list[list]): Multi level valid flags of each image.
gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image.
img_metas (list[dict]): Meta info of each image.
target_means (Iterable): Mean value of regression targets.
target_stds (Iterable): Std value of regression targets.
cfg (dict): RPN train configs.
Returns:
        ARPN training targets, including:
            cls & cls weights
            shape_wh & shape_wh_weights
            reg_target & reg_target_weights
            positive_num & negative_num
"""
num_imgs = len(img_metas)
assert len(anchor_points_list) == len(valid_flag_list) == num_imgs
# anchor number of multi levels
num_level_ap = [ap.size(0) for ap in anchor_points_list[0]]
norm_list = np.array(stride_list) * 8.0
num_total_pos = 0
num_total_neg = 0
labels_list, shape_wh_list, bbox_targets_list= [], [], []
label_weights_list, shape_wh_weights_list, bbox_weights_list = [], [], []
for im_i in range(num_imgs):
labels, shape_whs, box_targets = [], [], []
bg_list = []
gt_rois = np.array(torch.Tensor.cpu(gt_bboxes_list[im_i]))
for i in range(len(num_level_ap)):
this_level_ap = np.array(torch.Tensor.cpu(anchor_points_list[im_i][i]))
this_level_label = np.zeros((this_level_ap.shape[0], ), dtype=np.float32)
this_level_label_weight = np.zeros((this_level_ap.shape[0], ), dtype=np.float32)
this_level_wh = np.zeros((this_level_ap.shape[0], 2), dtype=np.float32)
this_level_box_delta = np.zeros((this_level_ap.shape[0], 4),dtype=np.float32)
if gt_rois.shape[0] > 0:
# divide the gt by the gts' area
norm = norm_list[i]
# gt_areas = (gt_rois[:, 2] - gt_rois[:, 0] + 1)*(gt_rois[:, 3]-gt_rois[:, 1] + 1)/(norm*norm)
# if i == 0:
# valid_gtidx = np.where(gt_areas <= 2.0)[0]
# elif i == len(num_level_ap) - 1:
# valid_gtidx = np.where(gt_areas >= 0.5)[0]
# else:
# valid_gtidx = np.where((gt_areas <= 2.0) & (gt_areas >= 0.5))[0]
# valid_gts = gt_rois[valid_gtidx, :]
                # divide the gt by the short edge of the gt
gt_wh = np.array([(gt_rois[:, 2] - gt_rois[:, 0] + 1), (gt_rois[:, 3]-gt_rois[:, 1] + 1)])
gt_short = np.min(gt_wh, axis=0)/norm
if i == 0:
valid_gtidx = np.where(gt_short <= 1.0)[0]
elif i == len(num_level_ap) - 1:
valid_gtidx = np.where(gt_short >= 0.5)[0]
else:
valid_gtidx = np.where((gt_short <= 1.0) & (gt_short >= 0.5))[0]
valid_gts = gt_rois[valid_gtidx, :]
# set the nearest position to be positive
# not implement
for gt in valid_gts:
center = np.array([(gt[0]+gt[2])/2 , (gt[1]+gt[3])/2])
dis = np.sum(abs(center - this_level_ap), axis=1)
ner_idx = np.where(dis == np.min(dis))[0]
this_level_label[ner_idx] = 1
this_level_label_weight[ner_idx] = 1
ap = this_level_ap[ner_idx][0]
w_best = max(abs(ap[0] - gt[0]) , abs(ap[0] - gt[2]))*2
h_best = max(abs(ap[1] - gt[1]) , abs(ap[1] - gt[3]))*2
this_level_wh[ner_idx] = [w_best/norm, h_best/norm]
valid_apidx = np.empty(0, dtype=np.int32)
for gt in valid_gts:
idx = np.where( (this_level_ap[:,0] > gt[0]) &
(this_level_ap[:,0] < gt[2]) &
(this_level_ap[:,1] > gt[1]) &
(this_level_ap[:,1] < gt[3]) )[0]
valid_apidx = np.append(valid_apidx, idx)
valid_apidx = np.unique(valid_apidx)
valid_aps = this_level_ap[valid_apidx]
m =valid_aps.shape[0]
n =valid_gts.shape[0]
# points transformation
# 1 transform all points to left-up side of the gt boxes and
# 2 set points outside the boxes to the left-up corner
transfm_aps = np.zeros((2,m,n), dtype=np.float32)
tmp_aps = np.empty(valid_aps.shape, dtype=np.float32)
for idx, gt in enumerate(valid_gts):
tmp_aps[:] = valid_aps
# 1
gtcx = 0.5*(gt[0] + gt[2] + 1)
gtcy = 0.5*(gt[1] + gt[3] + 1)
tmp_aps[np.where( ( gtcx - tmp_aps[:,0]) < 0 )[0], 0] \
= 2*gtcx - tmp_aps[ np.where( ( gtcx - tmp_aps[:,0]) < 0 )[0], 0]
tmp_aps[np.where( ( gtcy - tmp_aps[:,1]) < 0 )[0], 1] \
= 2*gtcy - tmp_aps[ np.where( ( gtcy - tmp_aps[:,1]) < 0 )[0], 1]
                    # 2 add a small value to prevent D & C from being zero
tmp_aps[np.where( (tmp_aps[:,0] <= gt[0]) | (tmp_aps[:,1] <= gt[1]) )[0] ] = gt[0:2] + 0.001
transfm_aps[:, :, idx] = tmp_aps.transpose(1,0)
A = np.zeros((m, n), dtype = np.float32)
A[:] = (valid_gts[:,2] - valid_gts[:, 0] + 1)*(valid_gts[:,3] - valid_gts[:, 1] + 1)
C = ( transfm_aps[0] - (np.tile(valid_gts[:,0], [m, 1])) ) * 0.5
D = ( transfm_aps[1] - (np.tile(valid_gts[:,1], [m, 1])) ) * 0.5
B = 4*C*D
CANDW = np.zeros((4, m, n), dtype = np.float32)
CANDH = np.zeros((4, m, n), dtype = np.float32)
                # on the edges of the constraints
CANDW[0:2, :, :] = [4*C, 2*(1 + np.tile(valid_gts[:, 2], [m, 1]) - transfm_aps[0] ) ]
CANDH[0:2, :, :] = [4*D, 2*(1 + np.tile(valid_gts[:, 3], [m, 1]) - transfm_aps[1] ) ]
                # inside the constraints
sqdelta = np.sqrt(np.power((A-4*B),2) + 64*A*C*D)
a1 = ((A-4*B) + sqdelta)
a2 = ((A-4*B) - sqdelta)
w1 = a1/(8*D)
w1[np.where( (w1-CANDW[0,:,:] < 0) | (w1 - CANDW[1,:,:] > 0) )[0]] = 0
w2 = a2/(8*D)
w2[np.where( (w2-CANDW[0,:,:] < 0) | (w2 - CANDW[1,:,:] > 0) )[0]] = 0
h1 = a1/(8*C)
h1[np.where( (h1 - CANDH[0,:,:] < 0) | (h1 - CANDH[1,:,:] > 0) )[0]] = 0
h2 = a2/(8*C)
h2[np.where( (h2 - CANDH[0,:,:] < 0) | (h2 - CANDH[1,:,:] > 0) )[0]] = 0
CANDW[2:4,:,:] = [w1, w2]
CANDH[2:4,:,:] = [h1, h2]
                # combination of the w & h
CANDWS = np.tile(CANDW, [4,1,1])
CANDHS = np.repeat(CANDH, 4, axis = 0)
IOUS = (B+ C*CANDHS + D*CANDWS + 0.25*CANDWS*CANDHS)/(A-(B + C*CANDHS + D*CANDWS) + 0.75*CANDWS*CANDHS)
IOUS[ np.where( (CANDWS==0) | (CANDHS==0) ) ] = 0
IOU = np.max(IOUS, axis=0)
WHidx = np.argmax(IOUS, axis=0)
enable_idx = np.where(IOU>=0.7)
# generate label map
this_level_label[ valid_apidx[enable_idx[0]] ] = 1
this_level_label_weight[ valid_apidx[enable_idx[0]] ] = 1
this_level_wh[ valid_apidx[enable_idx[0]], 0 ] = \
CANDWS[ WHidx[enable_idx[0], enable_idx[1]], enable_idx[0], enable_idx[1] ]/norm
this_level_wh[ valid_apidx[enable_idx[0]], 1 ] = \
CANDHS[ WHidx[enable_idx[0], enable_idx[1]], enable_idx[0], enable_idx[1] ]/norm
# compute box delta
gt_widths = (valid_gts[enable_idx[1], 2] - valid_gts[enable_idx[1], 0] + 1)
gt_heghts = (valid_gts[enable_idx[1], 3] - valid_gts[enable_idx[1], 1] + 1)
gt_ctrx = valid_gts[enable_idx[1], 0] + 0.5 * gt_widths
gt_ctry = valid_gts[enable_idx[1], 1] + 0.5 * gt_heghts
this_level_box_delta[valid_apidx[enable_idx[0]], 0] = \
(gt_ctrx - this_level_ap[valid_apidx[enable_idx[0]], 0])/(this_level_wh[valid_apidx[enable_idx[0]], 0] * norm)
this_level_box_delta[valid_apidx[enable_idx[0]], 1] = \
(gt_ctry - this_level_ap[valid_apidx[enable_idx[0]], 1])/(this_level_wh[valid_apidx[enable_idx[0]], 1] * norm)
this_level_box_delta[valid_apidx[enable_idx[0]], 2] = \
np.log( gt_widths/(this_level_wh[valid_apidx[enable_idx[0]], 0] * norm) )
this_level_box_delta[valid_apidx[enable_idx[0]], 3] = \
np.log( gt_heghts/(this_level_wh[valid_apidx[enable_idx[0]], 1] * norm) )
cplogidx = np.where(this_level_wh > 0 )
this_level_wh[ cplogidx ] = np.log(this_level_wh[ cplogidx ])
DBG=0
if DBG:
# show label in image
import matplotlib.pyplot as plt
import cv2
img_root = 'data/mix-td900/train/'
im = cv2.imread(img_root + img_metas[0]['imname'])
im = cv2.resize(im, (0,0), fx=img_metas[0]['scale_factor'], fy=img_metas[0]['scale_factor'])
im_plt = im[:,:,(2,1,0)]
plt.cla()
plt.imshow(im_plt)
tg_index = np.where(this_level_label==1)[0]
for tg in tg_index:
w = np.exp(this_level_wh[tg][0])*norm
h = np.exp(this_level_wh[tg][1])*norm
p1 = [(this_level_ap[tg][0] - 0.5*w),
(this_level_ap[tg][1])- 0.5*h]
plt.gca().add_patch(plt.Rectangle((p1[0], p1[1]), w, h ,fill=False, edgecolor='r', linewidth=1))
for gt in valid_gts:
plt.gca().add_patch(plt.Rectangle((gt[0], gt[1]), gt[2]-gt[0], gt[3]-gt[1] ,fill=False, edgecolor='g', linewidth=1))
plt.show()
# save labels into list
labels.append(this_level_label)
shape_whs.append(this_level_wh)
box_targets.append(this_level_box_delta)
bg_map = np.zeros((this_level_ap.shape[0],), dtype=np.int32)
bg_map[valid_apidx] = 1
bg_list.append(bg_map)
# lvl end
# sampling positive and negative
num_total = cfg.sampler.num
pos_fraction = cfg.sampler.pos_fraction
lvl_counts = [x.shape[0] for x in labels]
slice_position = [sum(lvl_counts[:i+1]) for i in range(len(lvl_counts))]
slice_position.insert(0,0)
cat_labels = np.concatenate(tuple(labels), axis=0)
cat_shape_whs = np.concatenate(tuple(shape_whs), axis=0)
cat_box_targets = np.concatenate(tuple(box_targets), axis=0)
cat_bg = np.concatenate(tuple(bg_list), axis=0)
valid_flag = tuple([valid_flag_list[im_i][l] for l in range(len(num_level_ap))])
cat_valid_flag = torch.cat(valid_flag)
cat_valid_flag = np.array(torch.Tensor.cpu(cat_valid_flag))
assert cat_labels.shape[0] == cat_valid_flag.shape[0]
pos_idx = np.where((cat_labels==1) & (cat_valid_flag == 1))[0]
# sub sampling positive if too much
if len(pos_idx) > int(num_total * pos_fraction):
disable_inds = npr.choice(pos_idx, size=(len(pos_idx) - int(num_total * pos_fraction)), replace=False)
cat_labels[disable_inds] = 0
pos_idx = np.where((cat_labels==1) & (cat_valid_flag == 1))[0]
# recover data
cat_labels[disable_inds] = 1
# sub_sampling negative if too much
neg_idx = np.where((cat_bg==0) & (cat_valid_flag == 1))[0]
if len(neg_idx) > int(num_total - len(pos_idx)):
neg_idx = neg_idx[npr.randint(len(neg_idx), size=int(num_total - len(pos_idx)) )]
num_total_pos += len(pos_idx)
num_total_neg += len(neg_idx)
# to cuda
cat_labels = torch.from_numpy(cat_labels).to('cuda')
cat_shape_whs = torch.from_numpy(cat_shape_whs).to('cuda')
cat_box_targets = torch.from_numpy(cat_box_targets).to('cuda')
# get weights
cat_labels_w = torch.zeros((cat_labels.shape[0],), dtype=torch.float32, device = 'cuda')
cat_shape_whs_w = torch.zeros((cat_labels.shape[0], 2), dtype=torch.float32, device = 'cuda')
cat_box_targets_w = torch.zeros((cat_labels.shape[0], 4), dtype=torch.float32, device = 'cuda')
weight_position = np.concatenate((pos_idx, neg_idx))
cat_labels_w[weight_position] = 1.0
cat_shape_whs_w[pos_idx, :] = 1.0
cat_box_targets_w[pos_idx, :] = 1.0
# recover data
labels, shape_whs, box_targets = [], [], []
labels_w, shape_whs_w, box_targets_w = [], [], []
for l in range(len(slice_position)-1):
labels.append(cat_labels[slice_position[l]:slice_position[l+1]])
labels_w.append(cat_labels_w[slice_position[l]:slice_position[l+1]])
shape_whs.append(cat_shape_whs[slice_position[l]:slice_position[l+1]])
shape_whs_w.append(cat_shape_whs_w[slice_position[l]:slice_position[l+1]])
box_targets.append(cat_box_targets[slice_position[l]:slice_position[l+1]])
box_targets_w.append(cat_box_targets_w[slice_position[l]:slice_position[l+1]])
# fit shape to loss input
if im_i == 0:
labels_list = labels.copy()
shape_wh_list = shape_whs.copy()
bbox_targets_list = box_targets.copy()
label_weights_list = labels_w.copy()
shape_wh_weights_list = shape_whs_w.copy()
bbox_weights_list = box_targets_w.copy()
else:
for lvl in range(len(labels)):
labels_list[lvl] = torch.cat((labels_list[lvl], labels[lvl]), 0)
shape_wh_list[lvl] = torch.cat((shape_wh_list[lvl], shape_whs[lvl]), 0)
bbox_targets_list[lvl] = torch.cat((bbox_targets_list[lvl],box_targets[lvl]), 0)
label_weights_list[lvl]= torch.cat((label_weights_list[lvl], labels_w[lvl]), 0)
shape_wh_weights_list[lvl] = torch.cat((shape_wh_weights_list[lvl], shape_whs_w[lvl]), 0)
bbox_weights_list[lvl] = torch.cat((bbox_weights_list[lvl], box_targets_w[lvl]), 0)
DBG=0
if DBG :
# show label in image
import matplotlib.pyplot as plt
import cv2
img_root = 'data/icdar2015/train/'
im = cv2.imread(img_root + img_metas[im_i]['imname'])
im = cv2.resize(im, (0,0), fx=img_metas[im_i]['scale_factor'], fy=img_metas[im_i]['scale_factor'])
im_plt = im[:,:,(2,1,0)]
plt.cla()
plt.subplot(2,2,1)
plt.imshow(im_plt)
score_map = labels_list[0]
score_map_w = label_weights_list[0]
shape_map = shape_wh_list[0]
shape_map_w = shape_wh_weights_list[0]
score_map = torch.Tensor.cpu(score_map)
shape_map =torch.Tensor.cpu(shape_map)
shape_map_w = torch.Tensor.cpu(shape_map_w)
score_map_w = torch.Tensor.cpu(score_map_w)
feat_h, feat_w = img_metas[im_i]['pad_shape'][0]//4, img_metas[im_i]['pad_shape'][1]//4
score_map = torch.reshape(score_map, (1, feat_h, feat_w, 1))
score_map = score_map.permute((0,3,1,2))
shape_map = torch.reshape(shape_map, (1, feat_h, feat_w, 2))
shape_map = shape_map.permute((0,3,1,2))
score_map_w = torch.reshape(score_map_w, (1, feat_h, feat_w, 1))
score_map_w = score_map_w.permute((0,3,1,2))
plt.subplot(2,2,2)
plt.imshow(score_map[0,0,:,:], cmap=plt.cm.hot)
plt.subplot(2,2,3)
plt.imshow(shape_map[0,0,:,:], cmap=plt.cm.hot)
plt.subplot(2,2,4)
plt.imshow(score_map_w[0,0,:,:], cmap=plt.cm.hot)
plt.show()
return (labels_list, label_weights_list,
shape_wh_list, shape_wh_weights_list,
bbox_targets_list, bbox_weights_list,
num_total_pos, num_total_neg)
```
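For one positive anchor point, the regression target near the end of the assignment loop encodes the ground-truth box as centre offsets normalized by the assigned width/height plus log size ratios, while the shape-wh target stores `log(w/norm)` and `log(h/norm)`. The toy numpy sketch below (all numbers invented) reproduces that encoding for a single anchor point on the stride-4 level.
```python
# Toy re-derivation of the per-anchor targets above (all numbers are invented).
import numpy as np

norm = 4 * 8.0                               # stride-4 level normalization
ap = np.array([100.0, 120.0])                # anchor point (x, y)
gt = np.array([80.0, 100.0, 140.0, 160.0])   # ground-truth box x1, y1, x2, y2
wh = np.array([64.0, 64.0])                  # assigned anchor width/height in pixels

gt_w, gt_h = gt[2] - gt[0] + 1, gt[3] - gt[1] + 1
gt_cx, gt_cy = gt[0] + 0.5 * gt_w, gt[1] + 0.5 * gt_h
box_delta = np.array([(gt_cx - ap[0]) / wh[0], (gt_cy - ap[1]) / wh[1],
                      np.log(gt_w / wh[0]), np.log(gt_h / wh[1])])
shape_wh_target = np.log(wh / norm)          # what the shape-wh branch regresses
print(box_delta, shape_wh_target)
```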
#### File: models/anchor_heads/adaptive_rpn_head.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import normal_init
from mmdet.core import wh_delta2bbox
from mmdet.ops import nms
from .adaptive_anchor_head import AdaptiveAnchorHead
from ..registry import HEADS
@HEADS.register_module
class ARPNHead(AdaptiveAnchorHead):
def __init__(self, in_channels, **kwargs):
super(ARPNHead, self).__init__(2, in_channels, **kwargs)
def _init_layers(self):
self.arpn_conv = nn.Conv2d(
self.in_channels, self.feat_channels, 3, padding=1)
self.arpn_cls = nn.Conv2d(self.feat_channels, self.cls_out_channels, 1)
self.arpn_wh = nn.Conv2d(self.feat_channels, 2, 1)
self.arpn_reg = nn.Conv2d(self.feat_channels, 4, 1)
def init_weights(self):
normal_init(self.arpn_conv, std=0.01)
normal_init(self.arpn_wh, std=0.01)
normal_init(self.arpn_cls, std=0.01)
normal_init(self.arpn_reg, std=0.01)
def forward_single(self, x):
x = self.arpn_conv(x)
x = F.relu(x, inplace=True)
arpn_cls_score = self.arpn_cls(x)
arpn_shape_wh = self.arpn_wh(x)
arpn_bbox_pred = self.arpn_reg(x)
return arpn_cls_score, arpn_shape_wh, arpn_bbox_pred
def loss(self,
cls_scores,
wh_preds,
bbox_preds,
gt_bboxes,
img_metas,
cfg,
gt_bboxes_ignore=None):
losses = super(ARPNHead, self).loss(
cls_scores,
wh_preds,
bbox_preds,
gt_bboxes,
None,
img_metas,
cfg,
gt_bboxes_ignore=gt_bboxes_ignore)
return dict(
loss_rpn_cls=losses['loss_cls'], loss_rpn_wh=losses['loss_wh'], loss_rpn_reg=losses['loss_reg'])
def get_bboxes_single(self,
cls_scores,
shape_whs,
bbox_preds,
mlvl_anchor_points,
img_shape,
scale_factor,
cfg,
rescale=False):
mlvl_proposals = []
for idx in range(len(cls_scores)):
arpn_cls_score = cls_scores[idx]
arpn_wh_pred = shape_whs[idx]
arpn_bbox_pred = bbox_preds[idx]
assert arpn_cls_score.size()[-2:] == arpn_bbox_pred.size()[-2:]
anchor_points = mlvl_anchor_points[idx]
arpn_cls_score = arpn_cls_score.permute(1, 2, 0)
if self.use_sigmoid_cls:
arpn_cls_score = arpn_cls_score.reshape(-1)
scores = arpn_cls_score.sigmoid()
else:
arpn_cls_score = arpn_cls_score.reshape(-1, 2)
scores = arpn_cls_score.softmax(dim=1)[:, 1]
arpn_wh_pred = arpn_wh_pred.permute(1, 2, 0).reshape(-1, 2)
arpn_bbox_pred = arpn_bbox_pred.permute(1, 2, 0).reshape(-1, 4)
if cfg.nms_pre > 0 and scores.shape[0] > cfg.nms_pre:
_, topk_inds = scores.topk(cfg.nms_pre)
arpn_wh_pred = arpn_wh_pred[topk_inds, :]
arpn_bbox_pred = arpn_bbox_pred[topk_inds, :]
anchor_points = anchor_points[topk_inds, :]
scores = scores[topk_inds]
norm = 2**(idx+2) * 8.0
proposals = wh_delta2bbox(anchor_points, arpn_wh_pred, arpn_bbox_pred,
self.target_means, self.target_stds, img_shape, norm)
if cfg.min_bbox_size > 0:
w = proposals[:, 2] - proposals[:, 0] + 1
h = proposals[:, 3] - proposals[:, 1] + 1
valid_inds = torch.nonzero((w >= cfg.min_bbox_size) &
(h >= cfg.min_bbox_size)).squeeze()
proposals = proposals[valid_inds, :]
scores = scores[valid_inds]
proposals = torch.cat([proposals, scores.unsqueeze(-1)], dim=-1)
proposals, _ = nms(proposals, cfg.nms_thr)
proposals = proposals[:cfg.nms_post, :]
mlvl_proposals.append(proposals)
proposals = torch.cat(mlvl_proposals, 0)
if cfg.nms_across_levels:
proposals, _ = nms(proposals, cfg.nms_thr)
proposals = proposals[:cfg.max_num, :]
else:
scores = proposals[:, 4]
num = min(cfg.max_num, proposals.shape[0])
_, topk_inds = scores.topk(num)
proposals = proposals[topk_inds, :]
return proposals
```
|
{
"source": "Jeffery-zhang-nfls/baby-cry-recognition-baseline",
"score": 2
}
|
#### File: Jeffery-zhang-nfls/baby-cry-recognition-baseline/main.py
```python
from __future__ import print_function
import argparse
import os
import sys
import shutil
import time
import random
from tqdm import tqdm
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data as data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
from torch.utils.data import Dataset, DataLoader
from multiprocessing import cpu_count
import PIL
from utils import Bar, AverageMeter, accuracy, mkdir_p
import models as customized_models
from datasets import TrainDatasetByFeatures
from loss_utils import AMSoftmax, AngleLoss  # AngleLoss is used for --classifier_opt a; assumed to live in loss_utils as well
# CONST
train_epochs = 1000 # 100
train_batch = 32 # 256
test_batch = 16 # 100
# OPTIMIZER
weight_decay = 0.0002 # 1e-4
# Models
default_model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
customized_models_names = sorted(name for name in customized_models.__dict__
if name.islower() and not name.startswith("__")
and callable(customized_models.__dict__[name]))
for name in customized_models.__dict__:
if name.islower() and not name.startswith("__") and callable(customized_models.__dict__[name]):
models.__dict__[name] = customized_models.__dict__[name]
model_names = default_model_names + customized_models_names
# Parse arguments
parser = argparse.ArgumentParser(description='PyTorch DeepModel Training')
# Datasets
parser.add_argument('-d', '--data', default='path to dataset', type=str)
parser.add_argument('-j', '--workers', default=cpu_count(), type=int, metavar='N',
help='number of data loading workers (default: 4)')
# Optimization options
parser.add_argument('--epochs', default=1000, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('--train-batch', default=1024, type=int, metavar='N',
help='train batchsize (default: 256)')
parser.add_argument('--test-batch', default=16, type=int, metavar='N',
help='test batchsize (default: 100)')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
metavar='LR', help='initial learning rate')
parser.add_argument('--drop', '--dropout', default=0, type=float,
metavar='Dropout', help='Dropout ratio')
parser.add_argument('--schedule', type=int, nargs='+', default=[30, 60, 90],
help='Decrease learning rate at these epochs.')
parser.add_argument('--gamma', type=float, default=0.1, help='LR is multiplied by gamma on schedule.')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--weight-decay', '--wd', default=0.0002, type=float,
metavar='W', help='weight decay (default: 1e-4)')
# Checkpoints
parser.add_argument('-c', '--checkpoint', default='checkpoint', type=str, metavar='PATH',
help='path to save checkpoint (default: checkpoint)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
# Architecture
parser.add_argument('--arch', '-a', metavar='ARCH', default='se_resnet18',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
' (default: resnet50)')
parser.add_argument('--classifier_opt', '-cls', metavar='Classifier_Opt', default='am', help="classifier_opt")
parser.add_argument('--depth', type=int, default=29, help='Model depth.')
parser.add_argument('--cardinality', type=int, default=32, help='ResNet cardinality (group).')
parser.add_argument('--base-width', type=int, default=4, help='ResNet base width.')
parser.add_argument('--widen-factor', type=int, default=4, help='Widen factor. 4 -> 64, 8 -> 128, ...')
# Miscs
parser.add_argument('--manualSeed', type=int, help='manual seed')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model')
parser.add_argument('--model_weight', dest='model_weight', default=None, type=str,
help='custom pretrained model weight')
# Device options
parser.add_argument('--cpu', dest='cpu', action='store_true',
help='use cpu mode')
parser.add_argument('--gpu_id', default='0', type=str,
help='id(s) for CUDA_VISIBLE_DEVICES')
args = parser.parse_args()
state = {k: v for k, v in args._get_kwargs()}
# Use CUDA
if args.cpu:
print('Use CPU mode')
use_cuda = False
pin_memory = False
else:
print('Use CUDA mode')
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
use_cuda = torch.cuda.is_available()
pin_memory = True
# Random seed
if args.manualSeed is None:
args.manualSeed = random.randint(1, 10000)
random.seed(args.manualSeed)
torch.manual_seed(args.manualSeed)
if use_cuda:
torch.cuda.manual_seed_all(args.manualSeed)
best_acc = 0 # best test accuracy
################################################################################################################
class totensor(object):
def __call__(self, pic):
if isinstance(pic, np.ndarray):
img = torch.FloatTensor(pic.transpose((0, 2, 1)))
return img
transform = transforms.Compose([totensor()])
################################################################################################################
def main():
args.checkpoint = "exp/{}".format(args.arch)
# args.model_weight = "pretrained/{}.pth.tar".format(args.arch)
print(args)
global best_acc
start_epoch = args.start_epoch # start from epoch 0 or last checkpoint epoch
if not os.path.isdir(args.checkpoint):
mkdir_p(args.checkpoint)
# Data loading code
    train_dir = TrainDatasetByFeatures(transform=transform)
# val_dir = ValidationDatasetByFeatures(transform=transform)
    # The dataset draws samples randomly itself, so shuffle is set to False on this DataLoader
train_loader = DataLoader(train_dir, batch_size=args.train_batch, num_workers=args.workers, shuffle=False)
# val_loader = DataLoader(val_dir, batch_size=args.test_batch, num_workers=args.workers, shuffle=False)
# create model
if args.pretrained:
print("=> using pre-trained model '{}'".format(args.arch))
model = models.__dict__[args.arch](pretrained=True)
elif args.arch.startswith('resnext'):
model = models.__dict__[args.arch](
baseWidth=args.base_width,
cardinality=args.cardinality,
)
else:
print("=> creating model '{}'".format(args.arch))
model = models.__dict__[args.arch]()
if use_cuda:
        # model = torch.nn.DataParallel(model).cuda()  # causes all kinds of strange errors, e.g. in CPU mode map_location stops working as well
model = model.cuda()
# cudnn.benchmark = True
print(model)
if args.model_weight:
model_weight = torch.load(args.model_weight)
model.load_state_dict(model_weight['state_dict'])
print(' Total params: %.2fM' % (sum(p.numel() for p in model.parameters()) / 1000000.0))
# define loss function (criterion) and optimizer
# criterion = nn.CrossEntropyLoss()
if "am" == args.classifier_opt:
criterion = AMSoftmax()
# criterion = AMSoftmax(in_feats=6, n_classes=6)
elif "a" == args.classifier_opt:
criterion = AngleLoss()
else:
raise Exception("unsupported classifier_opt: {}".format(args.classifier_opt))
if use_cuda:
criterion = criterion.cuda()
# optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    # use Adagrad here instead of SGD
    optimizer = optim.Adagrad(model.parameters(), lr=args.lr, lr_decay=1e-4, weight_decay=args.weight_decay)
# Resume
title = 'RegBabyCry-' + args.arch
if args.resume:
# Load checkpoint.
print('==> Resuming from checkpoint..')
assert os.path.isfile(args.resume), 'Error: no checkpoint directory found!'
args.checkpoint = os.path.dirname(args.resume)
checkpoint = torch.load(args.resume)
best_acc = checkpoint['best_acc']
start_epoch = checkpoint['epoch']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
if args.evaluate:
print('\nEvaluation only')
test_loss, test_acc = test(val_loader, model, criterion, start_epoch, use_cuda)
print(' Test Loss: %.8f, Top1 Acc: %.2f ' % (test_loss, test_acc))
print(' Top1 Err: %.2f' % (100.0 - test_acc))
return
# Train and val
# test_loss, test_acc = test(val_loader, model, criterion, start_epoch, use_cuda)
for epoch in range(start_epoch, args.epochs):
adjust_learning_rate(optimizer, epoch)
print('\nEpoch: [%d | %d] LR: %f' % (epoch + 1, args.epochs, state['lr']))
train_loss, train_acc = train(train_loader, model, criterion, optimizer, epoch, use_cuda)
# test_loss, test_acc = test(val_loader, model, criterion, epoch, use_cuda)
# save model
is_best = train_acc > best_acc
best_acc = max(train_acc, best_acc)
save_checkpoint({
'epoch': epoch + 1,
'state_dict': model.state_dict(),
'acc': train_acc,
'best_acc': best_acc,
'optimizer': optimizer.state_dict(),
}, is_best, checkpoint=args.checkpoint)
print('Best acc:')
print(best_acc)
def train(train_loader, model, criterion, optimizer, epoch, use_cuda):
# switch to train mode
model.train()
# torch.set_grad_enabled(True)
torch.autograd.set_detect_anomaly(True)
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
end = time.time()
bar = Bar('P', max=len(train_loader))
pbar = tqdm(enumerate(train_loader))
for batch_idx, (inputs, targets) in pbar:
# measure data loading time
data_time.update(time.time() - end)
# print("inputs: ", inputs.size(), inputs.dtype, inputs.requires_grad)
# print("targets: ", targets.size(), targets.dtype, targets.requires_grad)
# print(targets)
if use_cuda:
inputs, targets = inputs.cuda(), targets.cuda(non_blocking=True)
# print("inputs: ", inputs.size(), inputs.dtype, inputs.requires_grad)
# print("targets: ", targets.size(), targets.dtype, targets.requires_grad)
inputs, targets = torch.autograd.Variable(inputs), torch.autograd.Variable(targets)
# print("inputs: ", inputs.size(), inputs.dtype, inputs.requires_grad)
# print("targets: ", targets.size(), targets.dtype, targets.requires_grad)
# compute output
out_fea, out_cls = model(inputs)
# print("out_fea: ", out_fea.size(), out_fea.dtype, out_fea.requires_grad)
# print("out_cls: ", out_cls.size(), out_cls.dtype, out_cls.requires_grad)
loss = criterion(out_cls, targets, scale=30.0, margin=0.35)
if "a" == args.classifier_opt:
prec1, = accuracy(out_cls[0].data, targets.data, topk=(1,))
elif "am" == args.classifier_opt:
prec1, = accuracy(out_cls.data, targets.data, topk=(1,))
# measure accuracy and record loss
losses.update(loss.data.item(), inputs.size(0))
top1.update(prec1.item(), inputs.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
# plot progress
if (batch_idx + 1) % 1 == 0:
# print(
# 'train ({batch}/{size}) D: {data:.2f}s | B: {bt:.2f}s | T: {total:} | E: {eta:} | L: {loss:.3f} | t1: {top1: .3f} '.format(
# batch=batch_idx + 1,
# size=len(train_loader),
# data=data_time.val,
# bt=batch_time.val,
# total=bar.elapsed_td,
# eta=bar.eta_td,
# loss=losses.avg,
# top1=top1.avg,
# ))
pbar.set_description(
'train ({batch}/{size}) D: {data:.2f}s | B: {bt:.2f}s | T: {total:} | E: {eta:} | L: {loss:.3f} | t1: {top1: .3f} '.format(
batch=batch_idx + 1,
size=len(train_loader),
data=data_time.val,
bt=batch_time.val,
total=bar.elapsed_td,
eta=bar.eta_td,
loss=losses.avg,
top1=top1.avg,
))
bar.next()
bar.finish()
return (losses.avg, top1.avg)
def test(val_loader, model, criterion, epoch, use_cuda):
global best_acc
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
bar = Bar('P', max=len(val_loader))
pbar = tqdm(enumerate(val_loader))
for batch_idx, (inputs, targets) in pbar:
# measure data loading time
data_time.update(time.time() - end)
if use_cuda:
inputs, targets = inputs.cuda(), targets.cuda()
inputs, targets = torch.autograd.Variable(inputs), torch.autograd.Variable(targets)
# compute output
end = time.time()
outputs = model(inputs)
# print("outputs: ", outputs)
# print("targets: ", targets)
batch_time.update(time.time() - end)
loss = criterion(outputs, targets, scale=30.0, margin=0.35)
prec1, = accuracy(outputs.data, targets.data, topk=(1,))
# measure accuracy and record loss
losses.update(loss.data.item(), inputs.size(0))
top1.update(prec1.item(), inputs.size(0))
# plot progress
if (batch_idx + 1) % 1 == 0:
# print(
# 'test ({batch}/{size}) D: {data:.2f}s | B: {bt:.2f}s | T: {total:} | E: {eta:} | L: {loss:.3f} | t1: {top1: .3f} '.format(
# batch=batch_idx + 1,
# size=len(val_loader),
# data=data_time.avg,
# bt=batch_time.avg,
# total=bar.elapsed_td,
# eta=bar.eta_td,
# loss=losses.avg,
# top1=top1.avg,
# ))
pbar.set_description(
'test ({batch}/{size}) D: {data:.2f}s | B: {bt:.2f}s | T: {total:} | E: {eta:} | L: {loss:.3f} | t1: {top1: .3f} '.format(
batch=batch_idx + 1,
size=len(val_loader),
data=data_time.avg,
bt=batch_time.avg,
total=bar.elapsed_td,
eta=bar.eta_td,
loss=losses.avg,
top1=top1.avg,
))
bar.next()
bar.finish()
return (losses.avg, top1.avg)
def save_checkpoint(state, is_best, checkpoint='checkpoint', filename='checkpoint.pth.tar'):
filepath = os.path.join(checkpoint, filename)
torch.save(state, filepath)
if is_best:
shutil.copyfile(filepath, os.path.join(checkpoint, 'model_best.pth.tar'))
def adjust_learning_rate(optimizer, epoch):
global state
if epoch in args.schedule:
state['lr'] *= args.gamma
for param_group in optimizer.param_groups:
param_group['lr'] = state['lr']
if __name__ == '__main__':
main()
```
|
{
"source": "jeffesp/aqi",
"score": 3
}
|
#### File: jeffesp/aqi/data.py
```python
import sqlite3
import json
from datetime import datetime
from pytz import timezone, utc
CREATE_WEATHER = '''CREATE TABLE IF NOT EXISTS weather (
lat REAL NOT NULL,
long REAL NOT NULL,
ts timestamp NOT NULL,
value TEXT);'''
CREATE_WEATHER_INDEX = '\
CREATE UNIQUE INDEX IF NOT EXISTS query_values ON weather (lat, long, ts DESC);'
INSERT_WEATHER = 'INSERT INTO weather VALUES (?, ?, ?, ?);'
SELECT_WEATHER = '\
SELECT value FROM weather WHERE lat = ? AND long = ? AND ts = ?;'
SELECT_LATEST_WEATHER = '\
SELECT value FROM weather WHERE lat = ? AND long = ? ORDER BY ts DESC LIMIT 1;'
CREATE_AIR_QUALITY = '''CREATE TABLE IF NOT EXISTS air_quality (
lat REAL NOT NULL,
long REAL NOT NULL,
ts timestamp NOT NULL,
value TEXT);'''
CREATE_AIR_QUALITY_INDEX = '\
CREATE UNIQUE INDEX IF NOT EXISTS query_values ON air_quality (lat, long, ts DESC);'
INSERT_AIR_QUALITY = 'INSERT INTO air_quality VALUES (?, ?, ?, ?);'
SELECT_AIR_QUALITY = '\
SELECT value FROM air_quality WHERE lat = ? AND long = ? AND ts = ?;'
SELECT_LATEST_AIR_QUALITY = '\
SELECT value FROM air_quality WHERE lat = ? AND long = ? ORDER BY ts DESC LIMIT 1;'
def ensure_db_exists(data):
data.execute(CREATE_WEATHER)
data.execute(CREATE_WEATHER_INDEX)
data.execute(CREATE_AIR_QUALITY)
data.execute(CREATE_AIR_QUALITY_INDEX)
def get_timestamp(epoch_seconds, tz_string):
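    # Build a timezone-aware datetime from the epoch seconds and normalize it
    # to UTC before it is stored.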
zone = timezone(tz_string)
stamp = datetime.fromtimestamp(epoch_seconds, zone)
return utc.normalize(stamp)
def add_weather(data, location, value):
time_stamp = get_timestamp(value['daily']['data'][0]['time'], value['timezone'])
with data:
data.execute(INSERT_WEATHER,
(location[0], location[1], time_stamp, json.dumps(value)))
def find_weather(data, lat, lng, ts):
return [json.loads(v[0]) for v in data.execute(SELECT_WEATHER, (lat, lng, ts)).fetchall()]
def get_latest_weather(data, lat, lng):
cursor = data.cursor()
value = cursor.execute(SELECT_LATEST_WEATHER, (lat, lng)).fetchone()
if value:
return json.loads(value[0])
else:
return None
def add_air_quality(data, location, value, tz=None):
time_stamp = get_timestamp(value['date'], tz or data['timezone'])
with data:
data.execute(INSERT_AIR_QUALITY,
(location[0], location[1], time_stamp, json.dumps(value)))
def get_latest_air_quality(data, lat, lng):
cursor = data.cursor()
value = cursor.execute(SELECT_LATEST_AIR_QUALITY, (lat, lng)).fetchone()
if value:
return json.loads(value[0])
else:
return None
if __name__ == '__main__':
db = sqlite3.connect('aqi.sqlite')
ensure_db_exists(db)
get_latest_weather(db, 39.954352, 116.466258)
```
#### File: aqi/tests/test_data.py
```python
from nose import with_setup
import feedparser
from .. import air_quality
class TestAirQualityParsing(object):
def test_read_rss_item(self):
parsed = feedparser.parse('../sample-airquality.xml')
result = air_quality.parse_entry(parsed.entries[0])
assert result['date'] == 1512644400
assert result['aqi'] == 34
assert result['pm25'] == 8.0
```
|
{
"source": "jeffessongomes/frwsaude",
"score": 2
}
|
#### File: apps/principal/models.py
```python
from django.db import models
class Ingredients(models.Model):
name = models.CharField(max_length=255)
def __str__(self):
return self.name
class Product(models.Model):
name = models.CharField(max_length=255)
price = models.IntegerField(null=True, blank=True, default=0)
ingredients = models.ManyToManyField(Ingredients, blank=True)
image = models.ImageField(upload_to='image', null=True)
details = models.TextField(null=True)
def __str__(self):
return self.name
class Value(models.Model):
value = models.CharField(max_length=255)
def __str__(self):
return self.value
class Contact(models.Model):
name = models.CharField(max_length=255)
email = models.EmailField()
title = models.CharField(max_length=255)
details = models.TextField()
def __str__(self):
return self.name
class Initial(models.Model):
title = models.TextField()
subtitle = models.TextField()
def __str__(self):
return self.title
class How_Use(models.Model):
title = models.CharField(max_length=500, null=True)
details = models.TextField()
image = models.ImageField(upload_to='image/how_use')
def __str__(self):
return self.title
class How_Use_Text(models.Model):
title = models.CharField(max_length=500)
subtitle = models.CharField(max_length=500)
details = models.TextField()
class Video_Description(models.Model):
title = models.CharField(max_length=500, null=True)
details = models.TextField()
video = models.FileField(upload_to='video')
class TecDashImages(models.Model):
title = models.CharField(max_length=500, null=True)
slug = models.CharField(max_length=100, null=True)
details = models.TextField()
image = models.ImageField(upload_to='image/how_use')
def __str__(self):
return self.title
class ProductsPrize(models.Model):
qnt = models.CharField(max_length=100, null=True)
image = models.ImageField(upload_to="image/products_img")
preco_one = models.CharField(max_length=60, null=True)
por_div = models.CharField(max_length=60, null=True)
prize_div = models.CharField(max_length=80, null=True)
buy_link = models.CharField(max_length=100, null=True)
def __str__(self):
return self.preco_one
class Footer(models.Model):
text = models.TextField()
```
#### File: apps/principal/views.py
```python
from django.shortcuts import render, redirect
from django.http import HttpResponseRedirect, HttpResponse
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.contrib.auth import login, authenticate, logout
from apps.inicial.models import Doubt, Client, PhotoProdutoAfterBefore
from .models import Product, Ingredients, Contact, Initial, How_Use, Video_Description, How_Use_Text, TecDashImages
from .models import ProductsPrize, Footer, Value
from .forms import DoubtForm, ClientForm, ProductForm, IngredientsForm, CaseForm, VideoDescriptionForm
from .forms import InitialForm, HowUseForm, HowUseTextForm, ProductPrizeForm, FooterForm, ValueForm
from django.core.mail import send_mail
from django.core import mail
from django.template.loader import render_to_string
from .tokens import account_activation_token
import json
@login_required(login_url='login')
def index(request):
option = TecDashImages.objects.all()
context = {
'option': option
}
return render(request, 'dashboard/dashboard.html', context)
# crud initial
@login_required(login_url='login')
def edit_value(request):
data = {}
value = Value.objects.get(pk=1)
if request.method == 'POST':
form = ValueForm(data=request.POST, instance=value)
if form.is_valid():
form.save()
return redirect('index')
else:
HttpResponse(json.dumps(form.errors))
else:
form = ValueForm(instance=value)
data['form'] = form; data['value'] = value;
return render(request, 'dashboard/value/edit-value.html', data)
def verifify_user(email, password):
try:
user = User.objects.get(email=email)
except User.DoesNotExist:
user = None
if user is not None:
user = authenticate(username=user.username, password=password)
return user
else:
return user
def do_login(request):
if request.method == 'POST':
email = request.POST['email'];
password = request.POST['password'];
user = verifify_user(email, password)
if user is not None:
login(request, user)
return redirect('index')
else:
error = True
return render(request, 'dashboard/telaLogin.html', {'error': error})
return render(request, 'dashboard/telaLogin.html')
def do_logout(request):
logout(request)
return redirect('login')
def forget_password(request):
if request.method == 'POST':
try:
user = User.objects.get(email=request.POST['email'])
except User.DoesNotExist:
user = None
if user != None:
msg_html = render_to_string('dashboard/email.html', {
'user': user,
'token':account_activation_token.make_token(user)
})
connection = mail.get_connection()
connection.open()
email = mail.EmailMessage(
'Suporte - RemixManiacs',
msg_html,
'<EMAIL>', # 'from'
[user.email,], # 'to'
connection=connection
)
email.send()
confirmEmail = True
return render(request, 'dashboard/forget-pass.html', {'confirmEmail': confirmEmail})
else:
error = True
return render(request, 'dashboard/forget-pass.html', {'error': error})
else:
return render(request, 'dashboard/forget-pass.html')
def confirme_password(request, pk, token):
try:
user = User.objects.get(pk=pk)
except User.DoesNotExist:
user = None
if user != None:
if account_activation_token.check_token(user, token):
return render(request, 'dashboard/change-password.html', {'user_status':user})
else:
return HttpResponse("<h1 align='center'>Token does not exist</h1></br></br><a align='center' href='login'>click here</a>")
else:
return HttpResponse("<h1 align='center'>User does not exist</h1></br></br><a align='center' href='forget_password'>click here</a>")
def change_password(request):
if request.method == 'POST':
password1 = request.POST['password1']
password2 = request.POST['password2']
if password1 == password2:
user_status = request.POST['user_status']
try:
user = User.objects.get(pk=user_status)
except User.DoesNotExist:
user = None
except ValueError:
user = None
if user != None:
                user.set_password(password1)
user.save()
return redirect('login')
else:
return HttpResponse("<h1 align='center'>Please, use the link of your email</h1>")
else:
error = True
return render(request, 'dashboard/change-password.html', {'error':error})
error = False
return render(request, 'dashboard/change-password.html', {'error':error})
# client doubt
@login_required(login_url='login')
def add_doubt(request):
data = {}
if request.method == 'POST':
form = DoubtForm(data=request.POST, files=request.FILES)
if form.is_valid():
form.save()
return redirect('list_doubt')
else:
HttpResponse(json.dumps(form.errors))
else:
form = DoubtForm()
data['form'] = form
return render(request, 'dashboard/doubt/add-doubt.html', data)
@login_required(login_url='login')
def list_doubt(request):
data = {}
doubts = Doubt.objects.all()
data['doubts'] = doubts
return render(request, 'dashboard/doubt/list-doubt.html', data)
@login_required(login_url='login')
def edit_doubt(request, pk):
data = {}
doubt = Doubt.objects.get(pk=pk)
if request.method == 'POST':
form = DoubtForm(data=request.POST, instance=doubt)
if form.is_valid():
form.save()
return redirect('index')
else:
HttpResponse(json.dumps(form.errors))
else:
form = DoubtForm(instance=doubt)
data['form'] = form; data['doubt'] = doubt;
return render(request, 'dashboard/doubt/edit-doubt.html', data)
@login_required(login_url='login')
def delete_doubt(request, pk):
doubt = Doubt.objects.get(pk=pk)
doubt.delete()
return redirect('list_doubt')
# crud client
@login_required(login_url='login')
def add_client(request):
data = {}
if request.method == 'POST':
form = ClientForm(data=request.POST, files=request.FILES)
if form.is_valid():
form.save()
return redirect('list_client')
else:
HttpResponse(json.dumps(form.errors))
else:
form = ClientForm()
data['form'] = form
return render(request, 'dashboard/client/add-client.html', data)
@login_required(login_url='login')
def list_client(request):
data = {}
clients = Client.objects.all()
data['clients'] = clients
return render(request, 'dashboard/client/list-client.html', data)
@login_required(login_url='login')
def edit_client(request, pk):
data = {}
client = Client.objects.get(pk=pk)
if request.method == 'POST':
form = ClientForm(data=request.POST, instance=client)
if form.is_valid():
form.save()
return redirect('index')
else:
HttpResponse(json.dumps(form.errors))
else:
form = ClientForm(instance=client)
data['form'] = form; data['client'] = client;
return render(request, 'dashboard/client/edit-client.html', data)
@login_required(login_url='login')
def delete_client(request, pk):
client = Client.objects.get(pk=pk)
client.delete()
return redirect('index')
# crud product
@login_required(login_url='login')
def add_product(request):
data = {}
if request.method == 'POST':
form = ProductForm(data=request.POST, files=request.FILES)
if form.is_valid():
form.save()
return redirect('list_product')
else:
HttpResponse(json.dumps(form.errors))
else:
form = ProductForm()
data['form'] = form
return render(request, 'dashboard/product/add-product.html', data)
@login_required(login_url='login')
def list_product(request):
data = {}
products = Product.objects.all()
data['products'] = products
return render(request, 'dashboard/product/list-product.html', data)
@login_required(login_url='login')
def edit_product(request, pk):
data = {}
product = Product.objects.get(pk=pk)
if request.method == 'POST':
product.image.delete()
form = ProductForm(data=request.POST, files=request.FILES, instance=product)
if form.is_valid():
form.save()
return redirect('index')
else:
HttpResponse(json.dumps(form.errors))
else:
form = ProductForm(instance=product)
data['form'] = form; data['product'] = product;
return render(request, 'dashboard/product/edit-product.html', data)
@login_required(login_url='login')
def delete_product(request, pk):
product = Product.objects.get(pk=pk)
product.delete()
return redirect('index')
# crud ingredient
@login_required(login_url='login')
def add_ingredients(request):
data = {}
if request.method == 'POST':
form = IngredientsForm(data=request.POST, files=request.FILES)
if form.is_valid():
form.save()
return redirect('list_ingredients')
else:
HttpResponse(json.dumps(form.errors))
else:
form = IngredientsForm()
data['form'] = form
return render(request, 'dashboard/ingredient/add-ingredient.html', data)
@login_required(login_url='login')
def list_ingredients(request):
data = {}
ingredients = Ingredients.objects.all()
data['ingredients'] = ingredients
return render(request, 'dashboard/ingredient/list-ingredient.html', data)
@login_required(login_url='login')
def edit_ingredients(request, pk):
data = {}
ingredient = Ingredients.objects.get(pk=pk)
if request.method == 'POST':
form = IngredientsForm(data=request.POST, instance=ingredient)
if form.is_valid():
form.save()
return redirect('index')
else:
HttpResponse(json.dumps(form.errors))
else:
form = IngredientsForm(instance=ingredient)
data['form'] = form; data['ingredient'] = ingredient;
return render(request, 'dashboard/ingredient/edit-ingredient.html', data)
@login_required(login_url='login')
def delete_ingredients(request, pk):
ingredient = Ingredients.objects.get(pk=pk)
ingredient.delete()
return redirect('index')
# crud cases
@login_required(login_url='login')
def add_case(request):
data = {}
if request.method == 'POST':
form = CaseForm(data=request.POST, files=request.FILES)
if form.is_valid():
form.save()
return redirect('list_case')
else:
HttpResponse(json.dumps(form.errors))
else:
form = CaseForm()
data['form'] = form
return render(request, 'dashboard/case/add_case.html', data)
@login_required(login_url='login')
def list_case(request):
data = {}
cases = PhotoProdutoAfterBefore.objects.all()
data['cases'] = cases
return render(request, 'dashboard/case/list-case.html', data)
@login_required(login_url='login')
def edit_case(request, pk):
data = {}
case = PhotoProdutoAfterBefore.objects.get(pk=pk)
if request.method == 'POST':
case.photo.delete()
form = CaseForm(data=request.POST, files=request.FILES, instance=case)
if form.is_valid():
form.save()
return redirect('index')
else:
HttpResponse(json.dumps(form.errors))
else:
form = CaseForm(instance=case)
data['form'] = form; data['case'] = case;
return render(request, 'dashboard/case/edit-case.html', data)
@login_required(login_url='login')
def delete_case(request, pk):
case = PhotoProdutoAfterBefore.objects.get(pk=pk)
case.delete()
return redirect('index')
# crud initial
@login_required(login_url='login')
def edit_initial(request):
data = {}
initial = Initial.objects.get(pk=1)
if request.method == 'POST':
form = InitialForm(data=request.POST, instance=initial)
if form.is_valid():
form.save()
return redirect('index')
else:
HttpResponse(json.dumps(form.errors))
else:
form = InitialForm(instance=initial)
data['form'] = form; data['initial'] = initial;
return render(request, 'dashboard/initial/edit-initial.html', data)
# crud footer
@login_required(login_url='login')
def edit_footer(request):
data = {}
footer = Footer.objects.get(pk=1)
if request.method == 'POST':
form = FooterForm(data=request.POST, instance=footer)
if form.is_valid():
form.save()
return redirect('index')
else:
HttpResponse(json.dumps(form.errors))
else:
form = FooterForm(instance=footer)
data['form'] = form; data['footer'] = footer;
return render(request, 'dashboard/footer/edit-footer.html', data)
# crud How Use
@login_required(login_url='login')
def add_how(request):
data = {}
if request.method == 'POST':
form = HowUseForm(data=request.POST, files=request.FILES)
if form.is_valid():
form.save()
return redirect('list_how')
else:
HttpResponse(json.dumps(form.errors))
else:
form = HowUseForm()
data['form'] = form
return render(request, 'dashboard/how/add-how.html', data)
@login_required(login_url='login')
def list_how(request):
data = {}
hows = How_Use.objects.all()
data['hows'] = hows
return render(request, 'dashboard/how/list-how.html', data)
@login_required(login_url='login')
def edit_how(request, pk):
data = {}
how = How_Use.objects.get(pk=pk)
if request.method == 'POST':
how.image.delete()
form = HowUseForm(data=request.POST, files=request.FILES, instance=how)
if form.is_valid():
form.save()
return redirect('index')
else:
HttpResponse(json.dumps(form.errors))
else:
form = HowUseForm(instance=how)
data['form'] = form; data['how'] = how;
return render(request, 'dashboard/how/edit-how.html', data)
@login_required(login_url='login')
def delete_how(request, pk):
how = How_Use.objects.get(pk=pk)
how.delete()
return redirect('index')
# crud how text
@login_required(login_url='login')
def edit_how_text(request):
data = {}
how = How_Use_Text.objects.get(pk=1)
if request.method == 'POST':
form = HowUseTextForm(data=request.POST, instance=how)
if form.is_valid():
form.save()
return redirect('index')
else:
HttpResponse(json.dumps(form.errors))
else:
form = HowUseTextForm(instance=how)
data['form'] = form; data['how'] = how;
return render(request, 'dashboard/how_text/edit-how-text.html', data)
# crud how text
@login_required(login_url='login')
def edit_video_description(request):
data = {}
video = Video_Description.objects.get(pk=1)
if request.method == 'POST':
form = VideoDescriptionForm(data=request.POST, files=request.FILES, instance=video)
video.video.delete()
if form.is_valid():
form.save()
return redirect('index')
else:
HttpResponse(json.dumps(form.errors))
else:
form = VideoDescriptionForm(instance=video)
data['form'] = form; data['video'] = video;
return render(request, 'dashboard/video_description/edit-video-description.html', data)
# crud product prize
@login_required(login_url='login')
def add_product_prize(request):
data = {}
if request.method == 'POST':
form = ProductPrizeForm(data=request.POST, files=request.FILES)
if form.is_valid():
form.save()
return redirect('list_product_prize')
else:
HttpResponse(json.dumps(form.errors))
else:
form = ProductPrizeForm()
data['form'] = form
return render(request, 'dashboard/product_prize/add_product_prize.html', data)
@login_required(login_url='login')
def list_product_prize(request):
data = {}
product_prize = ProductsPrize.objects.all()
data['product_prize'] = product_prize
return render(request, 'dashboard/product_prize/list_product_prize.html', data)
@login_required(login_url='login')
def edit_product_prize(request, pk):
data = {}
product_prize = ProductsPrize.objects.get(pk=pk)
if request.method == 'POST':
form = ProductPrizeForm(data=request.POST, files=request.FILES, instance=product_prize)
product_prize.image.delete()
if form.is_valid():
form.save()
return redirect('index')
else:
HttpResponse(json.dumps(form.errors))
else:
form = ProductPrizeForm(instance=product_prize)
data['form'] = form; data['product_prize'] = product_prize;
return render(request, 'dashboard/product_prize/edit_product_prize.html', data)
@login_required(login_url='login')
def delete_product_prize(request, pk):
product_prize = ProductsPrize.objects.get(pk=pk)
product_prize.image.delete()
product_prize.delete()
return redirect('index')
```
|
{
"source": "jeffeuxMartin/WikiExtractor",
"score": 3
}
|
#### File: WikiExtractor/wiki/sql2csv.py
```python
import csv
import codecs
import sys
csv.field_size_limit(2 ** 16)
def is_insert(line):
    return line.startswith('INSERT INTO')
def get_values(line):
return line.partition('` VALUES ')[2]
def values_sanity_check(values):
assert values
assert values[0] == '('
return True
def parse_values(values):
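    # Parse the VALUES payload of a mysqldump INSERT statement and yield one
    # list of column strings per row. The whole payload is handed to
    # csv.reader as a single line; row boundaries are then recovered by
    # stripping the "(" that opens each tuple and the ")" / ");" that closes
    # it. Empty columns and SQL NULLs are encoded as chr(0).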
latest_row = []
reader = csv.reader([values], delimiter=',',
doublequote=False,
escapechar='\\',
quotechar="'",
strict=True
)
for reader_row in reader:
for column in reader_row:
if len(column) == 0 or column == 'NULL':
latest_row.append(chr(0))
continue
if column[0] == "(":
new_row = False
if len(latest_row) > 0:
if latest_row[-1][-1] == ")":
latest_row[-1] = latest_row[-1][:-1]
new_row = True
if new_row:
yield latest_row
latest_row = []
if len(latest_row) == 0:
column = column[1:]
latest_row.append(column)
if latest_row[-1][-2:] == ");":
latest_row[-1] = latest_row[-1][:-2]
yield latest_row
def sql2csv(input_file):
with codecs.open(input_file, 'r', encoding='utf-8', errors='ignore') as inputfile:
try:
for line in inputfile.readlines():
if is_insert(line):
values = get_values(line)
if values_sanity_check(values):
yield parse_values(values)
except KeyboardInterrupt:
sys.exit(0)
```
|
{
"source": "JeffeVargas/First_Project",
"score": 3
}
|
#### File: Central_of_Functions-v2/crawler0/crawlerhub.py
```python
from crawler0.clima import verclima
from crawler0.dolar import verdolar
from crawler0.euro import vereuro
from crawler0.tradutor import traduzir
from crawler0.covid import covid
from crawler0.crawler_de_imagem import baixarimagem
from os import system
def crawlerhub():
while True:
print('='*80)
print('''
O que deseja fazer?
[ 1 ] Clima
[ 2 ] Crawler de Imagem
[ 3 ] Euro
[ 4 ] Dolar
[ 5 ] Tradutor
[ 6 ] Covid
[ m ] Menu
''')
print('='*80)
pergunta = str(input('Sua resposta: '))
system('cls')
if pergunta == '1':
verclima()
elif pergunta == '2':
baixarimagem()
elif pergunta == '3':
vereuro()
elif pergunta == '4':
verdolar()
elif pergunta == '5':
traduzir()
elif pergunta == '6':
covid()
elif pergunta == 'm':
break
```
#### File: Central_of_Functions-v2/crawler0/tradutor.py
```python
from deep_translator import GoogleTranslator
from os import system
def traduzir():
volt = True
langs_dict = GoogleTranslator.get_supported_languages(as_dict=True)
lingua1 = ''
lingua2 = ''
while volt:
        while lingua1 not in langs_dict.keys() or lingua2 not in langs_dict.keys():  # keep asking until both languages are recognized
print('='*80)
lingua1 = input('Língua materna: ')
lingua1 = (GoogleTranslator(source='pt', target='en').translate(lingua1)).lower()
lingua2 = input('Língua da tradução: ')
lingua2 = (GoogleTranslator(source='pt', target='en').translate(lingua2) ).lower()
pl = input('Digite o texto a ser traduzido: ')
if not lingua1 in langs_dict.keys():
print('essa lingua nao existe em nosso sistema {}'.format(lingua1))
if not lingua2 in langs_dict.keys():
print('essa lingua nao existe em nosso sistema {}'.format(lingua2))
if lingua1 in langs_dict.keys() and lingua2 in langs_dict.keys():
sigla1 = langs_dict[lingua1]
sigla2 = langs_dict[lingua2]
translated = GoogleTranslator(source=sigla1, target=sigla2).translate(pl)
print(f'A tradução de {pl} é {translated}')
print('Deseja continuar?')
print('='*80)
print('''
[ c ] Continuar...
[ m ] Menu de Crawler
''')
print('='*80)
choice = str(input('O que vai ser? '))
if choice == 'c':
volt = True
lingua1 = lingua2 = ''
if choice == 'm':
system('cls')
volt = False
```
#### File: Central_of_Functions-v2/math0/polegada_centimetro.py
```python
from os import system
def centimeter():
while True:
c = 2.54
print('='*80)
ask = float(input('Digite o valor a ser convertido: '))
convert0 = float(ask)
convert1 = convert0 * c
print(f'Você digitou {convert0} e a conversão resultou em {convert1}cm')
print('='*80)
choice = str(input('Digite 1 e pressione Enter... '))
if choice == '1':
system('cls')
break
```
|
{
"source": "Jeffew/download-articles-by-title",
"score": 3
}
|
#### File: Jeffew/download-articles-by-title/download.py
```python
from concurrent.futures.thread import ThreadPoolExecutor
from Crawler import Crawler
# This function will download all PDFs it can to Articles directory
def download_articles_from(titles_list):
crawler = Crawler()
print("Starting download")
pool = ThreadPoolExecutor(max_workers=5)
for title in titles_list:
pool.submit(crawler.search, title)
pool.shutdown(wait=True)
crawler.write_fails()
```
|
{
"source": "JefffHofffman/trio",
"score": 2
}
|
#### File: _core/tests/test_multierror.py
```python
import logging
import pytest
from traceback import extract_tb, print_exception, format_exception, _cause_message
import sys
import os
import re
from pathlib import Path
import subprocess
from .tutil import slow
from .._multierror import MultiError, concat_tb
from ..._core import open_nursery
class NotHashableException(Exception):
code = None
def __init__(self, code):
super().__init__()
self.code = code
def __eq__(self, other):
if not isinstance(other, NotHashableException):
return False
return self.code == other.code
async def raise_nothashable(code):
raise NotHashableException(code)
def raiser1():
raiser1_2()
def raiser1_2():
raiser1_3()
def raiser1_3():
raise ValueError("raiser1_string")
def raiser2():
raiser2_2()
def raiser2_2():
raise KeyError("raiser2_string")
def raiser3():
raise NameError
def get_exc(raiser):
try:
raiser()
except Exception as exc:
return exc
def get_tb(raiser):
return get_exc(raiser).__traceback__
def einfo(exc):
return (type(exc), exc, exc.__traceback__)
def test_concat_tb():
tb1 = get_tb(raiser1)
tb2 = get_tb(raiser2)
# These return a list of (filename, lineno, fn name, text) tuples
# https://docs.python.org/3/library/traceback.html#traceback.extract_tb
entries1 = extract_tb(tb1)
entries2 = extract_tb(tb2)
tb12 = concat_tb(tb1, tb2)
assert extract_tb(tb12) == entries1 + entries2
tb21 = concat_tb(tb2, tb1)
assert extract_tb(tb21) == entries2 + entries1
# Check degenerate cases
assert extract_tb(concat_tb(None, tb1)) == entries1
assert extract_tb(concat_tb(tb1, None)) == entries1
assert concat_tb(None, None) is None
# Make sure the original tracebacks didn't get mutated by mistake
assert extract_tb(get_tb(raiser1)) == entries1
assert extract_tb(get_tb(raiser2)) == entries2
def test_MultiError():
exc1 = get_exc(raiser1)
exc2 = get_exc(raiser2)
assert MultiError([exc1]) is exc1
m = MultiError([exc1, exc2])
assert m.exceptions == [exc1, exc2]
assert "ValueError" in str(m)
assert "ValueError" in repr(m)
with pytest.raises(TypeError):
MultiError(object())
with pytest.raises(TypeError):
MultiError([KeyError(), ValueError])
def test_MultiErrorOfSingleMultiError():
# For MultiError([MultiError]), ensure there is no bad recursion by the
# constructor where __init__ is called if __new__ returns a bare MultiError.
exceptions = [KeyError(), ValueError()]
a = MultiError(exceptions)
b = MultiError([a])
assert b == a
assert b.exceptions == exceptions
async def test_MultiErrorNotHashable():
exc1 = NotHashableException(42)
exc2 = NotHashableException(4242)
exc3 = ValueError()
assert exc1 != exc2
assert exc1 != exc3
with pytest.raises(MultiError):
async with open_nursery() as nursery:
nursery.start_soon(raise_nothashable, 42)
nursery.start_soon(raise_nothashable, 4242)
def test_MultiError_filter_NotHashable():
excs = MultiError([NotHashableException(42), ValueError()])
def handle_ValueError(exc):
if isinstance(exc, ValueError):
return None
else:
return exc
filtered_excs = MultiError.filter(handle_ValueError, excs)
assert isinstance(filtered_excs, NotHashableException)
def test_traceback_recursion():
exc1 = RuntimeError()
exc2 = KeyError()
exc3 = NotHashableException(42)
# Note how this creates a loop, where exc1 refers to exc1
# This could trigger an infinite recursion; the 'seen' set is supposed to prevent
# this.
exc1.__cause__ = MultiError([exc1, exc2, exc3])
# python traceback.TracebackException < 3.6.4 does not support unhashable exceptions
# and raises a TypeError exception
if sys.version_info < (3, 6, 4):
with pytest.raises(TypeError):
format_exception(*einfo(exc1))
else:
format_exception(*einfo(exc1))
def make_tree():
# Returns an object like:
# MultiError([
# MultiError([
# ValueError,
# KeyError,
# ]),
# NameError,
# ])
# where all exceptions except the root have a non-trivial traceback.
exc1 = get_exc(raiser1)
exc2 = get_exc(raiser2)
exc3 = get_exc(raiser3)
# Give m12 a non-trivial traceback
try:
raise MultiError([exc1, exc2])
except BaseException as m12:
return MultiError([m12, exc3])
def assert_tree_eq(m1, m2):
if m1 is None or m2 is None:
assert m1 is m2
return
assert type(m1) is type(m2)
assert extract_tb(m1.__traceback__) == extract_tb(m2.__traceback__)
assert_tree_eq(m1.__cause__, m2.__cause__)
assert_tree_eq(m1.__context__, m2.__context__)
if isinstance(m1, MultiError):
assert len(m1.exceptions) == len(m2.exceptions)
for e1, e2 in zip(m1.exceptions, m2.exceptions):
assert_tree_eq(e1, e2)
def test_MultiError_filter():
def null_handler(exc):
return exc
m = make_tree()
assert_tree_eq(m, m)
assert MultiError.filter(null_handler, m) is m
assert_tree_eq(m, make_tree())
# Make sure we don't pick up any detritus if run in a context where
# implicit exception chaining would like to kick in
m = make_tree()
try:
raise ValueError
except ValueError:
assert MultiError.filter(null_handler, m) is m
assert_tree_eq(m, make_tree())
def simple_filter(exc):
if isinstance(exc, ValueError):
return None
if isinstance(exc, KeyError):
return RuntimeError()
return exc
new_m = MultiError.filter(simple_filter, make_tree())
assert isinstance(new_m, MultiError)
assert len(new_m.exceptions) == 2
# was: [[ValueError, KeyError], NameError]
# ValueError disappeared & KeyError became RuntimeError, so now:
assert isinstance(new_m.exceptions[0], RuntimeError)
assert isinstance(new_m.exceptions[1], NameError)
# implicit chaining:
assert isinstance(new_m.exceptions[0].__context__, KeyError)
# also, the traceback on the KeyError incorporates what used to be the
# traceback on its parent MultiError
orig = make_tree()
# make sure we have the right path
assert isinstance(orig.exceptions[0].exceptions[1], KeyError)
# get original traceback summary
orig_extracted = (
extract_tb(orig.__traceback__) +
extract_tb(orig.exceptions[0].__traceback__) +
extract_tb(orig.exceptions[0].exceptions[1].__traceback__)
)
def p(exc):
print_exception(type(exc), exc, exc.__traceback__)
p(orig)
p(orig.exceptions[0])
p(orig.exceptions[0].exceptions[1])
p(new_m.exceptions[0].__context__)
# compare to the new path
assert new_m.__traceback__ is None
new_extracted = extract_tb(new_m.exceptions[0].__context__.__traceback__)
assert orig_extracted == new_extracted
# check preserving partial tree
def filter_NameError(exc):
if isinstance(exc, NameError):
return None
return exc
m = make_tree()
new_m = MultiError.filter(filter_NameError, m)
# with the NameError gone, the other branch gets promoted
assert new_m is m.exceptions[0]
# check fully handling everything
def filter_all(exc):
return None
assert MultiError.filter(filter_all, make_tree()) is None
def test_MultiError_catch():
# No exception to catch
def noop(_):
pass # pragma: no cover
with MultiError.catch(noop):
pass
# Simple pass-through of all exceptions
m = make_tree()
with pytest.raises(MultiError) as excinfo:
with MultiError.catch(lambda exc: exc):
raise m
assert excinfo.value is m
# Should be unchanged, except that we added a traceback frame by raising
# it here
assert m.__traceback__ is not None
assert m.__traceback__.tb_frame.f_code.co_name == "test_MultiError_catch"
assert m.__traceback__.tb_next is None
m.__traceback__ = None
assert_tree_eq(m, make_tree())
# Swallows everything
with MultiError.catch(lambda _: None):
raise make_tree()
def simple_filter(exc):
if isinstance(exc, ValueError):
return None
if isinstance(exc, KeyError):
return RuntimeError()
return exc
with pytest.raises(MultiError) as excinfo:
with MultiError.catch(simple_filter):
raise make_tree()
new_m = excinfo.value
assert isinstance(new_m, MultiError)
assert len(new_m.exceptions) == 2
# was: [[ValueError, KeyError], NameError]
# ValueError disappeared & KeyError became RuntimeError, so now:
assert isinstance(new_m.exceptions[0], RuntimeError)
assert isinstance(new_m.exceptions[1], NameError)
# Make sure that Python did not successfully attach the old MultiError to
# our new MultiError's __context__
assert not new_m.__suppress_context__
assert new_m.__context__ is None
# check preservation of __cause__ and __context__
v = ValueError()
v.__cause__ = KeyError()
with pytest.raises(ValueError) as excinfo:
with MultiError.catch(lambda exc: exc):
raise v
assert isinstance(excinfo.value.__cause__, KeyError)
v = ValueError()
context = KeyError()
v.__context__ = context
with pytest.raises(ValueError) as excinfo:
with MultiError.catch(lambda exc: exc):
raise v
assert excinfo.value.__context__ is context
assert not excinfo.value.__suppress_context__
for suppress_context in [True, False]:
v = ValueError()
context = KeyError()
v.__context__ = context
v.__suppress_context__ = suppress_context
distractor = RuntimeError()
with pytest.raises(ValueError) as excinfo:
def catch_RuntimeError(exc):
if isinstance(exc, RuntimeError):
return None
else:
return exc
with MultiError.catch(catch_RuntimeError):
raise MultiError([v, distractor])
assert excinfo.value.__context__ is context
assert excinfo.value.__suppress_context__ == suppress_context
def assert_match_in_seq(pattern_list, string):
offset = 0
print("looking for pattern matches...")
for pattern in pattern_list:
print("checking pattern:", pattern)
reobj = re.compile(pattern)
match = reobj.search(string, offset)
assert match is not None
offset = match.end()
def test_assert_match_in_seq():
assert_match_in_seq(["a", "b"], "xx a xx b xx")
assert_match_in_seq(["b", "a"], "xx b xx a xx")
with pytest.raises(AssertionError):
assert_match_in_seq(["a", "b"], "xx b xx a xx")
def test_format_exception():
exc = get_exc(raiser1)
formatted = "".join(format_exception(*einfo(exc)))
assert "raiser1_string" in formatted
assert "in raiser1_3" in formatted
assert "raiser2_string" not in formatted
assert "in raiser2_2" not in formatted
assert "direct cause" not in formatted
assert "During handling" not in formatted
exc = get_exc(raiser1)
exc.__cause__ = get_exc(raiser2)
formatted = "".join(format_exception(*einfo(exc)))
assert "raiser1_string" in formatted
assert "in raiser1_3" in formatted
assert "raiser2_string" in formatted
assert "in raiser2_2" in formatted
assert "direct cause" in formatted
assert "During handling" not in formatted
# ensure cause included
assert _cause_message in formatted
exc = get_exc(raiser1)
exc.__context__ = get_exc(raiser2)
formatted = "".join(format_exception(*einfo(exc)))
assert "raiser1_string" in formatted
assert "in raiser1_3" in formatted
assert "raiser2_string" in formatted
assert "in raiser2_2" in formatted
assert "direct cause" not in formatted
assert "During handling" in formatted
exc.__suppress_context__ = True
formatted = "".join(format_exception(*einfo(exc)))
assert "raiser1_string" in formatted
assert "in raiser1_3" in formatted
assert "raiser2_string" not in formatted
assert "in raiser2_2" not in formatted
assert "direct cause" not in formatted
assert "During handling" not in formatted
# chain=False
exc = get_exc(raiser1)
exc.__context__ = get_exc(raiser2)
formatted = "".join(format_exception(*einfo(exc), chain=False))
assert "raiser1_string" in formatted
assert "in raiser1_3" in formatted
assert "raiser2_string" not in formatted
assert "in raiser2_2" not in formatted
assert "direct cause" not in formatted
assert "During handling" not in formatted
# limit
exc = get_exc(raiser1)
exc.__context__ = get_exc(raiser2)
# get_exc adds a frame that counts against the limit, so limit=2 means we
# get 1 deep into the raiser stack
formatted = "".join(format_exception(*einfo(exc), limit=2))
print(formatted)
assert "raiser1_string" in formatted
assert "in raiser1" in formatted
assert "in raiser1_2" not in formatted
assert "raiser2_string" in formatted
assert "in raiser2" in formatted
assert "in raiser2_2" not in formatted
exc = get_exc(raiser1)
exc.__context__ = get_exc(raiser2)
formatted = "".join(format_exception(*einfo(exc), limit=1))
print(formatted)
assert "raiser1_string" in formatted
assert "in raiser1" not in formatted
assert "raiser2_string" in formatted
assert "in raiser2" not in formatted
# handles loops
exc = get_exc(raiser1)
exc.__cause__ = exc
formatted = "".join(format_exception(*einfo(exc)))
assert "raiser1_string" in formatted
assert "in raiser1_3" in formatted
assert "raiser2_string" not in formatted
assert "in raiser2_2" not in formatted
# ensure duplicate exception is not included as cause
assert _cause_message not in formatted
# MultiError
formatted = "".join(format_exception(*einfo(make_tree())))
print(formatted)
assert_match_in_seq(
[
# Outer exception is MultiError
r"MultiError:",
# First embedded exception is the embedded MultiError
r"\nDetails of embedded exception 1",
# Which has a single stack frame from make_tree raising it
r"in make_tree",
# Then it has two embedded exceptions
r" Details of embedded exception 1",
r"in raiser1_2",
# for some reason ValueError has no quotes
r"ValueError: raiser1_string",
r" Details of embedded exception 2",
r"in raiser2_2",
# But KeyError does have quotes
r"KeyError: 'raiser2_string'",
# And finally the NameError, which is a sibling of the embedded
# MultiError
r"\nDetails of embedded exception 2:",
r"in raiser3",
r"NameError",
],
formatted
)
# Prints duplicate exceptions in sub-exceptions
exc1 = get_exc(raiser1)
def raise1_raiser1():
try:
raise exc1
except:
raise ValueError("foo")
def raise2_raiser1():
try:
raise exc1
except:
raise KeyError("bar")
exc2 = get_exc(raise1_raiser1)
exc3 = get_exc(raise2_raiser1)
try:
raise MultiError([exc2, exc3])
except MultiError as e:
exc = e
formatted = "".join(format_exception(*einfo(exc)))
print(formatted)
assert_match_in_seq(
[
r"Traceback",
# Outer exception is MultiError
r"MultiError:",
# First embedded exception is the embedded ValueError with cause of raiser1
r"\nDetails of embedded exception 1",
# Print details of exc1
r" Traceback",
r"in get_exc",
r"in raiser1",
r"ValueError: raiser1_string",
# Print details of exc2
r"\n During handling of the above exception, another exception occurred:",
r" Traceback",
r"in get_exc",
r"in raise1_raiser1",
r" ValueError: foo",
# Second embedded exception is the embedded KeyError with cause of raiser1
r"\nDetails of embedded exception 2",
# Print details of exc1 again
r" Traceback",
r"in get_exc",
r"in raiser1",
r"ValueError: raiser1_string",
# Print details of exc3
r"\n During handling of the above exception, another exception occurred:",
r" Traceback",
r"in get_exc",
r"in raise2_raiser1",
r" KeyError: 'bar'",
],
formatted
)
def test_logging(caplog):
exc1 = get_exc(raiser1)
exc2 = get_exc(raiser2)
m = MultiError([exc1, exc2])
message = "test test test"
try:
raise m
except MultiError as exc:
logging.getLogger().exception(message)
# Join lines together
formatted = "".join(
format_exception(type(exc), exc, exc.__traceback__)
)
assert message in caplog.text
assert formatted in caplog.text
def run_script(name, use_ipython=False):
import trio
trio_path = Path(trio.__file__).parent.parent
script_path = Path(__file__).parent / "test_multierror_scripts" / name
env = dict(os.environ)
print("parent PYTHONPATH:", env.get("PYTHONPATH"))
if "PYTHONPATH" in env: # pragma: no cover
pp = env["PYTHONPATH"].split(os.pathsep)
else:
pp = []
pp.insert(0, str(trio_path))
pp.insert(0, str(script_path.parent))
env["PYTHONPATH"] = os.pathsep.join(pp)
print("subprocess PYTHONPATH:", env.get("PYTHONPATH"))
if use_ipython:
lines = [script_path.open().read(), "exit()"]
cmd = [
sys.executable,
"-u",
"-m",
"IPython",
# no startup files
"--quick",
"--TerminalIPythonApp.code_to_run=" + '\n'.join(lines),
]
else:
cmd = [sys.executable, "-u", str(script_path)]
print("running:", cmd)
completed = subprocess.run(
cmd, env=env, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
)
print("process output:")
print(completed.stdout.decode("utf-8"))
return completed
def check_simple_excepthook(completed):
assert_match_in_seq(
[
"in <module>",
"MultiError",
"Details of embedded exception 1",
"in exc1_fn",
"ValueError",
"Details of embedded exception 2",
"in exc2_fn",
"KeyError",
], completed.stdout.decode("utf-8")
)
def test_simple_excepthook():
completed = run_script("simple_excepthook.py")
check_simple_excepthook(completed)
def test_custom_excepthook():
# Check that user-defined excepthooks aren't overridden
completed = run_script("custom_excepthook.py")
assert_match_in_seq(
[
# The warning
"RuntimeWarning",
"already have a custom",
# The message printed by the custom hook, proving we didn't
# override it
"custom running!",
# The MultiError
"MultiError:",
],
completed.stdout.decode("utf-8")
)
# This warning is triggered by ipython 7.5.0 on python 3.8
import warnings
warnings.filterwarnings(
"ignore",
message=".*\"@coroutine\" decorator is deprecated",
category=DeprecationWarning,
module="IPython.*"
)
try:
import IPython
except ImportError: # pragma: no cover
have_ipython = False
else:
have_ipython = True
need_ipython = pytest.mark.skipif(not have_ipython, reason="need IPython")
@slow
@need_ipython
def test_ipython_exc_handler():
completed = run_script("simple_excepthook.py", use_ipython=True)
check_simple_excepthook(completed)
@slow
@need_ipython
def test_ipython_imported_but_unused():
completed = run_script("simple_excepthook_IPython.py")
check_simple_excepthook(completed)
@slow
@need_ipython
def test_ipython_custom_exc_handler():
# Check we get a nice warning (but only one!) if the user is using IPython
# and already has some other set_custom_exc handler installed.
completed = run_script("ipython_custom_exc.py", use_ipython=True)
assert_match_in_seq(
[
# The warning
"RuntimeWarning",
"IPython detected",
"skip installing Trio",
# The MultiError
"MultiError",
"ValueError",
"KeyError",
],
completed.stdout.decode("utf-8")
)
# Make sure our other warning doesn't show up
assert "custom sys.excepthook" not in completed.stdout.decode("utf-8")
```
#### File: trio/trio/_highlevel_open_tcp_stream.py
```python
from contextlib import contextmanager
import trio
from trio.socket import getaddrinfo, SOCK_STREAM, socket
__all__ = ["open_tcp_stream"]
# Implementation of RFC 6555 "Happy eyeballs"
# https://tools.ietf.org/html/rfc6555
#
# Basically, the problem here is that if we want to connect to some host, and
# DNS returns multiple IP addresses, then we don't know which of them will
# actually work -- it can happen that some of them are reachable, and some of
# them are not. One particularly common situation where this happens is on a
# host that thinks it has ipv6 connectivity, but really doesn't. But in
# principle this could happen for any kind of multi-home situation (e.g. the
# route to one mirror is down but another is up).
#
# The naive algorithm (e.g. the stdlib's socket.create_connection) would be to
# pick one of the IP addresses and try to connect; if that fails, try the
# next; etc. The problem with this is that TCP is stubborn, and if the first
# address is a blackhole then it might take a very long time (tens of seconds)
# before that connection attempt fails.
#
# That's where RFC 6555 comes in. It tells us that what we do is:
# - get the list of IPs from getaddrinfo, trusting the order it gives us (with
# one exception noted in section 5.4)
# - start a connection attempt to the first IP
# - when this fails OR if it's still going after DELAY seconds, then start a
# connection attempt to the second IP
# - when this fails OR if it's still going after another DELAY seconds, then
# start a connection attempt to the third IP
# - ... repeat until we run out of IPs.
#
# Our implementation is similarly straightforward: we spawn a chain of tasks,
# where each one (a) waits until the previous connection has failed or DELAY
# seconds have passed, (b) spawns the next task, (c) attempts to connect. As
# soon as any task crashes or succeeds, we cancel all the tasks and return.
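#
# For example (a hypothetical trace, not produced by this module), with
# DELAY = 0.250 s and three candidate addresses A, B, C:
#   t=0.000  start the attempt to A
#   t=0.250  A is still pending -> also start the attempt to B
#   t=0.300  B fails quickly    -> immediately start the attempt to C
#   t=0.450  C succeeds         -> cancel A's attempt and return C's stream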
#
# Note: this currently doesn't attempt to cache any results, so if you make
# multiple connections to the same host it'll re-run the happy-eyeballs
# algorithm each time. RFC 6555 is pretty confusing about whether this is
# allowed. Section 4 describes an algorithm that attempts ipv4 and ipv6
# simultaneously, and then says "The client MUST cache information regarding
# the outcome of each connection attempt, and it uses that information to
# avoid thrashing the network with subsequent attempts." Then section 4.2 says
# "implementations MUST prefer the first IP address family returned by the
# host's address preference policy, unless implementing a stateful
# algorithm". Here "stateful" means "one that caches information about
# previous attempts". So my reading of this is that IF you're starting ipv4
# and ipv6 at the same time then you MUST cache the result for ~ten minutes,
# but IF you're "preferring" one protocol by trying it first (like we are),
# then you don't need to cache.
#
# Caching is quite tricky: to get it right you need to do things like detect
# when the network interfaces are reconfigured, and if you get it wrong then
# connection attempts basically just don't work. So we don't even try.
# "Firefox and Chrome use 300 ms"
# https://tools.ietf.org/html/rfc6555#section-6
# Though
# https://www.researchgate.net/profile/Vaibhav_Bajpai3/publication/304568993_Measuring_the_Effects_of_Happy_Eyeballs/links/5773848e08ae6f328f6c284c/Measuring-the-Effects-of-Happy-Eyeballs.pdf
# claims that Firefox actually uses 0 ms, unless an about:config option is
# toggled and then it uses 250 ms.
DEFAULT_DELAY = 0.250
# How should we call getaddrinfo? In particular, should we use AI_ADDRCONFIG?
#
# The idea of AI_ADDRCONFIG is that it only returns addresses that might
# work. E.g., if getaddrinfo knows that you don't have any IPv6 connectivity,
# then it doesn't return any IPv6 addresses. And this is kinda nice, because
# it means maybe you can skip sending AAAA requests entirely. But in practice,
# it doesn't really work right.
#
# - on Linux/glibc, empirically, the default is to return all addresses, and
# with AI_ADDRCONFIG then it only returns IPv6 addresses if there is at least
# one non-loopback IPv6 address configured... but this can be a link-local
# address, so in practice I guess this is basically always configured if IPv6
# is enabled at all. OTOH if you pass in "::1" as the target address with
# AI_ADDRCONFIG and there's no *external* IPv6 address configured, you get an
# error. So AI_ADDRCONFIG mostly doesn't do anything, even when you would want
# it to, and when it does do something it might break things that would have
# worked.
#
# - on Windows 10, empirically, if no IPv6 address is configured then by
# default they are also suppressed from getaddrinfo (flags=0 and
# flags=AI_ADDRCONFIG seem to do the same thing). If you pass AI_ALL, then you
# get the full list.
# ...except for localhost! getaddrinfo("localhost", "80") gives me ::1, even
# though there's no ipv6 and other queries only return ipv4.
# If you pass in an IPv6 IP address as the target address, then that's always
# returned OK, even with AI_ADDRCONFIG set and no IPv6 configured.
#
# But I guess other versions of windows messed this up, judging from these bug
# reports:
# https://bugs.chromium.org/p/chromium/issues/detail?id=5234
# https://bugs.chromium.org/p/chromium/issues/detail?id=32522#c50
#
# So basically the options are either to use AI_ADDRCONFIG and then add some
# complicated special cases to work around its brokenness, or else don't use
# AI_ADDRCONFIG and accept that sometimes on legacy/misconfigured networks
# we'll waste 300 ms trying to connect to a blackholed destination.
#
# Twisted and Tornado always use default flags. I think we'll do the same.
@contextmanager
def close_all():
sockets_to_close = set()
try:
yield sockets_to_close
finally:
errs = []
for sock in sockets_to_close:
try:
sock.close()
except BaseException as exc:
errs.append(exc)
if errs:
raise trio.MultiError(errs)
def reorder_for_rfc_6555_section_5_4(targets):
# RFC 6555 section 5.4 says that if getaddrinfo returns multiple address
# families (e.g. IPv4 and IPv6), then you should make sure that your first
# and second attempts use different families:
#
# https://tools.ietf.org/html/rfc6555#section-5.4
#
# This function post-processes the results from getaddrinfo, in-place, to
# satisfy this requirement.
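    #
    # For example (hypothetical getaddrinfo results, showing only the
    # address-family field of each entry):
    #   [AF_INET6, AF_INET6, AF_INET, AF_INET]
    # is reordered in place to
    #   [AF_INET6, AF_INET, AF_INET6, AF_INET]
    # so that the first two attempts use different address families.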
for i in range(1, len(targets)):
if targets[i][0] != targets[0][0]:
# Found the first entry with a different address family; move it
# so that it becomes the second item on the list.
if i != 1:
targets.insert(1, targets.pop(i))
break
def format_host_port(host, port):
host = host.decode("ascii") if isinstance(host, bytes) else host
if ":" in host:
return "[{}]:{}".format(host, port)
else:
return "{}:{}".format(host, port)
# Twisted's HostnameEndpoint has a good set of configurables:
# https://twistedmatrix.com/documents/current/api/twisted.internet.endpoints.HostnameEndpoint.html
#
# - per-connection timeout
# this doesn't seem useful -- we let you set a timeout on the whole thing
# using Trio's normal mechanisms, and that seems like enough
# - delay between attempts
# - bind address (but not port!)
# they *don't* support multiple address bindings, like giving the ipv4 and
# ipv6 addresses of the host.
# I think maybe our semantics should be: we accept a list of bind addresses,
# and we bind to the first one that is compatible with the
# connection attempt we want to make, and if none are compatible then we
# don't try to connect to that target.
#
# XX TODO: implement bind address support
#
# Actually, the best option is probably to be explicit: {AF_INET: "...",
# AF_INET6: "..."}
# this might be simpler after
async def open_tcp_stream(
host,
port,
*,
# No trailing comma b/c bpo-9232 (fixed in py36)
happy_eyeballs_delay=DEFAULT_DELAY
):
"""Connect to the given host and port over TCP.
If the given ``host`` has multiple IP addresses associated with it, then
we have a problem: which one do we use?
One approach would be to attempt to connect to the first one, and then if
that fails, attempt to connect to the second one ... until we've tried all
of them. But the problem with this is that if the first IP address is
unreachable (for example, because it's an IPv6 address and our network
discards IPv6 packets), then we might end up waiting tens of seconds for
the first connection attempt to timeout before we try the second address.
Another approach would be to attempt to connect to all of the addresses at
the same time, in parallel, and then use whichever connection succeeds
first, abandoning the others. This would be fast, but create a lot of
unnecessary load on the network and the remote server.
This function strikes a balance between these two extremes: it works its
way through the available addresses one at a time, like the first
approach; but, if ``happy_eyeballs_delay`` seconds have passed and it's
still waiting for an attempt to succeed or fail, then it gets impatient
and starts the next connection attempt in parallel. As soon as any one
connection attempt succeeds, all the other attempts are cancelled. This
avoids unnecessary load because most connections will succeed after just
one or two attempts, but if one of the addresses is unreachable then it
doesn't slow us down too much.
This is known as a "happy eyeballs" algorithm, and our particular variant
is modelled after how Chrome connects to webservers; see `RFC 6555
<https://tools.ietf.org/html/rfc6555>`__ for more details.
Args:
host (str or bytes): The host to connect to. Can be an IPv4 address,
IPv6 address, or a hostname.
port (int): The port to connect to.
happy_eyeballs_delay (float): How many seconds to wait for each
connection attempt to succeed or fail before getting impatient and
starting another one in parallel. Set to `math.inf` if you want
to limit to only one connection attempt at a time (like
          :func:`socket.create_connection`). Default: 0.250 (250 ms).
Returns:
SocketStream: a :class:`~trio.abc.Stream` connected to the given server.
Raises:
OSError: if the connection fails.
See also:
open_ssl_over_tcp_stream
"""
# To keep our public API surface smaller, rule out some cases that
# getaddrinfo will accept in some circumstances, but that act weird or
# have non-portable behavior or are just plain not useful.
# No type check on host though b/c we want to allow bytes-likes.
if host is None:
raise ValueError("host cannot be None")
if not isinstance(port, int):
raise TypeError("port must be int, not {!r}".format(port))
if happy_eyeballs_delay is None:
happy_eyeballs_delay = DEFAULT_DELAY
targets = await getaddrinfo(host, port, type=SOCK_STREAM)
# I don't think this can actually happen -- if there are no results,
# getaddrinfo should have raised OSError instead of returning an empty
# list. But let's be paranoid and handle it anyway:
if not targets:
msg = "no results found for hostname lookup: {}".format(
format_host_port(host, port)
)
raise OSError(msg)
reorder_for_rfc_6555_section_5_4(targets)
# This list records all the connection failures that we ignored.
oserrors = []
# Keeps track of the socket that we're going to complete with,
# need to make sure this isn't automatically closed
winning_socket = None
# Try connecting to the specified address. Possible outcomes:
# - success: record connected socket in winning_socket and cancel
# concurrent attempts
# - failure: record exception in oserrors, set attempt_failed allowing
# the next connection attempt to start early
# code needs to ensure sockets can be closed appropriately in the
# face of crash or cancellation
async def attempt_connect(socket_args, sockaddr, attempt_failed):
nonlocal winning_socket
try:
sock = socket(*socket_args)
open_sockets.add(sock)
await sock.connect(sockaddr)
# Success! Save the winning socket and cancel all outstanding
# connection attempts.
winning_socket = sock
nursery.cancel_scope.cancel()
except OSError as exc:
# This connection attempt failed, but the next one might
# succeed. Save the error for later so we can report it if
# everything fails, and tell the next attempt that it should go
# ahead (if it hasn't already).
oserrors.append(exc)
attempt_failed.set()
with close_all() as open_sockets:
# nursery spawns a task for each connection attempt, will be
# cancelled by the task that gets a successful connection
async with trio.open_nursery() as nursery:
for *sa, _, addr in targets:
# create an event to indicate connection failure,
# allowing the next target to be tried early
attempt_failed = trio.Event()
nursery.start_soon(attempt_connect, sa, addr, attempt_failed)
# give this attempt at most this time before moving on
with trio.move_on_after(happy_eyeballs_delay):
await attempt_failed.wait()
# nothing succeeded
if winning_socket is None:
assert len(oserrors) == len(targets)
msg = "all attempts to connect to {} failed".format(
format_host_port(host, port)
)
raise OSError(msg) from trio.MultiError(oserrors)
else:
stream = trio.SocketStream(winning_socket)
open_sockets.remove(winning_socket)
return stream
```
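The file above exposes a single public entry point, `open_tcp_stream`. As a rough illustration of how it is intended to be used (a minimal sketch, not taken from the module; the host, port, and ten-second overall timeout are arbitrary placeholders):
```python
import trio

async def fetch_banner():
    # Bound the whole connect-and-read with Trio's normal timeout machinery;
    # open_tcp_stream itself only staggers the per-address connection attempts.
    with trio.move_on_after(10):
        stream = await trio.open_tcp_stream("example.com", 80)
        async with stream:
            await stream.send_all(b"HEAD / HTTP/1.0\r\nHost: example.com\r\n\r\n")
            return await stream.receive_some(4096)

print(trio.run(fetch_banner))
```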
#### File: trio/_subprocess_platform/__init__.py
```python
import os
from typing import Tuple
from .. import _core, _subprocess
from .._abc import SendStream, ReceiveStream
# Fallback versions of the functions provided -- implementations
# per OS are imported atop these at the bottom of the module.
async def wait_child_exiting(process: "_subprocess.Process") -> None:
"""Block until the child process managed by ``process`` is exiting.
It is invalid to call this function if the process has already
been waited on; that is, ``process.returncode`` must be None.
When this function returns, it indicates that a call to
:meth:`subprocess.Popen.wait` will immediately be able to
return the process's exit status. The actual exit status is not
consumed by this call, since :class:`~subprocess.Popen` wants
to be able to do that itself.
"""
raise NotImplementedError from wait_child_exiting._error # pragma: no cover
def create_pipe_to_child_stdin() -> Tuple[SendStream, int]:
"""Create a new pipe suitable for sending data from this
process to the standard input of a child we're about to spawn.
Returns:
A pair ``(trio_end, subprocess_end)`` where ``trio_end`` is a
:class:`~trio.abc.SendStream` and ``subprocess_end`` is
something suitable for passing as the ``stdin`` argument of
:class:`subprocess.Popen`.
"""
raise NotImplementedError from ( # pragma: no cover
create_pipe_to_child_stdin._error
)
def create_pipe_from_child_output() -> Tuple[ReceiveStream, int]:
"""Create a new pipe suitable for receiving data into this
process from the standard output or error stream of a child
we're about to spawn.
Returns:
A pair ``(trio_end, subprocess_end)`` where ``trio_end`` is a
:class:`~trio.abc.ReceiveStream` and ``subprocess_end`` is
      something suitable for passing as the ``stdout`` or ``stderr`` argument of
:class:`subprocess.Popen`.
"""
raise NotImplementedError from ( # pragma: no cover
        create_pipe_from_child_output._error
)
try:
if os.name == "nt":
from .windows import wait_child_exiting # noqa: F811
elif hasattr(_core, "wait_kevent"):
from .kqueue import wait_child_exiting # noqa: F811
else:
from .waitid import wait_child_exiting # noqa: F811
except ImportError as ex: # pragma: no cover
wait_child_exiting._error = ex
try:
if os.name == "posix":
from .._unix_pipes import PipeSendStream, PipeReceiveStream
def create_pipe_to_child_stdin(): # noqa: F811
rfd, wfd = os.pipe()
return PipeSendStream(wfd), rfd
def create_pipe_from_child_output(): # noqa: F811
rfd, wfd = os.pipe()
return PipeReceiveStream(rfd), wfd
elif os.name == "nt":
from .._windows_pipes import PipeSendStream, PipeReceiveStream
# This isn't exported or documented, but it's also not
# underscore-prefixed, and seems kosher to use. The asyncio docs
# for 3.5 included an example that imported socketpair from
# windows_utils (before socket.socketpair existed on Windows), and
# when asyncio.windows_utils.socketpair was removed in 3.7, the
# removal was mentioned in the release notes.
from asyncio.windows_utils import pipe as windows_pipe
import msvcrt
def create_pipe_to_child_stdin(): # noqa: F811
# for stdin, we want the write end (our end) to use overlapped I/O
rh, wh = windows_pipe(overlapped=(False, True))
return PipeSendStream(wh), msvcrt.open_osfhandle(rh, os.O_RDONLY)
def create_pipe_from_child_output(): # noqa: F811
# for stdout/err, it's the read end that's overlapped
rh, wh = windows_pipe(overlapped=(True, False))
return PipeReceiveStream(rh), msvcrt.open_osfhandle(wh, 0)
else: # pragma: no cover
raise ImportError("pipes not implemented on this platform")
except ImportError as ex: # pragma: no cover
create_pipe_to_child_stdin._error = ex
create_pipe_from_child_output._error = ex
```
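The helpers above are internal plumbing, so the sketch below is purely illustrative: a hypothetical POSIX-only example of how the pipe pair returned by each helper might be wired into `subprocess.Popen` (the `cat` child and the buffer size are placeholders, and trio's real process wrapper does more bookkeeping than this):
```python
import os
import subprocess

import trio
from trio._subprocess_platform import (
    create_pipe_to_child_stdin,
    create_pipe_from_child_output,
)

async def echo_through_child():
    send_stream, child_stdin_fd = create_pipe_to_child_stdin()
    recv_stream, child_stdout_fd = create_pipe_from_child_output()
    proc = subprocess.Popen(
        ["cat"], stdin=child_stdin_fd, stdout=child_stdout_fd
    )
    # Popen has handed the child its ends of the pipes, so the parent should
    # close its copies of the child-side descriptors.
    os.close(child_stdin_fd)
    os.close(child_stdout_fd)
    await send_stream.send_all(b"hello\n")
    await send_stream.aclose()          # EOF on the child's stdin
    data = await recv_stream.receive_some(1024)
    await recv_stream.aclose()
    proc.wait()                         # child exits promptly after EOF
    return data

print(trio.run(echo_through_child))
```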
#### File: trio/_subprocess_platform/kqueue.py
```python
import select
from .. import _core, _subprocess
async def wait_child_exiting(process: "_subprocess.Process") -> None:
kqueue = _core.current_kqueue()
try:
from select import KQ_NOTE_EXIT
except ImportError: # pragma: no cover
# pypy doesn't define KQ_NOTE_EXIT:
# https://bitbucket.org/pypy/pypy/issues/2921/
# I verified this value against both Darwin and FreeBSD
KQ_NOTE_EXIT = 0x80000000
make_event = lambda flags: select.kevent(
process.pid,
filter=select.KQ_FILTER_PROC,
flags=flags,
fflags=KQ_NOTE_EXIT
)
try:
kqueue.control(
[make_event(select.KQ_EV_ADD | select.KQ_EV_ONESHOT)], 0
)
except ProcessLookupError: # pragma: no cover
# This can supposedly happen if the process is in the process
# of exiting, and it can even be the case that kqueue says the
# process doesn't exist before waitpid(WNOHANG) says it hasn't
# exited yet. See the discussion in https://chromium.googlesource.com/
# chromium/src/base/+/master/process/kill_mac.cc .
# We haven't actually seen this error occur since we added
# locking to prevent multiple calls to wait_child_exiting()
# for the same process simultaneously, but given the explanation
# in Chromium it seems we should still keep the check.
return
def abort(_):
kqueue.control([make_event(select.KQ_EV_DELETE)], 0)
return _core.Abort.SUCCEEDED
await _core.wait_kevent(process.pid, select.KQ_FILTER_PROC, abort)
```
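For readers who haven't used kqueue before, the same mechanism can be exercised synchronously with nothing but the stdlib, which may make the register-then-wait dance above easier to follow. This is a hypothetical BSD/macOS-only sketch (the `sleep 1` child is just a placeholder):
```python
import select
import subprocess

def wait_for_exit_via_kqueue(pid):
    # Register one-shot interest in "this process exited", then block until
    # the kernel delivers that event.
    kq = select.kqueue()
    try:
        event = select.kevent(
            pid,
            filter=select.KQ_FILTER_PROC,
            flags=select.KQ_EV_ADD | select.KQ_EV_ONESHOT,
            fflags=getattr(select, "KQ_NOTE_EXIT", 0x80000000),
        )
        kq.control([event], 0)   # register; don't wait for events yet
        kq.control(None, 1)      # block until the NOTE_EXIT event arrives
    finally:
        kq.close()

proc = subprocess.Popen(["sleep", "1"])
wait_for_exit_via_kqueue(proc.pid)
print("exit status:", proc.wait())
```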
#### File: trio/tests/test_highlevel_generic.py
```python
import pytest
import attr
from ..abc import SendStream, ReceiveStream
from .._highlevel_generic import StapledStream
@attr.s
class RecordSendStream(SendStream):
record = attr.ib(default=attr.Factory(list))
async def send_all(self, data):
self.record.append(("send_all", data))
async def wait_send_all_might_not_block(self):
self.record.append("wait_send_all_might_not_block")
async def aclose(self):
self.record.append("aclose")
@attr.s
class RecordReceiveStream(ReceiveStream):
record = attr.ib(default=attr.Factory(list))
async def receive_some(self, max_bytes):
self.record.append(("receive_some", max_bytes))
async def aclose(self):
self.record.append("aclose")
async def test_StapledStream():
send_stream = RecordSendStream()
receive_stream = RecordReceiveStream()
stapled = StapledStream(send_stream, receive_stream)
assert stapled.send_stream is send_stream
assert stapled.receive_stream is receive_stream
await stapled.send_all(b"foo")
await stapled.wait_send_all_might_not_block()
assert send_stream.record == [
("send_all", b"foo"),
"wait_send_all_might_not_block",
]
send_stream.record.clear()
await stapled.send_eof()
assert send_stream.record == ["aclose"]
send_stream.record.clear()
async def fake_send_eof():
send_stream.record.append("send_eof")
send_stream.send_eof = fake_send_eof
await stapled.send_eof()
assert send_stream.record == ["send_eof"]
send_stream.record.clear()
assert receive_stream.record == []
await stapled.receive_some(1234)
assert receive_stream.record == [("receive_some", 1234)]
assert send_stream.record == []
receive_stream.record.clear()
await stapled.aclose()
assert receive_stream.record == ["aclose"]
assert send_stream.record == ["aclose"]
async def test_StapledStream_with_erroring_close():
# Make sure that if one of the aclose methods errors out, then the other
# one still gets called.
class BrokenSendStream(RecordSendStream):
async def aclose(self):
await super().aclose()
raise ValueError
class BrokenReceiveStream(RecordReceiveStream):
async def aclose(self):
await super().aclose()
raise ValueError
stapled = StapledStream(BrokenSendStream(), BrokenReceiveStream())
with pytest.raises(ValueError) as excinfo:
await stapled.aclose()
assert isinstance(excinfo.value.__context__, ValueError)
assert stapled.send_stream.record == ["aclose"]
assert stapled.receive_stream.record == ["aclose"]
```
#### File: trio/tests/test_highlevel_open_tcp_listeners.py
```python
import pytest
import socket as stdlib_socket
import errno
import attr
import trio
from trio import (
open_tcp_listeners, serve_tcp, SocketListener, open_tcp_stream
)
from trio.testing import open_stream_to_socket_listener
from .. import socket as tsocket
from .._core.tests.tutil import slow, creates_ipv6, binds_ipv6
async def test_open_tcp_listeners_basic():
listeners = await open_tcp_listeners(0)
assert isinstance(listeners, list)
for obj in listeners:
assert isinstance(obj, SocketListener)
# Binds to wildcard address by default
assert obj.socket.family in [tsocket.AF_INET, tsocket.AF_INET6]
assert obj.socket.getsockname()[0] in ["0.0.0.0", "::"]
listener = listeners[0]
# Make sure the backlog is at least 2
c1 = await open_stream_to_socket_listener(listener)
c2 = await open_stream_to_socket_listener(listener)
s1 = await listener.accept()
s2 = await listener.accept()
# Note that we don't know which client stream is connected to which server
# stream
await s1.send_all(b"x")
await s2.send_all(b"x")
assert await c1.receive_some(1) == b"x"
assert await c2.receive_some(1) == b"x"
for resource in [c1, c2, s1, s2] + listeners:
await resource.aclose()
async def test_open_tcp_listeners_specific_port_specific_host():
# Pick a port
sock = tsocket.socket()
await sock.bind(("127.0.0.1", 0))
host, port = sock.getsockname()
sock.close()
(listener,) = await open_tcp_listeners(port, host=host)
async with listener:
assert listener.socket.getsockname() == (host, port)
# Warning: this sleeps, and needs to use a real sleep -- MockClock won't
# work.
#
# Also, this measurement technique often works, but not always: sometimes SYN
# cookies get triggered, and then the backlog measured this way effectively
# becomes infinite. (In particular, this has been observed happening on
# Travis-CI.) To avoid this blowing up and eating all FDs / ephemeral ports,
# we put an upper limit on the number of connections we attempt, and if we hit
# it then we return the magic string "lots". Then
# test_open_tcp_listeners_backlog uses a special path to handle this, treating
# it as a success -- but at least we'll see in coverage if none of our test
# runs are actually running the test properly.
async def measure_backlog(listener, limit):
client_streams = []
try:
while True:
# Generally the response to the listen buffer being full is that
# the SYN gets dropped, and the client retries after 1 second. So
# we assume that any connect() call to localhost that takes >0.5
# seconds indicates a dropped SYN.
with trio.move_on_after(0.5) as cancel_scope:
client_stream = await open_stream_to_socket_listener(listener)
client_streams.append(client_stream)
if cancel_scope.cancelled_caught:
break
if len(client_streams) >= limit: # pragma: no cover
return "lots"
finally:
# The need for "no cover" here is subtle: see
# https://github.com/python-trio/trio/issues/522
for client_stream in client_streams: # pragma: no cover
await client_stream.aclose()
return len(client_streams)
@slow
async def test_open_tcp_listeners_backlog():
# Operating systems don't necessarily use the exact backlog you pass
async def check_backlog(nominal, required_min, required_max):
listeners = await open_tcp_listeners(0, backlog=nominal)
actual = await measure_backlog(listeners[0], required_max + 10)
for listener in listeners:
await listener.aclose()
print("nominal", nominal, "actual", actual)
if actual == "lots": # pragma: no cover
return
assert required_min <= actual <= required_max
await check_backlog(nominal=1, required_min=1, required_max=10)
await check_backlog(nominal=11, required_min=11, required_max=20)
@binds_ipv6
async def test_open_tcp_listeners_ipv6_v6only():
# Check IPV6_V6ONLY is working properly
(ipv6_listener,) = await open_tcp_listeners(0, host="::1")
_, port, *_ = ipv6_listener.socket.getsockname()
with pytest.raises(OSError):
await open_tcp_stream("127.0.0.1", port)
async def test_open_tcp_listeners_rebind():
(l1,) = await open_tcp_listeners(0, host="127.0.0.1")
sockaddr1 = l1.socket.getsockname()
# Plain old rebinding while it's still there should fail, even if we have
# SO_REUSEADDR set (requires SO_EXCLUSIVEADDRUSE on Windows)
probe = stdlib_socket.socket()
probe.setsockopt(stdlib_socket.SOL_SOCKET, stdlib_socket.SO_REUSEADDR, 1)
with pytest.raises(OSError):
probe.bind(sockaddr1)
# Now use the first listener to set up some connections in various states,
# and make sure that they don't create any obstacle to rebinding a second
# listener after the first one is closed.
c_established = await open_stream_to_socket_listener(l1)
s_established = await l1.accept()
c_time_wait = await open_stream_to_socket_listener(l1)
s_time_wait = await l1.accept()
# Server-initiated close leaves socket in TIME_WAIT
await s_time_wait.aclose()
await l1.aclose()
(l2,) = await open_tcp_listeners(sockaddr1[1], host="127.0.0.1")
sockaddr2 = l2.socket.getsockname()
assert sockaddr1 == sockaddr2
assert s_established.socket.getsockname() == sockaddr2
assert c_time_wait.socket.getpeername() == sockaddr2
for resource in [
l1,
l2,
c_established,
s_established,
c_time_wait,
s_time_wait,
]:
await resource.aclose()
class FakeOSError(OSError):
pass
@attr.s
class FakeSocket(tsocket.SocketType):
family = attr.ib()
type = attr.ib()
proto = attr.ib()
closed = attr.ib(default=False)
poison_listen = attr.ib(default=False)
def getsockopt(self, level, option):
if (level, option) == (tsocket.SOL_SOCKET, tsocket.SO_ACCEPTCONN):
return True
assert False # pragma: no cover
def setsockopt(self, level, option, value):
pass
async def bind(self, sockaddr):
pass
def listen(self, backlog):
if self.poison_listen:
raise FakeOSError("whoops")
def close(self):
self.closed = True
@attr.s
class FakeSocketFactory:
poison_after = attr.ib()
sockets = attr.ib(factory=list)
raise_on_family = attr.ib(factory=dict) # family => errno
def socket(self, family, type, proto):
if family in self.raise_on_family:
raise OSError(self.raise_on_family[family], "nope")
sock = FakeSocket(family, type, proto)
self.poison_after -= 1
if self.poison_after == 0:
sock.poison_listen = True
self.sockets.append(sock)
return sock
@attr.s
class FakeHostnameResolver:
family_addr_pairs = attr.ib()
async def getaddrinfo(self, host, port, family, type, proto, flags):
return [
(family, tsocket.SOCK_STREAM, 0, "", (addr, port))
for family, addr in self.family_addr_pairs
]
async def test_open_tcp_listeners_multiple_host_cleanup_on_error():
# If we were trying to bind to multiple hosts and one of them failed, they
    # all get cleaned up before returning
fsf = FakeSocketFactory(3)
tsocket.set_custom_socket_factory(fsf)
tsocket.set_custom_hostname_resolver(
FakeHostnameResolver(
[
(tsocket.AF_INET, "1.1.1.1"),
(tsocket.AF_INET, "2.2.2.2"),
(tsocket.AF_INET, "3.3.3.3"),
]
)
)
with pytest.raises(FakeOSError):
await open_tcp_listeners(80, host="example.org")
assert len(fsf.sockets) == 3
for sock in fsf.sockets:
assert sock.closed
async def test_open_tcp_listeners_port_checking():
for host in ["127.0.0.1", None]:
with pytest.raises(TypeError):
await open_tcp_listeners(None, host=host)
with pytest.raises(TypeError):
await open_tcp_listeners(b"80", host=host)
with pytest.raises(TypeError):
await open_tcp_listeners("http", host=host)
async def test_serve_tcp():
async def handler(stream):
await stream.send_all(b"x")
async with trio.open_nursery() as nursery:
listeners = await nursery.start(serve_tcp, handler, 0)
stream = await open_stream_to_socket_listener(listeners[0])
async with stream:
            assert await stream.receive_some(1) == b"x"
nursery.cancel_scope.cancel()
@pytest.mark.parametrize(
"try_families", [
{tsocket.AF_INET},
{tsocket.AF_INET6},
{tsocket.AF_INET, tsocket.AF_INET6},
]
)
@pytest.mark.parametrize(
"fail_families", [
{tsocket.AF_INET},
{tsocket.AF_INET6},
{tsocket.AF_INET, tsocket.AF_INET6},
]
)
async def test_open_tcp_listeners_some_address_families_unavailable(
try_families, fail_families
):
fsf = FakeSocketFactory(
10,
raise_on_family={
family: errno.EAFNOSUPPORT
for family in fail_families
}
)
tsocket.set_custom_socket_factory(fsf)
tsocket.set_custom_hostname_resolver(
FakeHostnameResolver([(family, "foo") for family in try_families])
)
should_succeed = try_families - fail_families
if not should_succeed:
with pytest.raises(OSError) as exc_info:
await open_tcp_listeners(80, host="example.org")
assert "This system doesn't support" in str(exc_info.value)
if isinstance(exc_info.value.__cause__, trio.MultiError):
for subexc in exc_info.value.__cause__.exceptions:
assert "nope" in str(subexc)
else:
assert isinstance(exc_info.value.__cause__, OSError)
assert "nope" in str(exc_info.value.__cause__)
else:
listeners = await open_tcp_listeners(80)
for listener in listeners:
should_succeed.remove(listener.socket.family)
assert not should_succeed
async def test_open_tcp_listeners_socket_fails_not_afnosupport():
fsf = FakeSocketFactory(
10,
raise_on_family={
tsocket.AF_INET: errno.EAFNOSUPPORT,
tsocket.AF_INET6: errno.EINVAL,
}
)
tsocket.set_custom_socket_factory(fsf)
tsocket.set_custom_hostname_resolver(
FakeHostnameResolver(
[(tsocket.AF_INET, "foo"), (tsocket.AF_INET6, "bar")]
)
)
with pytest.raises(OSError) as exc_info:
await open_tcp_listeners(80, host="example.org")
assert exc_info.value.errno == errno.EINVAL
assert exc_info.value.__cause__ is None
assert "nope" in str(exc_info.value)
```
#### File: trio/tests/test_socket.py
```python
import pytest
import attr
import os
import socket as stdlib_socket
import inspect
import tempfile
import sys as _sys
from .._core.tests.tutil import creates_ipv6, binds_ipv6
from .. import _core
from .. import _socket as _tsocket
from .. import socket as tsocket
from .._socket import _NUMERIC_ONLY, _try_sync
from ..testing import assert_checkpoints, wait_all_tasks_blocked
################################################################
# utils
################################################################
class MonkeypatchedGAI:
def __init__(self, orig_getaddrinfo):
self._orig_getaddrinfo = orig_getaddrinfo
self._responses = {}
self.record = []
# get a normalized getaddrinfo argument tuple
def _frozenbind(self, *args, **kwargs):
sig = inspect.signature(self._orig_getaddrinfo)
bound = sig.bind(*args, **kwargs)
bound.apply_defaults()
frozenbound = bound.args
assert not bound.kwargs
return frozenbound
def set(self, response, *args, **kwargs):
self._responses[self._frozenbind(*args, **kwargs)] = response
def getaddrinfo(self, *args, **kwargs):
bound = self._frozenbind(*args, **kwargs)
self.record.append(bound)
if bound in self._responses:
return self._responses[bound]
elif bound[-1] & stdlib_socket.AI_NUMERICHOST:
return self._orig_getaddrinfo(*args, **kwargs)
else:
raise RuntimeError(
"gai called with unexpected arguments {}".format(bound)
)
@pytest.fixture
def monkeygai(monkeypatch):
controller = MonkeypatchedGAI(stdlib_socket.getaddrinfo)
monkeypatch.setattr(stdlib_socket, "getaddrinfo", controller.getaddrinfo)
return controller
async def test__try_sync():
with assert_checkpoints():
async with _try_sync():
pass
with assert_checkpoints():
with pytest.raises(KeyError):
async with _try_sync():
raise KeyError
async with _try_sync():
raise BlockingIOError
def _is_ValueError(exc):
return isinstance(exc, ValueError)
async with _try_sync(_is_ValueError):
raise ValueError
with assert_checkpoints():
with pytest.raises(BlockingIOError):
async with _try_sync(_is_ValueError):
raise BlockingIOError
################################################################
# basic re-exports
################################################################
def test_socket_has_some_reexports():
assert tsocket.SOL_SOCKET == stdlib_socket.SOL_SOCKET
assert tsocket.TCP_NODELAY == stdlib_socket.TCP_NODELAY
assert tsocket.gaierror == stdlib_socket.gaierror
assert tsocket.ntohs == stdlib_socket.ntohs
################################################################
# name resolution
################################################################
async def test_getaddrinfo(monkeygai):
def check(got, expected):
# win32 returns 0 for the proto field
def without_proto(gai_tup):
return gai_tup[:2] + (0,) + gai_tup[3:]
expected2 = [without_proto(gt) for gt in expected]
assert got == expected or got == expected2
# Simple non-blocking non-error cases, ipv4 and ipv6:
with assert_checkpoints():
res = await tsocket.getaddrinfo(
"127.0.0.1", "12345", type=tsocket.SOCK_STREAM
)
check(res, [
(tsocket.AF_INET, # 127.0.0.1 is ipv4
tsocket.SOCK_STREAM,
tsocket.IPPROTO_TCP,
"",
("127.0.0.1", 12345)),
]) # yapf: disable
with assert_checkpoints():
res = await tsocket.getaddrinfo(
"::1", "12345", type=tsocket.SOCK_DGRAM
)
check(res, [
(tsocket.AF_INET6,
tsocket.SOCK_DGRAM,
tsocket.IPPROTO_UDP,
"",
("::1", 12345, 0, 0)),
]) # yapf: disable
monkeygai.set("x", b"host", "port", family=0, type=0, proto=0, flags=0)
with assert_checkpoints():
res = await tsocket.getaddrinfo("host", "port")
assert res == "x"
assert monkeygai.record[-1] == (b"host", "port", 0, 0, 0, 0)
# check raising an error from a non-blocking getaddrinfo
with assert_checkpoints():
with pytest.raises(tsocket.gaierror) as excinfo:
await tsocket.getaddrinfo("::1", "12345", type=-1)
# Linux, Windows
expected_errnos = {tsocket.EAI_SOCKTYPE}
# macOS
if hasattr(tsocket, "EAI_BADHINTS"):
expected_errnos.add(tsocket.EAI_BADHINTS)
assert excinfo.value.errno in expected_errnos
# check raising an error from a blocking getaddrinfo (exploits the fact
# that monkeygai raises if it gets a non-numeric request it hasn't been
# given an answer for)
with assert_checkpoints():
with pytest.raises(RuntimeError):
await tsocket.getaddrinfo("asdf", "12345")
async def test_getnameinfo():
# Trivial test:
ni_numeric = stdlib_socket.NI_NUMERICHOST | stdlib_socket.NI_NUMERICSERV
with assert_checkpoints():
got = await tsocket.getnameinfo(("127.0.0.1", 1234), ni_numeric)
assert got == ("127.0.0.1", "1234")
# getnameinfo requires a numeric address as input:
with assert_checkpoints():
with pytest.raises(tsocket.gaierror):
await tsocket.getnameinfo(("google.com", 80), 0)
with assert_checkpoints():
with pytest.raises(tsocket.gaierror):
await tsocket.getnameinfo(("localhost", 80), 0)
# Blocking call to get expected values:
host, service = stdlib_socket.getnameinfo(("127.0.0.1", 80), 0)
# Some working calls:
got = await tsocket.getnameinfo(("127.0.0.1", 80), 0)
assert got == (host, service)
got = await tsocket.getnameinfo(("127.0.0.1", 80), tsocket.NI_NUMERICHOST)
assert got == ("127.0.0.1", service)
got = await tsocket.getnameinfo(("127.0.0.1", 80), tsocket.NI_NUMERICSERV)
assert got == (host, "80")
################################################################
# constructors
################################################################
async def test_from_stdlib_socket():
sa, sb = stdlib_socket.socketpair()
assert not isinstance(sa, tsocket.SocketType)
with sa, sb:
ta = tsocket.from_stdlib_socket(sa)
assert isinstance(ta, tsocket.SocketType)
assert sa.fileno() == ta.fileno()
await ta.send(b"x")
assert sb.recv(1) == b"x"
# rejects other types
with pytest.raises(TypeError):
tsocket.from_stdlib_socket(1)
class MySocket(stdlib_socket.socket):
pass
mysock = MySocket()
with pytest.raises(TypeError):
tsocket.from_stdlib_socket(mysock)
async def test_from_fd():
sa, sb = stdlib_socket.socketpair()
ta = tsocket.fromfd(sa.fileno(), sa.family, sa.type, sa.proto)
with sa, sb, ta:
assert ta.fileno() != sa.fileno()
await ta.send(b"x")
assert sb.recv(3) == b"x"
async def test_socketpair_simple():
async def child(sock):
print("sending hello")
await sock.send(b"h")
assert await sock.recv(1) == b"h"
a, b = tsocket.socketpair()
with a, b:
async with _core.open_nursery() as nursery:
nursery.start_soon(child, a)
nursery.start_soon(child, b)
@pytest.mark.skipif(not hasattr(tsocket, "fromshare"), reason="windows only")
async def test_fromshare():
a, b = tsocket.socketpair()
with a, b:
# share with ourselves
shared = a.share(os.getpid())
a2 = tsocket.fromshare(shared)
with a2:
assert a.fileno() != a2.fileno()
await a2.send(b"x")
assert await b.recv(1) == b"x"
async def test_socket():
with tsocket.socket() as s:
assert isinstance(s, tsocket.SocketType)
assert s.family == tsocket.AF_INET
@creates_ipv6
async def test_socket_v6():
with tsocket.socket(tsocket.AF_INET6, tsocket.SOCK_DGRAM) as s:
assert isinstance(s, tsocket.SocketType)
assert s.family == tsocket.AF_INET6
@pytest.mark.skipif(not _sys.platform == "linux", reason="linux only")
async def test_sniff_sockopts():
from socket import AF_INET, AF_INET6, SOCK_DGRAM, SOCK_STREAM
# generate the combinations of families/types we're testing:
sockets = []
for family in [AF_INET, AF_INET6]:
for type in [SOCK_DGRAM, SOCK_STREAM]:
sockets.append(stdlib_socket.socket(family, type))
for socket in sockets:
# regular Trio socket constructor
tsocket_socket = tsocket.socket(fileno=socket.fileno())
# check family / type for correctness:
assert tsocket_socket.family == socket.family
assert tsocket_socket.type == socket.type
# fromfd constructor
tsocket_from_fd = tsocket.fromfd(socket.fileno(), AF_INET, SOCK_STREAM)
# check family / type for correctness:
assert tsocket_from_fd.family == socket.family
assert tsocket_from_fd.type == socket.type
socket.close()
################################################################
# _SocketType
################################################################
async def test_SocketType_basics():
sock = tsocket.socket()
with sock as cm_enter_value:
assert cm_enter_value is sock
assert isinstance(sock.fileno(), int)
assert not sock.get_inheritable()
sock.set_inheritable(True)
assert sock.get_inheritable()
sock.setsockopt(tsocket.IPPROTO_TCP, tsocket.TCP_NODELAY, False)
assert not sock.getsockopt(tsocket.IPPROTO_TCP, tsocket.TCP_NODELAY)
sock.setsockopt(tsocket.IPPROTO_TCP, tsocket.TCP_NODELAY, True)
assert sock.getsockopt(tsocket.IPPROTO_TCP, tsocket.TCP_NODELAY)
# closed sockets have fileno() == -1
assert sock.fileno() == -1
# smoke test
repr(sock)
# detach
with tsocket.socket() as sock:
fd = sock.fileno()
assert sock.detach() == fd
assert sock.fileno() == -1
# close
sock = tsocket.socket()
assert sock.fileno() >= 0
sock.close()
assert sock.fileno() == -1
# share was tested above together with fromshare
# check __dir__
assert "family" in dir(sock)
assert "recv" in dir(sock)
assert "setsockopt" in dir(sock)
# our __getattr__ handles unknown names
with pytest.raises(AttributeError):
sock.asdf
# type family proto
stdlib_sock = stdlib_socket.socket()
sock = tsocket.from_stdlib_socket(stdlib_sock)
assert sock.type == _tsocket.real_socket_type(stdlib_sock.type)
assert sock.family == stdlib_sock.family
assert sock.proto == stdlib_sock.proto
sock.close()
async def test_SocketType_dup():
a, b = tsocket.socketpair()
with a, b:
a2 = a.dup()
with a2:
assert isinstance(a2, tsocket.SocketType)
assert a2.fileno() != a.fileno()
a.close()
await a2.send(b"x")
assert await b.recv(1) == b"x"
async def test_SocketType_shutdown():
a, b = tsocket.socketpair()
with a, b:
await a.send(b"x")
assert await b.recv(1) == b"x"
assert not a.did_shutdown_SHUT_WR
assert not b.did_shutdown_SHUT_WR
a.shutdown(tsocket.SHUT_WR)
assert a.did_shutdown_SHUT_WR
assert not b.did_shutdown_SHUT_WR
assert await b.recv(1) == b""
await b.send(b"y")
assert await a.recv(1) == b"y"
a, b = tsocket.socketpair()
with a, b:
assert not a.did_shutdown_SHUT_WR
a.shutdown(tsocket.SHUT_RD)
assert not a.did_shutdown_SHUT_WR
a, b = tsocket.socketpair()
with a, b:
assert not a.did_shutdown_SHUT_WR
a.shutdown(tsocket.SHUT_RDWR)
assert a.did_shutdown_SHUT_WR
@pytest.mark.parametrize(
"address, socket_type", [
('127.0.0.1', tsocket.AF_INET),
pytest.param('::1', tsocket.AF_INET6, marks=binds_ipv6)
]
)
async def test_SocketType_simple_server(address, socket_type):
# listen, bind, accept, connect, getpeername, getsockname
listener = tsocket.socket(socket_type)
client = tsocket.socket(socket_type)
with listener, client:
await listener.bind((address, 0))
listener.listen(20)
addr = listener.getsockname()[:2]
async with _core.open_nursery() as nursery:
nursery.start_soon(client.connect, addr)
server, client_addr = await listener.accept()
with server:
assert client_addr == server.getpeername() == client.getsockname()
await server.send(b"x")
assert await client.recv(1) == b"x"
# On some macOS systems, getaddrinfo likes to return V4-mapped addresses even
# when we *don't* pass AI_V4MAPPED.
# https://github.com/python-trio/trio/issues/580
def gai_without_v4mapped_is_buggy(): # pragma: no cover
try:
stdlib_socket.getaddrinfo("1.2.3.4", 0, family=stdlib_socket.AF_INET6)
except stdlib_socket.gaierror:
return False
else:
return True
@attr.s
class Addresses:
bind_all = attr.ib()
localhost = attr.ib()
arbitrary = attr.ib()
broadcast = attr.ib()
extra = attr.ib()
# Direct thorough tests of the implicit resolver helpers
@pytest.mark.parametrize(
"socket_type, addrs", [
(
tsocket.AF_INET,
Addresses(
bind_all="0.0.0.0",
localhost="127.0.0.1",
arbitrary="1.2.3.4",
broadcast="255.255.255.255",
extra=(),
),
),
pytest.param(
tsocket.AF_INET6,
Addresses(
bind_all="::",
localhost="::1",
arbitrary="fdf8:f53e:61e4::18",
broadcast="::ffff:255.255.255.255",
extra=(0, 0),
),
marks=creates_ipv6,
),
]
)
async def test_SocketType_resolve(socket_type, addrs):
v6 = (socket_type == tsocket.AF_INET6)
# For some reason the stdlib special-cases "" to pass NULL to getaddrinfo
# They also error out on None, but whatever, None is much more consistent,
# so we accept it too.
for null in [None, ""]:
sock = tsocket.socket(family=socket_type)
got = await sock._resolve_local_address((null, 80))
assert got == (addrs.bind_all, 80, *addrs.extra)
got = await sock._resolve_remote_address((null, 80))
assert got == (addrs.localhost, 80, *addrs.extra)
# AI_PASSIVE only affects the wildcard address, so for everything else
# _resolve_local_address and _resolve_remote_address should work the same:
for resolver in ["_resolve_local_address", "_resolve_remote_address"]:
async def res(*args):
return await getattr(sock, resolver)(*args)
# yapf: disable
assert await res((addrs.arbitrary,
"http")) == (addrs.arbitrary, 80, *addrs.extra)
if v6:
assert await res(("fdf8:f53e:61e4::18", 80, 1)) == ("fdf8:f53e:61e4::18", 80, 1, 0)
assert await res(("fdf8:f53e:61e4::18", 80, 1, 2)) == ("fdf8:f53e:61e4::18", 80, 1, 2)
# V4 mapped addresses resolved if V6ONLY is False
sock.setsockopt(tsocket.IPPROTO_IPV6, tsocket.IPV6_V6ONLY, False)
assert await res(("1.2.3.4",
"http")) == ("::ffff:1.2.3.4", 80, 0, 0)
# Check the <broadcast> special case, because why not
assert await res(("<broadcast>",
123)) == (addrs.broadcast, 123, *addrs.extra)
# yapf: enable
# But not if it's true (at least on systems where getaddrinfo works
# correctly)
if v6 and not gai_without_v4mapped_is_buggy():
sock.setsockopt(tsocket.IPPROTO_IPV6, tsocket.IPV6_V6ONLY, True)
with pytest.raises(tsocket.gaierror) as excinfo:
await res(("1.2.3.4", 80))
# Windows, macOS
expected_errnos = {tsocket.EAI_NONAME}
# Linux
if hasattr(tsocket, "EAI_ADDRFAMILY"):
expected_errnos.add(tsocket.EAI_ADDRFAMILY)
assert excinfo.value.errno in expected_errnos
# A family where we know nothing about the addresses, so should just
# pass them through. This should work on Linux, which is enough to
# smoke test the basic functionality...
try:
netlink_sock = tsocket.socket(
family=tsocket.AF_NETLINK, type=tsocket.SOCK_DGRAM
)
except (AttributeError, OSError):
pass
else:
assert await getattr(netlink_sock, resolver)("asdf") == "asdf"
with pytest.raises(ValueError):
await res("1.2.3.4")
with pytest.raises(ValueError):
await res(("1.2.3.4",))
with pytest.raises(ValueError):
if v6:
await res(("1.2.3.4", 80, 0, 0, 0))
else:
await res(("1.2.3.4", 80, 0, 0))
async def test_SocketType_unresolved_names():
with tsocket.socket() as sock:
await sock.bind(("localhost", 0))
assert sock.getsockname()[0] == "127.0.0.1"
sock.listen(10)
with tsocket.socket() as sock2:
await sock2.connect(("localhost", sock.getsockname()[1]))
assert sock2.getpeername() == sock.getsockname()
# check gaierror propagates out
with tsocket.socket() as sock:
with pytest.raises(tsocket.gaierror):
# definitely not a valid request
await sock.bind(("1.2:3", -1))
# This tests all the complicated paths through _nonblocking_helper, using recv
# as a stand-in for all the methods that use _nonblocking_helper.
async def test_SocketType_non_blocking_paths():
a, b = stdlib_socket.socketpair()
with a, b:
ta = tsocket.from_stdlib_socket(a)
b.setblocking(False)
# cancel before even calling
b.send(b"1")
with _core.CancelScope() as cscope:
cscope.cancel()
with assert_checkpoints():
with pytest.raises(_core.Cancelled):
await ta.recv(10)
        # immediate success (also checks that the previous attempt didn't
        # actually read anything)
        with assert_checkpoints():
            assert await ta.recv(10) == b"1"
# immediate failure
with assert_checkpoints():
with pytest.raises(TypeError):
await ta.recv("haha")
# block then succeed
async def do_successful_blocking_recv():
with assert_checkpoints():
assert await ta.recv(10) == b"2"
async with _core.open_nursery() as nursery:
nursery.start_soon(do_successful_blocking_recv)
await wait_all_tasks_blocked()
b.send(b"2")
# block then cancelled
async def do_cancelled_blocking_recv():
with assert_checkpoints():
with pytest.raises(_core.Cancelled):
await ta.recv(10)
async with _core.open_nursery() as nursery:
nursery.start_soon(do_cancelled_blocking_recv)
await wait_all_tasks_blocked()
nursery.cancel_scope.cancel()
# Okay, here's the trickiest one: we want to exercise the path where
# the task is signaled to wake, goes to recv, but then the recv fails,
# so it has to go back to sleep and try again. Strategy: have two
# tasks waiting on two sockets (to work around the rule against having
# two tasks waiting on the same socket), wake them both up at the same
# time, and whichever one runs first "steals" the data from the
# other:
tb = tsocket.from_stdlib_socket(b)
async def t1():
with assert_checkpoints():
assert await ta.recv(1) == b"a"
with assert_checkpoints():
assert await tb.recv(1) == b"b"
async def t2():
with assert_checkpoints():
assert await tb.recv(1) == b"b"
with assert_checkpoints():
assert await ta.recv(1) == b"a"
async with _core.open_nursery() as nursery:
nursery.start_soon(t1)
nursery.start_soon(t2)
await wait_all_tasks_blocked()
a.send(b"b")
b.send(b"a")
await wait_all_tasks_blocked()
a.send(b"b")
b.send(b"a")
# This tests the complicated paths through connect
async def test_SocketType_connect_paths():
with tsocket.socket() as sock:
with pytest.raises(ValueError):
# Should be a tuple
await sock.connect("localhost")
# cancelled before we start
with tsocket.socket() as sock:
with _core.CancelScope() as cancel_scope:
cancel_scope.cancel()
with pytest.raises(_core.Cancelled):
await sock.connect(("127.0.0.1", 80))
# Cancelled in between the connect() call and the connect completing
with _core.CancelScope() as cancel_scope:
with tsocket.socket() as sock, tsocket.socket() as listener:
await listener.bind(("127.0.0.1", 0))
listener.listen()
# Swap in our weird subclass under the trio.socket._SocketType's
# nose -- and then swap it back out again before we hit
# wait_socket_writable, which insists on a real socket.
class CancelSocket(stdlib_socket.socket):
def connect(self, *args, **kwargs):
cancel_scope.cancel()
sock._sock = stdlib_socket.fromfd(
self.detach(), self.family, self.type
)
sock._sock.connect(*args, **kwargs)
# If connect *doesn't* raise, then pretend it did
raise BlockingIOError # pragma: no cover
sock._sock.close()
sock._sock = CancelSocket()
with assert_checkpoints():
with pytest.raises(_core.Cancelled):
await sock.connect(listener.getsockname())
assert sock.fileno() == -1
# Failed connect (hopefully after raising BlockingIOError)
with tsocket.socket() as sock:
with pytest.raises(OSError):
# TCP port 2 is not assigned. Pretty sure nothing will be
# listening there. (We used to bind a port and then *not* call
# listen() to ensure nothing was listening there, but it turns
# out on macOS if you do this it takes 30 seconds for the
# connect to fail. Really. Also if you use a non-routable
# address. This way fails instantly though. As long as nothing
# is listening on port 2.)
await sock.connect(("127.0.0.1", 2))
async def test_resolve_remote_address_exception_closes_socket():
# Here we are testing issue 247, any cancellation will leave the socket closed
with _core.CancelScope() as cancel_scope:
with tsocket.socket() as sock:
async def _resolve_remote_address(self, *args, **kwargs):
cancel_scope.cancel()
await _core.checkpoint()
sock._resolve_remote_address = _resolve_remote_address
with assert_checkpoints():
with pytest.raises(_core.Cancelled):
await sock.connect('')
assert sock.fileno() == -1
async def test_send_recv_variants():
a, b = tsocket.socketpair()
with a, b:
# recv, including with flags
assert await a.send(b"x") == 1
assert await b.recv(10, tsocket.MSG_PEEK) == b"x"
assert await b.recv(10) == b"x"
# recv_into
await a.send(b"x")
buf = bytearray(10)
await b.recv_into(buf)
assert buf == b"x" + b"\x00" * 9
if hasattr(a, "sendmsg"):
assert await a.sendmsg([b"xxx"], []) == 3
assert await b.recv(10) == b"xxx"
a = tsocket.socket(type=tsocket.SOCK_DGRAM)
b = tsocket.socket(type=tsocket.SOCK_DGRAM)
with a, b:
await a.bind(("127.0.0.1", 0))
await b.bind(("127.0.0.1", 0))
targets = [b.getsockname(), ("localhost", b.getsockname()[1])]
# recvfrom + sendto, with and without names
for target in targets:
assert await a.sendto(b"xxx", target) == 3
(data, addr) = await b.recvfrom(10)
assert data == b"xxx"
assert addr == a.getsockname()
# sendto + flags
#
# I can't find any flags that send() accepts... on Linux at least
# passing MSG_MORE to send_some on a connected UDP socket seems to
# just be ignored.
#
# But there's no MSG_MORE on Windows or macOS. I guess send_some flags
# are really not very useful, but at least this tests them a bit.
if hasattr(tsocket, "MSG_MORE"):
await a.sendto(b"xxx", tsocket.MSG_MORE, b.getsockname())
await a.sendto(b"yyy", tsocket.MSG_MORE, b.getsockname())
await a.sendto(b"zzz", b.getsockname())
(data, addr) = await b.recvfrom(10)
assert data == b"xxxyyyzzz"
assert addr == a.getsockname()
# recvfrom_into
assert await a.sendto(b"xxx", b.getsockname()) == 3
buf = bytearray(10)
(nbytes, addr) = await b.recvfrom_into(buf)
assert nbytes == 3
assert buf == b"xxx" + b"\x00" * 7
assert addr == a.getsockname()
if hasattr(b, "recvmsg"):
assert await a.sendto(b"xxx", b.getsockname()) == 3
(data, ancdata, msg_flags, addr) = await b.recvmsg(10)
assert data == b"xxx"
assert ancdata == []
assert msg_flags == 0
assert addr == a.getsockname()
if hasattr(b, "recvmsg_into"):
assert await a.sendto(b"xyzw", b.getsockname()) == 4
buf1 = bytearray(2)
buf2 = bytearray(3)
ret = await b.recvmsg_into([buf1, buf2])
(nbytes, ancdata, msg_flags, addr) = ret
assert nbytes == 4
assert buf1 == b"xy"
assert buf2 == b"zw" + b"\x00"
assert ancdata == []
assert msg_flags == 0
assert addr == a.getsockname()
if hasattr(a, "sendmsg"):
for target in targets:
assert await a.sendmsg([b"x", b"yz"], [], 0, target) == 3
assert await b.recvfrom(10) == (b"xyz", a.getsockname())
a = tsocket.socket(type=tsocket.SOCK_DGRAM)
b = tsocket.socket(type=tsocket.SOCK_DGRAM)
with a, b:
await b.bind(("127.0.0.1", 0))
await a.connect(b.getsockname())
# send on a connected udp socket; each call creates a separate
# datagram
await a.send(b"xxx")
await a.send(b"yyy")
assert await b.recv(10) == b"xxx"
assert await b.recv(10) == b"yyy"
async def test_idna(monkeygai):
# This is the encoding for "faß.de", which uses one of the characters that
# IDNA 2003 handles incorrectly:
monkeygai.set("ok faß.de", b"xn--fa-hia.de", 80)
monkeygai.set("ok ::1", "::1", 80, flags=_NUMERIC_ONLY)
monkeygai.set("ok ::1", b"::1", 80, flags=_NUMERIC_ONLY)
# Some things that should not reach the underlying socket.getaddrinfo:
monkeygai.set("bad", "fass.de", 80)
# We always call socket.getaddrinfo with bytes objects:
monkeygai.set("bad", "xn--fa-hia.de", 80)
assert "ok ::1" == await tsocket.getaddrinfo("::1", 80)
assert "ok ::1" == await tsocket.getaddrinfo(b"::1", 80)
assert "ok faß.de" == await tsocket.getaddrinfo("faß.de", 80)
assert "ok faß.de" == await tsocket.getaddrinfo("xn--fa-hia.de", 80)
assert "ok faß.de" == await tsocket.getaddrinfo(b"xn--fa-hia.de", 80)
async def test_getprotobyname():
# These are the constants used in IP header fields, so the numeric values
# had *better* be stable across systems...
assert await tsocket.getprotobyname("udp") == 17
assert await tsocket.getprotobyname("tcp") == 6
async def test_custom_hostname_resolver(monkeygai):
class CustomResolver:
async def getaddrinfo(self, host, port, family, type, proto, flags):
return ("custom_gai", host, port, family, type, proto, flags)
async def getnameinfo(self, sockaddr, flags):
return ("custom_gni", sockaddr, flags)
cr = CustomResolver()
assert tsocket.set_custom_hostname_resolver(cr) is None
# Check that the arguments are all getting passed through.
# We have to use valid calls to avoid making the underlying system
# getaddrinfo cranky when it's used for NUMERIC checks.
for vals in [
(tsocket.AF_INET, 0, 0, 0),
(0, tsocket.SOCK_STREAM, 0, 0),
(0, 0, tsocket.IPPROTO_TCP, 0),
(0, 0, 0, tsocket.AI_CANONNAME),
]:
assert (
await tsocket.getaddrinfo("localhost", "foo", *vals) ==
("custom_gai", b"localhost", "foo", *vals)
)
# IDNA encoding is handled before calling the special object
got = await tsocket.getaddrinfo("föö", "foo")
expected = ("custom_gai", b"xn--f-1gaa", "foo", 0, 0, 0, 0)
assert got == expected
assert (await tsocket.getnameinfo("a", 0) == ("custom_gni", "a", 0))
# We can set it back to None
assert tsocket.set_custom_hostname_resolver(None) is cr
# And now Trio switches back to calling socket.getaddrinfo (specifically
# our monkeypatched version of socket.getaddrinfo)
monkeygai.set("x", b"host", "port", family=0, type=0, proto=0, flags=0)
assert await tsocket.getaddrinfo("host", "port") == "x"
async def test_custom_socket_factory():
class CustomSocketFactory:
def socket(self, family, type, proto):
return ("hi", family, type, proto)
csf = CustomSocketFactory()
assert tsocket.set_custom_socket_factory(csf) is None
assert tsocket.socket() == ("hi", tsocket.AF_INET, tsocket.SOCK_STREAM, 0)
assert tsocket.socket(1, 2, 3) == ("hi", 1, 2, 3)
# socket with fileno= doesn't call our custom method
fd = stdlib_socket.socket().detach()
wrapped = tsocket.socket(fileno=fd)
assert hasattr(wrapped, "bind")
wrapped.close()
# Likewise for socketpair
a, b = tsocket.socketpair()
with a, b:
assert hasattr(a, "bind")
assert hasattr(b, "bind")
assert tsocket.set_custom_socket_factory(None) is csf
async def test_SocketType_is_abstract():
with pytest.raises(TypeError):
tsocket.SocketType()
@pytest.mark.skipif(
not hasattr(tsocket, "AF_UNIX"), reason="no unix domain sockets"
)
async def test_unix_domain_socket():
# Bind has a special branch to use a thread, since it has to do filesystem
# traversal. Maybe connect should too? Not sure.
async def check_AF_UNIX(path):
with tsocket.socket(family=tsocket.AF_UNIX) as lsock:
await lsock.bind(path)
lsock.listen(10)
with tsocket.socket(family=tsocket.AF_UNIX) as csock:
await csock.connect(path)
ssock, _ = await lsock.accept()
await csock.send(b"x")
assert await ssock.recv(1) == b"x"
# Can't use tmpdir fixture, because we can exceed the maximum AF_UNIX path
# length on macOS.
with tempfile.TemporaryDirectory() as tmpdir:
path = "{}/sock".format(tmpdir)
await check_AF_UNIX(path)
try:
cookie = os.urandom(20).hex().encode("ascii")
await check_AF_UNIX(b"\x00trio-test-" + cookie)
except FileNotFoundError:
# macOS doesn't support abstract filenames with the leading NUL byte
pass
async def test_interrupted_by_close():
a_stdlib, b_stdlib = stdlib_socket.socketpair()
with a_stdlib, b_stdlib:
a_stdlib.setblocking(False)
data = b"x" * 99999
try:
while True:
a_stdlib.send(data)
except BlockingIOError:
pass
a = tsocket.from_stdlib_socket(a_stdlib)
async def sender():
with pytest.raises(_core.ClosedResourceError):
await a.send(data)
async def receiver():
with pytest.raises(_core.ClosedResourceError):
await a.recv(1)
async with _core.open_nursery() as nursery:
nursery.start_soon(sender)
nursery.start_soon(receiver)
await wait_all_tasks_blocked()
a.close()
```
#### File: trio/trio/_threads.py
```python
import threading
import queue as stdlib_queue
from itertools import count
import attr
import outcome
import trio
from ._sync import CapacityLimiter
from ._core import enable_ki_protection, disable_ki_protection, RunVar
__all__ = [
"run_sync_in_worker_thread",
"current_default_worker_thread_limiter",
"BlockingTrioPortal",
]
class BlockingTrioPortal:
"""A portal that synchronous threads can reach through to run code in the
Trio thread.
Most Trio functions can only be called from the Trio thread, which is
sometimes annoying. What if you really need to call a Trio function from a
worker thread? That's where :class:`BlockingTrioPortal` comes in: it's the
rare Trio object whose methods can – in fact, must! – be called from
another thread, and it allows you to call all those other functions.
There is one complication: it's possible for a single Python program to
contain multiple calls to :func:`trio.run`, either in sequence – like in a
test suite that calls :func:`trio.run` for each test – or simultaneously
in different threads. So how do you control which :func:`trio.run` your
portal opens into?
The answer is that each :class:`BlockingTrioPortal` object is associated
with one *specific* call to :func:`trio.run`.
The simplest way to set this up is to instantiate the class with no
arguments inside Trio; this automatically binds it to the context where
you instantiate it::
async def some_function():
portal = trio.BlockingTrioPortal()
await trio.run_sync_in_worker_thread(sync_fn, portal)
Alternatively, you can pass an explicit :class:`trio.hazmat.TrioToken` to
specify the :func:`trio.run` that you want your portal to connect to.
"""
def __init__(self, trio_token=None):
if trio_token is None:
trio_token = trio.hazmat.current_trio_token()
self._trio_token = trio_token
# This is the part that runs in the Trio thread
def _run_cb(self, q, afn, args):
@disable_ki_protection
async def unprotected_afn():
return await afn(*args)
async def await_in_trio_thread_task():
q.put_nowait(await outcome.acapture(unprotected_afn))
trio.hazmat.spawn_system_task(await_in_trio_thread_task, name=afn)
# This is the part that runs in the Trio thread
def _run_sync_cb(self, q, fn, args):
@disable_ki_protection
def unprotected_fn():
return fn(*args)
res = outcome.capture(unprotected_fn)
q.put_nowait(res)
def _do_it(self, cb, fn, *args):
try:
trio.hazmat.current_task()
except RuntimeError:
pass
else:
raise RuntimeError(
"this is a blocking function; call it from a thread"
)
q = stdlib_queue.Queue()
self._trio_token.run_sync_soon(cb, q, fn, args)
return q.get().unwrap()
def run(self, afn, *args):
"""Run the given async function in the Trio thread, blocking until it
is complete.
Returns or raises whatever the given function returns or raises. It
can also raise exceptions of its own:
Raises:
RunFinishedError: if the corresponding call to :func:`trio.run` has
already completed.
Cancelled: if the corresponding call to :func:`trio.run` completes
while ``afn(*args)`` is running, then ``afn`` is likely to raise
:class:`Cancelled`, and this will propagate out into this function call.
RuntimeError: if you try calling this from inside the Trio thread,
which would otherwise cause a deadlock.
"""
return self._do_it(self._run_cb, afn, *args)
def run_sync(self, fn, *args):
"""Run the given synchronous function in the Trio thread, blocking
until it is complete.
Returns or raises whatever the given function returns or raises. It
can also raise exceptions of its own:
Raises:
RunFinishedError: if the corresponding call to :func:`trio.run` has
already completed.
RuntimeError: if you try calling this from inside the Trio thread,
which would otherwise cause a deadlock.
"""
return self._do_it(self._run_sync_cb, fn, *args)
################################################################
# XX at some point it probably makes sense to implement some sort of thread
# pool? Or at least that's what everyone says.
#
# There are two arguments for thread pools:
# - speed (re-using threads instead of starting new ones)
# - throttling (if you have 1000 tasks, queue them up instead of spawning 1000
# threads and running out of memory)
#
# Regarding speed, it's not clear how much of an advantage this is. Some
# numbers on my Linux laptop:
#
# Spawning and then joining a thread:
#
# In [25]: %timeit t = threading.Thread(target=lambda: None); t.start(); t.join()
# 10000 loops, best of 3: 44 µs per loop
#
# Using a thread pool:
#
# In [26]: tpp = concurrent.futures.ThreadPoolExecutor()
# In [27]: %timeit tpp.submit(lambda: None).result()
# <warm up run elided>
# In [28]: %timeit tpp.submit(lambda: None).result()
# 10000 loops, best of 3: 40.8 µs per loop
#
# What's a fast getaddrinfo look like?
#
# # with hot DNS cache:
# In [23]: %timeit socket.getaddrinfo("google.com", "80")
# 10 loops, best of 3: 50.9 ms per loop
#
# In [29]: %timeit socket.getaddrinfo("127.0.0.1", "80")
# 100000 loops, best of 3: 9.73 µs per loop
#
#
# So... maybe we can beat concurrent.futures with a super-efficient thread
# pool or something, but there really is not a lot of headroom here.
#
# Of course other systems might be different... here's CPython 3.6 in a
# Virtualbox VM running Windows 10 on that same Linux laptop:
#
# In [13]: %timeit t = threading.Thread(target=lambda: None); t.start(); t.join()
# 10000 loops, best of 3: 127 µs per loop
#
# In [18]: %timeit tpp.submit(lambda: None).result()
# 10000 loops, best of 3: 31.9 µs per loop
#
# So on Windows there *might* be an advantage? You've gotta be doing a lot of
# connections, with very fast DNS indeed, for that 100 us to matter. But maybe
# someone is.
#
#
# Regarding throttling: this is very much a trade-off. On the one hand, you
# don't want to overwhelm the machine, obviously. On the other hand, queueing
# up work on a central thread-pool creates a central coordination point which
# can potentially create deadlocks and all kinds of fun things. This is very
# context dependent. For getaddrinfo, whatever, they'll make progress and
# complete (we hope), and you want to throttle them to some reasonable
# amount. For calling waitpid() (because just say no to SIGCHLD), then you
# really want one thread-per-waitpid(), because for all you know the user has
# written some ridiculous thing like:
#
# for p in processes:
# await spawn(p.wait)
# # Deadlock here if there are enough processes:
# await some_other_subprocess.wait()
# for p in processes:
# p.terminate()
#
# This goes doubly for the sort of wacky thread usage we see in curio.abide
# (though, I'm not sure if that's actually useful in practice in our context,
# run_in_trio_thread seems like it might be a nicer synchronization primitive
# for most uses than trying to make threading.Lock awaitable).
#
# See also this very relevant discussion:
#
# https://twistedmatrix.com/trac/ticket/5298
#
# "Interacting with the products at Rackspace which use Twisted, I've seen
# problems caused by thread-pool maximum sizes with some annoying
# regularity. The basic problem is this: if you have a hard limit on the
# number of threads, *it is not possible to write a correct program which may
# require starting a new thread to un-block a blocked pool thread*" - glyph
#
# For now, if we want to throttle getaddrinfo I think the simplest thing is
# for the socket code to have a semaphore for getaddrinfo calls.
#
# Regarding the memory overhead of threads, in theory one should be able to
# reduce this a *lot* for a thread that's just calling getaddrinfo or
# (especially) waitpid. Windows and pthreads both offer the ability to set
# thread stack size on a thread-by-thread basis. Unfortunately as of 3.6
# CPython doesn't expose this in a useful way (all you can do is set it
# globally for the whole process, so it's - ironically - not thread safe).
#
# (It's also unclear how much stack size actually matters; on a 64-bit Linux
# server with overcommit -- i.e., the most common configuration -- then AFAICT
# really the only real limit is on stack size actually *used*; how much you
# *allocate* should be pretty much irrelevant.)
_limiter_local = RunVar("limiter")
# I pulled this number out of the air; it isn't based on anything. Probably we
# should make some kind of measurements to pick a good value.
DEFAULT_LIMIT = 40
_worker_thread_counter = count()
def current_default_worker_thread_limiter():
"""Get the default :class:`CapacityLimiter` used by
:func:`run_sync_in_worker_thread`.
The most common reason to call this would be if you want to modify its
:attr:`~CapacityLimiter.total_tokens` attribute.
"""
try:
limiter = _limiter_local.get()
except LookupError:
limiter = CapacityLimiter(DEFAULT_LIMIT)
_limiter_local.set(limiter)
return limiter
# Eventually we might build this into a full-fledged deadlock-detection
# system; see https://github.com/python-trio/trio/issues/182
# But for now we just need an object to stand in for the thread, so we can
# keep track of who's holding the CapacityLimiter's token.
@attr.s(frozen=True, cmp=False, hash=False)
class ThreadPlaceholder:
name = attr.ib()
@enable_ki_protection
async def run_sync_in_worker_thread(
sync_fn, *args, cancellable=False, limiter=None
):
"""Convert a blocking operation into an async operation using a thread.
These two lines are equivalent::
sync_fn(*args)
await run_sync_in_worker_thread(sync_fn, *args)
except that if ``sync_fn`` takes a long time, then the first line will
block the Trio loop while it runs, while the second line allows other Trio
tasks to continue working while ``sync_fn`` runs. This is accomplished by
pushing the call to ``sync_fn(*args)`` off into a worker thread.
Args:
sync_fn: An arbitrary synchronous callable.
*args: Positional arguments to pass to sync_fn. If you need keyword
arguments, use :func:`functools.partial`.
cancellable (bool): Whether to allow cancellation of this operation. See
discussion below.
limiter (None, CapacityLimiter, or CapacityLimiter-like object):
An object used to limit the number of simultaneous threads. Most
commonly this will be a :class:`CapacityLimiter`, but it could be
anything providing compatible
:meth:`~trio.CapacityLimiter.acquire_on_behalf_of` and
:meth:`~trio.CapacityLimiter.release_on_behalf_of`
methods. :func:`run_sync_in_worker_thread` will call
``acquire_on_behalf_of`` before starting the thread, and
``release_on_behalf_of`` after the thread has finished.
If None (the default), uses the default :class:`CapacityLimiter`, as
returned by :func:`current_default_worker_thread_limiter`.
**Cancellation handling**: Cancellation is a tricky issue here, because
neither Python nor the operating systems it runs on provide any general
mechanism for cancelling an arbitrary synchronous function running in a
thread. :func:`run_sync_in_worker_thread` will always check for
cancellation on entry, before starting the thread. But once the thread is
running, there are two ways it can handle being cancelled:
* If ``cancellable=False``, the function ignores the cancellation and
keeps going, just like if we had called ``sync_fn`` synchronously. This
is the default behavior.
* If ``cancellable=True``, then ``run_sync_in_worker_thread`` immediately
raises :exc:`Cancelled`. In this case **the thread keeps running in
background** – we just abandon it to do whatever it's going to do, and
silently discard any return value or errors that it raises. Only use
this if you know that the operation is safe and side-effect free. (For
example: :func:`trio.socket.getaddrinfo` is implemented using
:func:`run_sync_in_worker_thread`, and it sets ``cancellable=True``
because it doesn't really affect anything if a stray hostname lookup
keeps running in the background.)
The ``limiter`` is only released after the thread has *actually*
finished – which in the case of cancellation may be some time after
:func:`run_sync_in_worker_thread` has returned. (This is why it's
crucial that :func:`run_sync_in_worker_thread` takes care of acquiring
and releasing the limiter.) If :func:`trio.run` finishes before the
thread does, then the limiter release method will never be called at
all.
.. warning::
You should not use :func:`run_sync_in_worker_thread` to call
long-running CPU-bound functions! In addition to the usual GIL-related
reasons why using threads for CPU-bound work is not very effective in
Python, there is an additional problem: on CPython, `CPU-bound threads
tend to "starve out" IO-bound threads
<https://bugs.python.org/issue7946>`__, so using
:func:`run_sync_in_worker_thread` for CPU-bound work is likely to
adversely affect the main thread running Trio. If you need to do this,
you're better off using a worker process, or perhaps PyPy (which still
has a GIL, but may do a better job of fairly allocating CPU time
between threads).
Returns:
Whatever ``sync_fn(*args)`` returns.
Raises:
Exception: Whatever ``sync_fn(*args)`` raises.
"""
await trio.hazmat.checkpoint_if_cancelled()
token = trio.hazmat.current_trio_token()
if limiter is None:
limiter = current_default_worker_thread_limiter()
# Holds a reference to the task that's blocked in this function waiting
# for the result – or None if this function was cancelled and we should
# discard the result.
task_register = [trio.hazmat.current_task()]
name = "trio-worker-{}".format(next(_worker_thread_counter))
placeholder = ThreadPlaceholder(name)
# This function gets scheduled into the Trio run loop to deliver the
# thread's result.
def report_back_in_trio_thread_fn(result):
def do_release_then_return_result():
# release_on_behalf_of is an arbitrary user-defined method, so it
# might raise an error. If it does, we want that error to
# replace the regular return value, and if the regular return was
# already an exception then we want them to chain.
try:
return result.unwrap()
finally:
limiter.release_on_behalf_of(placeholder)
result = outcome.capture(do_release_then_return_result)
if task_register[0] is not None:
trio.hazmat.reschedule(task_register[0], result)
# This is the function that runs in the worker thread to do the actual
# work and then schedule the call to report_back_in_trio_thread_fn
def worker_thread_fn():
result = outcome.capture(sync_fn, *args)
try:
token.run_sync_soon(report_back_in_trio_thread_fn, result)
except trio.RunFinishedError:
# The entire run finished, so our particular task is certainly
# long gone -- it must have been cancelled.
pass
await limiter.acquire_on_behalf_of(placeholder)
try:
# daemon=True because it might get left behind if we cancel, and in
# this case shouldn't block process exit.
thread = threading.Thread(
target=worker_thread_fn, name=name, daemon=True
)
thread.start()
except:
limiter.release_on_behalf_of(placeholder)
raise
def abort(_):
if cancellable:
task_register[0] = None
return trio.hazmat.Abort.SUCCEEDED
else:
return trio.hazmat.Abort.FAILED
return await trio.hazmat.wait_task_rescheduled(abort)
```
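A minimal usage sketch of the two entry points above, assuming the older trio API shown in the docstrings (where `run_sync_in_worker_thread` and `BlockingTrioPortal` are re-exported at the top level); the worker function and its return value are illustrative.
```python
import trio

def blocking_work(portal):
    # Runs in the worker thread; use the portal to hop back into the Trio thread.
    portal.run_sync(print, "hello from the worker thread")
    portal.run(trio.sleep, 0.1)  # awaits an async function on the Trio side
    return 42

async def main():
    portal = trio.BlockingTrioPortal()
    result = await trio.run_sync_in_worker_thread(blocking_work, portal)
    assert result == 42

trio.run(main)
```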
|
{
"source": "jefffm/swimpy",
"score": 2
}
|
#### File: swimpy/swimpy/process.py
```python
from __future__ import absolute_import
import logging
import multiprocessing
import signal
import sys
import msgpack
from tornado.ioloop import PeriodicCallback
from .app import Application
from .constants import (
PING_INTERVAL,
SHUTDOWN_TIMEOUT,
)
from .model.message import Sync
from .server import Server
from .tornado.splay_callback import PeriodicCallbackWithSplay
LOGGER = logging.getLogger(__name__)
class SwimpyProcess(multiprocessing.Process):
def __init__(self, routes, node, pipe, *args, **kwargs):
super(self.__class__, self).__init__(*args, **kwargs)
self.routes = routes
self.node = node
self.pipe = pipe
self.bind_addr = node.addr
self.bind_port = node.port
# We don't want to initialize these until after we fork
self.ioloop = None
self.server = None
self.app = None
def _handle_pipe_messages(self, *args, **kwargs):
message = self.pipe.recv()
unpacked_message = msgpack.unpackb(message)
message_type = unpacked_message.pop('type')
if message_type == Sync.MESSAGE_TYPE:
self.pipe.send(self.app.nodes)
def run(self):
signal.signal(signal.SIGTERM, self.shutdown_sig_handler)
signal.signal(signal.SIGINT, self.shutdown_sig_handler)
from tornado.ioloop import IOLoop
self.ioloop = IOLoop.current()  # get a reference to the IOLoop post-fork
LOGGER.info('Starting server on tcp://{}:{}'.format(self.bind_addr, self.bind_port))
self.app = Application(routes=self.routes, node=self.node, pipe=self.pipe)
self.server = Server(message_handler=self.app.route_stream_message)
self.server.listen(self.bind_port, address=self.bind_addr)
self.ioloop.add_handler(self.pipe, self._handle_pipe_messages, self.ioloop.READ)
self.ioloop.spawn_callback(self.app.send_buffered_gossip)
# Ping a random node every PING_INTERVAL seconds, +/- 10%
# This jitter should reduce the average aggregate peak network throughput
# when running with large cluster sizes
PeriodicCallbackWithSplay(self.app.ping_random_node,
PING_INTERVAL * 1000,
splay_pct=10).start()
LOGGER.info('Starting ioloop')
self.ioloop.start()
def stop(self, shutdown_timeout=SHUTDOWN_TIMEOUT, graceful=True):
"""
Trigger a graceful stop of the server and the server's ioloop, allowing in-flight
connections to finish
Fall back to a "less graceful" stop when the shutdown timeout is reached
"""
def poll_stop():
# Tornado uses a "waker" handler internally that we'd like to ignore here
remaining_handlers = {
k: v
for k, v in self.ioloop._handlers.iteritems()
if k != self.ioloop._waker.fileno()
}
# Poll for any remaining connections
remaining_count = len(remaining_handlers)
# Once all handlers have been removed, we can stop safely
if remaining_count == 0:
LOGGER.info('All finished! Graceful stop complete')
self.ioloop.stop()
else:
LOGGER.info('Waiting on IO handlers ({} remaining). '
'Handlers: {!r}'.format(remaining_count,
remaining_handlers))
self.ioloop.remove_handler(self.pipe)
self.server.shutdown()
self.server.stop()
if graceful:
# Poll the IOLoop's handlers until they all shut down.
poller = PeriodicCallback(poll_stop, 500, io_loop=self.ioloop)
poller.start()
self.ioloop.add_timeout(self.ioloop.time() + shutdown_timeout,
self.ioloop.stop)
else:
self.ioloop.stop()
def shutdown_sig_handler(self, sig, frame):
"""
Signal handler for "stop" signals (SIGTERM, SIGINT)
"""
LOGGER.warning('{!r} caught signal {!r}! Shutting down'.format(self, sig))
try:
self.ioloop.add_callback_from_signal(self.stop)
except Exception as e:
LOGGER.error(
'Encountered exception while shutting down: {}'.format(e)
)
sys.exit(1)
```
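For reference, a standalone sketch of the same "poll for in-flight work, then hard-stop after a timeout" pattern used by `SwimpyProcess.stop()` above, written against the modern Tornado `PeriodicCallback`/`add_timeout` signatures; the function names and the five-second timeout are illustrative, not swimpy API.
```python
from tornado.ioloop import IOLoop, PeriodicCallback

SHUTDOWN_TIMEOUT = 5  # seconds; illustrative value

def stop_gracefully(ioloop, has_pending_work):
    def poll_stop():
        # Stop only once nothing is left in flight.
        if not has_pending_work():
            ioloop.stop()
    poller = PeriodicCallback(poll_stop, 500)  # poll every 500 ms
    poller.start()
    # Hard fallback: stop unconditionally once the timeout elapses.
    ioloop.add_timeout(ioloop.time() + SHUTDOWN_TIMEOUT, ioloop.stop)

if __name__ == "__main__":
    loop = IOLoop.current()
    loop.add_callback(stop_gracefully, loop, lambda: False)
    loop.start()
```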
#### File: swimpy/test/test_integ.py
```python
from time import sleep
import logging
from flaky import flaky
import pytest
from swimpy.routes import ROUTES
from swimpy.model.message import Ping, Ack, PingReq, Alive
from swimpy.model.node import Node
from swimpy.runtime import Runtime
from swimpy.util import send_message
LOGGER = logging.getLogger(__name__)
@pytest.mark.timeout(10)
@pytest.mark.integration()
def test_runtime_responds_to_ping():
n1 = Node(node_id='node-1', addr='127.0.0.1', port=1338)
r = Runtime(routes=ROUTES, node=n1)
try:
r.start()
sleep(1)
assert r.is_alive()
ping = Ping(seqno=55, node=n1)
ack = send_message(n1.addr, n1.port, ping, reply_cls=Ack)[0]
# Make sure the sequence numbers match
assert ack.seqno == ping.seqno
finally:
r.stop()
@pytest.mark.timeout(10)
@pytest.mark.integration()
def test_runtime_responds_to_pingreq():
n1 = Node(node_id='node-1', addr='127.0.0.1', port=9000)
r1 = Runtime(routes=ROUTES, node=n1)
n2 = Node(node_id='node-2', addr='127.0.0.1', port=9001)
r2 = Runtime(routes=ROUTES, node=n2)
try:
LOGGER.info('Starting node1')
r1.start()
LOGGER.info('Starting node2')
r2.start()
sleep(1)
assert r1.is_alive()
assert r2.is_alive()
LOGGER.info('node1 and node2 are alive')
# Send a ping-req to node-1 for node-2 and wait for an ack
pingreq = PingReq(seqno=101, node=n1, target_node=n2)
ack = send_message(n1.addr, n1.port, pingreq, reply_cls=Ack)[0]
# Make sure the sequence numbers match
assert ack.seqno == pingreq.seqno
finally:
r1.stop()
r2.stop()
@flaky
@pytest.mark.timeout(15)
@pytest.mark.parametrize('num_nodes,deadline', [
(3, 1),
(12, 7),
])
@pytest.mark.integration()
def test_join(num_nodes, deadline):
"""
Test that we're able to join <num_nodes> into a cluster within <deadline> secs
This *usually* passes, but the flaky decorator will retry in the improbable
case it does fail
"""
nodes = {}
runtimes = {}
port = 10090
for i in xrange(num_nodes):
node_id = 'node-{}'.format(i)
nodes[node_id] = Node(node_id=node_id, addr='127.0.0.1', port=port + i)
runtimes[node_id] = Runtime(routes=ROUTES, node=nodes[node_id])
try:
for runtime in runtimes.values():
runtime.start()
sleep(1)
for node_id, runtime in runtimes.iteritems():
assert runtime.is_alive()
LOGGER.info('{} is alive'.format(node_id))
node_ids = nodes.keys()
for i, node_id in enumerate(node_ids[:-1]):
next_node_id = node_ids[i + 1]
alive = Alive(node=nodes[next_node_id], sender=nodes[next_node_id])
node = nodes[node_id]
send_message(node.addr, node.port, alive)
LOGGER.info('Sleeping for {} seconds'.format(deadline))
sleep(deadline)
for node_id in nodes:
for runtime in runtimes.values():
LOGGER.info('checking if {} is in runtime {}'.format(node_id, runtime.nodes.keys()))
assert node_id in runtime.nodes.keys() # .keys() gives us better debug output
finally:
LOGGER.info('Shutting down runtimes')
for runtime in runtimes.values():
runtime.stop()
@pytest.mark.timeout(15)
@pytest.mark.integration()
def test_join_with_seed_nodes():
# Create three swimpy Runtime objects
n1 = Node(node_id='node-1', addr='127.0.0.1', port=9900)
r1 = Runtime(routes=ROUTES, node=n1)
# Configure a list of seed nodes to send JOINs to on startup
n2 = Node(node_id='node-2', addr='127.0.0.1', port=9901)
r2 = Runtime(routes=ROUTES, node=n2, seed_nodes=[('127.0.0.1', 9900)])
n3 = Node(node_id='node-3', addr='127.0.0.1', port=9902)
r3 = Runtime(routes=ROUTES, node=n3, seed_nodes=[('127.0.0.1', 9901)])
try:
r1.start()
sleep(1)
r2.start()
sleep(1)
r3.start()
sleep(1)
for runtime in [r1, r2, r3]:
nodes_dict = runtime.nodes
LOGGER.info('Checking {} for all three nodes'.format(runtime))
assert sorted(nodes_dict) == ['node-1', 'node-2', 'node-3']
except Exception as e:
LOGGER.exception(e)
raise
finally:
try:
r1.stop()
r2.stop()
r3.stop()
except Exception as e:
LOGGER.exception(e)
raise
```
|
{
"source": "jeffFranklin/uw-saml-python",
"score": 3
}
|
#### File: uw_saml2/idp/attribute.py
```python
def map(attribute_data, idp):
"""
Map attribute data from an IdP's SAML Response to values that are
easier to consume.
"""
attribute_map = {idp.id_attribute: idp.mapped_id_attribute}
attribute_map.update(idp.attribute_map)
for key, values in attribute_data.items():
attribute = attribute_map.get(key, key)
if not isinstance(attribute, Attribute):
attribute = Attribute(attribute)
value = attribute.map(values)
if value is not None:
yield attribute.name, value
class Attribute:
"""Base class for mapping a list of attribute values."""
def __init__(self, name):
self.name = name
def map(self, values):
"""Return only the first value in a list of values."""
if not values:
return None
return values[0]
class List(Attribute):
"""An attribute key whose values should be returned as a list."""
def map(self, values):
return values[:]
class UWGroups(List):
"""An attribute that splits out the common UW Groups prefix."""
prefix = 'urn:mace:washington.edu:groups:'
def map(self, values):
results = []
for value in values:
if value.startswith(self.prefix):
value = value.split(self.prefix)[1]
results.append(value)
return results
class NestedNameid(Attribute):
"""An attribute that's an object of a NameId structure."""
def map(self, values):
if not values:
return None
return values[0].get('NameID', {}).get('value')
```
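A small self-contained sketch of how the mapping classes above behave, assuming the module is importable as `uw_saml2.idp.attribute` (per the file header); the attribute names and group strings are illustrative.
```python
from uw_saml2.idp.attribute import Attribute, List, UWGroups

# Single-valued attribute: only the first value is kept.
assert Attribute('uwnetid').map(['javerage', 'ignored']) == 'javerage'

# List attribute: all values are kept.
assert List('affiliations').map(['member', 'student']) == ['member', 'student']

# UW Groups: the common urn prefix is stripped; other values pass through as-is.
assert UWGroups('groups').map([
    'urn:mace:washington.edu:groups:uw_example',
    'some-other-group',
]) == ['uw_example', 'some-other-group']
```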
|
{
"source": "jeff-fred/TerminalStockViewer",
"score": 3
}
|
#### File: jeff-fred/TerminalStockViewer/terminal.py
```python
import os
import data
import stock
import etf
## FINAL VARS ##
TITLE = """\n
============================
Terminal Stock Viewer
============================
"""
INTRODUCTION = "Welcome!\nThis app will allow you to view the metrics of any stock :) \nEnjoy."
TRACKED_TICKER = "\nTracked Ticker: "
MAIN_MENU = """
1. Information
2. Change Ticker
3. Exit
"""
INFORMATION_MENU = """
1. Basic information
2. All information
3. Price information
4. Dividend information
"""
CHANGE_TICKER_MENU = """
Please enter a new ticker to track, as well as a period and interval.
Valid periods: 1d,5d,1mo,3mo,6mo,1y,2y,5y,10y,ytd,max
Valid Intervals: 1m,2m,5m,15m,30m,60m,90m,1h,1d,5d,1wk,1mo,3mo
"""
## VARIABLES ##
trackedTicker = 'None'
trackedPeriod = 'None'
trackedInterval = 'None'
## FUNCTIONS ##
# Clear the terminal of all other junk
def clear_terminal():
os.system('cls')
# Display title
def display_title():
print(TITLE)
# Display intro
def display_introduction():
print(INTRODUCTION)
# Display terminal menu
def display_main_menu():
print(MAIN_MENU)
def display_info_menu():
print(INFORMATION_MENU)
# Display tracked ticker
def display_tracked_ticker():
print(TRACKED_TICKER + trackedTicker)
# Display all given dictionary info
def display_given_info(info):
for k,v in info.items():
print("{0}: {1}".format(k, v))
# Display options to go back or end
def display_end_options():
print("\n\t5. Back to Main Menu\n\t6. Exit\n\n")
# Display basic info depending on quoteType
def display_basic_info(ticker):
if data.get_ticker_type(ticker) == 'EQUITY':
display_given_info(stock.get_basic_stock_info(ticker))
elif data.get_ticker_type(ticker) == 'ETF':
display_given_info(etf.get_basic_ETF_info(ticker))
else:
print("Error, restart app & check inputs.")
# Change the ticker and its attributes
def request_ticker_change():
print(CHANGE_TICKER_MENU)
global trackedTicker, trackedPeriod, trackedInterval
trackedTicker = str(input("\n\tNew Ticker: "))
trackedPeriod = str(input("\tTicker Period: "))
trackedInterval = str(input("\tTicker interval: "))
# Request to choose
def request_choice():
try:
return int(input("Choice: "))
except:
print('Enter an integer next time; restart the app.')
## ----- MAIN TERMINAL APP -----
if __name__ == "__main__":
runningApp = True
clear_terminal()
request_ticker_change()
# Change ticker history right off the bat
trackedTicker = data.generate_ticker(trackedTicker)
data.change_history(trackedTicker, trackedPeriod, trackedInterval)
while runningApp:
clear_terminal()
display_title()
display_introduction()
display_main_menu()
choice = request_choice()
# Information
if choice == 1:
clear_terminal()
display_title()
display_info_menu()
choice = request_choice()
# Display the basic info
if choice == 1:
clear_terminal()
display_title()
display_basic_info(trackedTicker)
# Display all the information
if choice == 2:
clear_terminal()
display_title()
info = data.get_ticker_info(trackedTicker)
display_given_info(info)
# Display the price info
if choice == 3:
clear_terminal()
display_title()
info = {}
if data.get_ticker_type(trackedTicker) == 'ETF':
info = etf.get_price_info(trackedTicker)
elif data.get_ticker_type(trackedTicker) == 'EQUITY':
info = stock.get_price_info(trackedTicker)
display_given_info(info)
# Display dividend information
if choice == 4:
clear_terminal()
display_title()
info = {}
if data.get_ticker_type(trackedTicker) == 'ETF':
info = etf.get_dividend_info(trackedTicker)
elif data.get_ticker_type(trackedTicker) == 'EQUITY':
info = stock.get_dividend_info(trackedTicker)
display_given_info(info)
display_end_options()
choice = request_choice()
if choice == 5:
choice = 0
elif choice == 6:
clear_terminal()
print("\n Bye bye!\n\n")
break
# Change ticker
if choice == 2:
request_ticker_change()
# Change ticker history
trackedTicker = data.generate_ticker(trackedTicker)
try:
data.change_history(trackedTicker, trackedPeriod, trackedInterval)
print("\nTicker successfully changed.")
except:
print("Error occurred, please restart the app.")
break
```
|
{
"source": "Jefffrey/stable-mango",
"score": 3
}
|
#### File: Jefffrey/stable-mango/poll.py
```python
import socket
import datetime
import time
import sqlite3
POLLING_INTERVAL = 5 # seconds
SQLITE3_DB_FILENAME = "raw.db"
DB_TABLE_NAME = "ping_data"
PING_SERVER = "8.8.8.8"
PING_PORT = 53 # DNS port
def ping():
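# "Ping" here means opening a TCP connection to a well-known server/port (Google's
# public DNS at 8.8.8.8:53), not an ICMP echo; a connect that succeeds within the
# default timeout counts as connectivity.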
try:
socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect((PING_SERVER, PING_PORT))
return True
except Exception:
return False
def main():
socket.setdefaulttimeout(3)
sqlite_conn = sqlite3.connect(SQLITE3_DB_FILENAME)
with sqlite_conn:
sqlite_conn.execute("PRAGMA journal_mode=wal")
table_sql = f"""
CREATE TABLE IF NOT EXISTS {DB_TABLE_NAME} (
id INTEGER PRIMARY KEY AUTOINCREMENT,
event_time TEXT,
event_outcome TEXT,
event_server TEXT
);
"""
sqlite_conn.execute(table_sql)
while True:
event_time = datetime.datetime.now(datetime.timezone.utc).isoformat()
event_outcome = "SUCCESS" if ping() else "FAIL"
event_server = f"{PING_SERVER}:{PING_PORT}"
insert_args = (event_time, event_outcome, event_server)
insert_sql = f"""
INSERT INTO {DB_TABLE_NAME} (event_time, event_outcome, event_server)
VALUES(?,?,?);
"""
with sqlite_conn:
sqlite_conn.execute(insert_sql, insert_args)
print(f"Inserted: {event_time}, {event_outcome}, {event_server}")
time.sleep(POLLING_INTERVAL)
if __name__ == "__main__":
main()
```
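A companion read-side sketch (not part of the original script): summarise the outcomes that poll.py has recorded, grouped by hour of the ISO-8601 `event_time`. The file and table names match the constants above.
```python
import sqlite3

conn = sqlite3.connect("raw.db")
rows = conn.execute(
    """
    SELECT substr(event_time, 1, 13) AS hour,  -- 'YYYY-MM-DDTHH'
           event_outcome,
           COUNT(*)
    FROM ping_data
    GROUP BY hour, event_outcome
    ORDER BY hour;
    """
).fetchall()
for hour, outcome, count in rows:
    print(hour, outcome, count)
conn.close()
```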
|
{
"source": "Jefffrey/streamlink",
"score": 3
}
|
#### File: streamlink/docs/ext_argparse.py
```python
import argparse
import re
from textwrap import dedent
from docutils import nodes
from docutils.parsers.rst import Directive
from docutils.parsers.rst.directives import unchanged
from docutils.statemachine import ViewList
from sphinx.util.nodes import nested_parse_with_titles
_block_re = re.compile(r":\n{2}\s{2}")
_default_re = re.compile(r"Default is (.+)\.\n")
_note_re = re.compile(r"Note: (.*)(?:\n\n|\n*$)", re.DOTALL)
_option_line_re = re.compile(r"^(?!\s{2}|Example: )(.+)$", re.MULTILINE)
_option_re = re.compile(r"(?:^|(?<=\s))(--\w[\w-]*\w)\b")
_prog_re = re.compile(r"%\(prog\)s")
def get_parser(module_name, attr):
module = __import__(module_name, globals(), locals(), [attr])
parser = getattr(module, attr)
return parser if not callable(parser) else parser()
def indent(value, length=4):
space = " " * length
return "\n".join(space + line for line in value.splitlines())
class ArgparseDirective(Directive):
has_content = True
option_spec = {
"module": unchanged,
"attr": unchanged,
}
_headlines = ["^", "~"]
def process_help(self, help):
# Dedent the help to make sure we are always dealing with
# non-indented text.
help = dedent(help)
# Replace option references with links.
# Do this before indenting blocks and notes.
help = _option_line_re.sub(
lambda m: (
_option_re.sub(
lambda m2: (
":option:`{0}`".format(m2.group(1))
if m2.group(1) in self._available_options
else m2.group(0)
),
m.group(1)
)
),
help
)
# Create simple blocks.
help = _block_re.sub("::\n\n ", help)
# Boldify the default value.
help = _default_re.sub(r"Default is: **\1**.\n", help)
# Create note directives from "Note: " paragraphs.
help = _note_re.sub(
lambda m: ".. note::\n\n" + indent(m.group(1)) + "\n\n",
help
)
# workaround to replace %(prog)s with streamlink
help = _prog_re.sub("streamlink", help)
return indent(help)
def generate_group_rst(self, group):
for action in group._group_actions:
# don't document suppressed parameters
if action.help == argparse.SUPPRESS:
continue
metavar = action.metavar
if isinstance(metavar, tuple):
metavar = " ".join(metavar)
options = []
# parameter(s) with metavar
if action.option_strings and metavar:
for arg in action.option_strings:
# optional parameter value
if action.nargs == "?":
metavar = f"[{metavar}]"
options.append(f"{arg} {metavar}")
# positional parameter
elif metavar:
options.append(metavar)
# parameter(s) without metavar
else:
options += action.option_strings
directive = ".. option:: "
options = f"\n{' ' * len(directive)}".join(options)
yield f"{directive}{options}"
yield ""
for line in self.process_help(action.help).split("\n"):
yield line
yield ""
if hasattr(action, "plugins") and len(action.plugins) > 0:
yield f" **Supported plugins:** {', '.join(action.plugins)}"
yield ""
def generate_parser_rst(self, parser, depth=0):
if depth >= len(self._headlines):
return
for group in parser._action_groups:
# Exclude empty groups
if not group._group_actions and not group._action_groups:
continue
title = group.title
yield ""
yield title
yield self._headlines[depth] * len(title)
yield from self.generate_group_rst(group)
if group._action_groups:
yield ""
yield from self.generate_parser_rst(group, depth + 1)
def run(self):
module = self.options.get("module")
attr = self.options.get("attr")
parser = get_parser(module, attr)
self._available_options = []
for action in parser._actions:
# positional parameters have an empty option_strings list
self._available_options += action.option_strings or [action.dest]
node = nodes.section()
node.document = self.state.document
result = ViewList()
for line in self.generate_parser_rst(parser):
result.append(line, "argparse")
nested_parse_with_titles(self.state, result, node)
return node.children
def setup(app):
app.add_directive("argparse", ArgparseDirective)
```
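For context, a sketch of how the directive above would typically be wired into a Sphinx project's conf.py; the extension name and the `:module:`/`:attr:` values are illustrative, not necessarily how Streamlink's docs register it.
```python
# conf.py (sketch)
extensions = [
    "ext_argparse",  # the module defining setup(app) above; must be importable
]

# A .rst page can then render a parser's options with:
#
#   .. argparse::
#       :module: streamlink_cli.argparser
#       :attr: parser
#
# where :module:/:attr: point at an argparse.ArgumentParser (or a callable
# returning one), as resolved by get_parser().
```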
|
{
"source": "jefffriesen/hpxml_version_translator",
"score": 2
}
|
#### File: hpxml_version_translator/hpxml_version_translator/converter.py
```python
from collections import defaultdict
from copy import deepcopy
import datetime as dt
from deprecated import deprecated
from lxml import etree, objectify
import os
import pathlib
import re
import tempfile
from typing import Tuple, Union, BinaryIO, List
import io
import warnings
from hpxml_version_translator import exceptions as exc
File = Union[str, bytes, os.PathLike, BinaryIO]
def pathobj_to_str(x: File) -> Union[str, BinaryIO]:
"""Convert pathlib.Path object (if it is one) to a path string
lxml doesn't like pathlib.Path objects, so change them to a string if
necessary first.
:param x: filepath
:type x: pathlib.Path or str or file-like object
:return: file path string
:rtype: str or whatever was passed in
"""
if isinstance(x, pathlib.PurePath):
return str(x)
elif (
isinstance(x, str)
or isinstance(x, io.BufferedWriter)
or isinstance(x, io.BytesIO)
):
return x
else: # tempfile.NamedTemporaryFile
return x.name
def convert_str_version_to_tuple(version: str) -> Tuple[int]:
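# e.g. "2.3" -> [2, 3, 0]: split on dots and right-pad with zeros to three components.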
schema_version = list(map(int, version.split(".")))
schema_version.extend((3 - len(schema_version)) * [0])
return schema_version
def detect_hpxml_version(hpxmlfilename: File) -> List[int]:
doc = etree.parse(pathobj_to_str(hpxmlfilename))
return convert_str_version_to_tuple(doc.getroot().attrib["schemaVersion"])
def get_hpxml_versions(major_version: Union[int, None] = None) -> List[str]:
schemas_dir = pathlib.Path(__file__).resolve().parent / "schemas"
schema_versions = []
for schema_dir in schemas_dir.iterdir():
if not schema_dir.is_dir() or schema_dir.name == "v1.1.1":
continue
tree = etree.parse(str(schema_dir / "HPXMLDataTypes.xsd"))
root = tree.getroot()
ns = {"xs": root.nsmap["xs"]}
schema_versions.extend(
root.xpath(
'//xs:simpleType[@name="schemaVersionType"]/xs:restriction/xs:enumeration/@value',
namespaces=ns,
smart_strings=False,
)
)
if major_version:
schema_versions = list(
filter(
lambda x: convert_str_version_to_tuple(x)[0] == major_version,
schema_versions,
)
)
return schema_versions
def add_after(
parent_el: etree._Element, list_of_el_names: List[str], el_to_add: etree._Element
) -> None:
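# Insert el_to_add right after the last existing sibling named in list_of_el_names
# (searched from the end of the list); if none of those siblings exist, make it
# parent_el's first child.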
for sibling_name in reversed(list_of_el_names):
try:
sibling = getattr(parent_el, sibling_name)[-1]
except AttributeError:
continue
else:
sibling.addnext(el_to_add)
return
parent_el.insert(0, el_to_add)
def add_before(
parent_el: etree._Element, list_of_el_names: List[str], el_to_add: etree._Element
) -> None:
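# Insert el_to_add right before the first existing sibling named in list_of_el_names
# (searched in list order); if none of those siblings exist, append it as
# parent_el's last child.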
for sibling_name in list_of_el_names:
try:
sibling = getattr(parent_el, sibling_name)[0]
except AttributeError:
continue
else:
sibling.addprevious(el_to_add)
return
parent_el.append(el_to_add)
def convert_hpxml_to_version(
hpxml_version: str, hpxml_file: File, hpxml_out_file: File
) -> None:
# Validate that the hpxml_version requested is a valid one.
hpxml_version_strs = get_hpxml_versions()
schema_version_requested = convert_str_version_to_tuple(hpxml_version)
major_version_requested = schema_version_requested[0]
if hpxml_version not in hpxml_version_strs:
raise exc.HpxmlTranslationError(
f"HPXML version {hpxml_version} is not valid. Must be one of {', '.join(hpxml_version_strs)}."
)
# Validate that the hpxml_version requested is a newer one that the current one.
schema_version_file = detect_hpxml_version(hpxml_file)
major_version_file = schema_version_file[0]
if major_version_requested <= major_version_file:
raise exc.HpxmlTranslationError(
f"HPXML version requested is {hpxml_version} but input file major version is {schema_version_file[0]}"
)
version_translator_funcs = {
1: convert_hpxml1_to_2,
2: convert_hpxml2_to_3,
3: convert_hpxml3_to_4,
}
current_file = hpxml_file
with tempfile.TemporaryDirectory() as tmpdir:
for current_version in range(major_version_file, major_version_requested):
next_version = current_version + 1
if current_version + 1 == major_version_requested:
next_file = hpxml_out_file
version_translator_funcs[current_version](
current_file, next_file, hpxml_version
)
else:
next_file = pathlib.Path(tmpdir, f"{next_version}.xml")
version_translator_funcs[current_version](current_file, next_file)
current_file = next_file
@deprecated(version="1.0.0", reason="Use convert_hpxml_to_version instead")
def convert_hpxml_to_3(hpxml_file: File, hpxml3_file: File) -> None:
convert_hpxml_to_version("3.0", hpxml_file, hpxml3_file)
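# Typical entry point usage (sketch): translate an older document straight to the
# requested target version, e.g.
#     convert_hpxml_to_version("3.0", "home_v2.xml", "home_v3.xml")
# Intermediate major-version hops are chained through the per-version translator
# functions below, with intermediate files written to a temporary directory.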
def convert_hpxml1_to_2(
hpxml1_file: File, hpxml2_file: File, version: str = "2.3"
) -> None:
"""Convert an HPXML v1 file to HPXML v2
:param hpxml1_file: HPXML v1 input file
:type hpxml1_file: pathlib.Path, str, or file-like
:param hpxml2_file: HPXML v2 output file
:type hpxml2_file: pathlib.Path, str, or file-like
:param version: Target version
:type version: str
"""
if version not in get_hpxml_versions(major_version=2):
raise exc.HpxmlTranslationError(
f"convert_hpxml1_to_2 must have a valid target version of 2.x, got {version}."
)
# Load Schemas
schemas_dir = pathlib.Path(__file__).resolve().parent / "schemas"
hpxml1_schema_doc = etree.parse(str(schemas_dir / "v1.1.1" / "HPXML.xsd"))
hpxml1_ns = hpxml1_schema_doc.getroot().attrib["targetNamespace"]
hpxml1_schema = etree.XMLSchema(hpxml1_schema_doc)
hpxml2_schema_doc = etree.parse(str(schemas_dir / "v2.3" / "HPXML.xsd"))
hpxml2_ns = hpxml2_schema_doc.getroot().attrib["targetNamespace"]
hpxml2_schema = etree.XMLSchema(hpxml2_schema_doc)
E = objectify.ElementMaker(
namespace=hpxml2_ns, nsmap={None: hpxml2_ns}, annotate=False
)
xpkw = {"namespaces": {"h": hpxml2_ns}}
# Ensure we're working with valid HPXML v1.x (earlier versions should validate against v1.1.1 schema)
hpxml1_doc = objectify.parse(pathobj_to_str(hpxml1_file))
hpxml1_schema.assertValid(hpxml1_doc)
# Change the namespace of every element to use the HPXML v2 namespace
# https://stackoverflow.com/a/51660868/11600307
change_ns_xslt = etree.parse(
str(pathlib.Path(__file__).resolve().parent / "change_namespace.xsl")
)
hpxml2_doc = hpxml1_doc.xslt(
change_ns_xslt, orig_namespace=f"'{hpxml1_ns}'", new_namespace=f"'{hpxml2_ns}'"
)
root = hpxml2_doc.getroot()
# Change version
root.attrib["schemaVersion"] = version
# TODO: Moved the BPI 2400 elements and renamed/reorganized them.
# Renamed element AttachedToCAZ under water heater to fix a typo.
for el in root.xpath("//h:WaterHeatingSystem/h:AtachedToCAZ", **xpkw):
el.tag = f"{{{hpxml2_ns}}}AttachedToCAZ"
# Removed "batch heater" from SolarCollectorLoopType in lieu of the previously
# added "integrated collector storage" enumeration on SolarThermalCollectorType.
for batch_heater in root.xpath(
'//h:SolarThermal/h:SolarThermalSystem[h:CollectorLoopType="batch heater"]',
**xpkw,
):
if not hasattr(batch_heater, "CollectorType"):
add_after(
batch_heater,
["CollectorLoopType"],
E.CollectorType("integrated collector storage"),
)
batch_heater.remove(batch_heater.CollectorLoopType)
# Throw a warning if there are BPI2400 elements and move it into an extension
bpi2400_els = root.xpath("//h:BPI2400Inputs", **xpkw)
if bpi2400_els:
warnings.warn(
"BPI2400Inputs in v1.1.1 are ambiguous and aren't translated into their "
"corresponding elements in v2.x. They have been moved to an extension instead."
)
for el in bpi2400_els:
parent_el = el.getparent()
if not hasattr(parent_el, "extension"):
parent_el.append(E.extension())
parent_el.extension.append(deepcopy(el))
parent_el.remove(el)
# Write out new file
hpxml2_doc.write(pathobj_to_str(hpxml2_file), pretty_print=True, encoding="utf-8")
hpxml2_schema.assertValid(hpxml2_doc)
def convert_hpxml2_to_3(
hpxml2_file: File, hpxml3_file: File, version: str = "3.0"
) -> None:
"""Convert an HPXML v2 file to HPXML v3
:param hpxml2_file: HPXML v2 input file
:type hpxml2_file: pathlib.Path, str, or file-like
:param hpxml3_file: HPXML v3 output file
:type hpxml3_file: pathlib.Path, str, or file-like
:param version: Target version
:type version: str
"""
if version not in get_hpxml_versions(major_version=3):
raise exc.HpxmlTranslationError(
f"convert_hpxml2_to_3 must have a valid target version of 3.x, got {version}."
)
# Load Schemas
schemas_dir = pathlib.Path(__file__).resolve().parent / "schemas"
hpxml2_schema_doc = etree.parse(str(schemas_dir / "v2.3" / "HPXML.xsd"))
hpxml2_ns = hpxml2_schema_doc.getroot().attrib["targetNamespace"]
hpxml2_schema = etree.XMLSchema(hpxml2_schema_doc)
hpxml3_schema_doc = etree.parse(str(schemas_dir / "v3.0" / "HPXML.xsd"))
hpxml3_ns = hpxml3_schema_doc.getroot().attrib["targetNamespace"]
hpxml3_schema = etree.XMLSchema(hpxml3_schema_doc)
E = objectify.ElementMaker(
namespace=hpxml3_ns, nsmap={None: hpxml3_ns}, annotate=False
)
xpkw = {"namespaces": {"h": hpxml3_ns}}
# Ensure we're working with valid HPXML v2.x (earlier versions should validate against v2.3 schema)
hpxml2_doc = objectify.parse(pathobj_to_str(hpxml2_file))
hpxml2_schema.assertValid(hpxml2_doc)
# Change the namespace of every element to use the HPXML v3 namespace
# https://stackoverflow.com/a/51660868/11600307
change_ns_xslt = etree.parse(
str(pathlib.Path(__file__).resolve().parent / "change_namespace.xsl")
)
hpxml3_doc = hpxml2_doc.xslt(
change_ns_xslt, orig_namespace=f"'{hpxml2_ns}'", new_namespace=f"'{hpxml3_ns}'"
)
root = hpxml3_doc.getroot()
# Change version
root.attrib["schemaVersion"] = version
# Standardized location mapping
location_map = {
"ambient": "outside", # 'ambient' will be mapped to 'ground' for FoundationWall
"conditioned space": "living space",
"unconditioned basement": "basement - unconditioned",
"unconditioned attic": "attic - unconditioned",
"unvented crawlspace": "crawlspace - unvented",
"vented crawlspace": "crawlspace - vented",
}
foundation_location_map = deepcopy(location_map)
foundation_location_map["ambient"] = "ground"
# Fixing project ids
# https://github.com/hpxmlwg/hpxml/pull/197
# This is really messy. I can see why we fixed it.
def get_pre_post_from_building_id(building_id):
event_type = root.xpath(
"h:Building[h:BuildingID/@id=$bldgid]/h:ProjectStatus/h:EventType/text()",
smart_strings=False,
bldgid=building_id,
**xpkw,
)
if len(event_type) == 1:
if event_type[0] in (
"proposed workscope",
"approved workscope",
"construction-period testing/daily test out",
"job completion testing/final inspection",
"quality assurance/monitoring",
):
return "post"
elif event_type[0] in ("audit", "preconstruction"):
return "pre"
else:
return None
else:
return None
for i, project in enumerate(root.xpath("h:Project", **xpkw), 1):
# Add the ProjectID element if it isn't there
if not hasattr(project, "ProjectID"):
add_after(project, ["BuildingID"], E.ProjectID(id=f"project-{i}"))
building_ids_by_pre_post = defaultdict(set)
# Gather together the buildings in BuildingID and ProjectSystemIdentifiers
building_id = project.BuildingID.attrib["id"]
building_ids_by_pre_post[get_pre_post_from_building_id(building_id)].add(
building_id
)
for psi in project.xpath("h:ProjectDetails/h:ProjectSystemIdentifiers", **xpkw):
building_id = psi.attrib.get("id")
building_ids_by_pre_post[get_pre_post_from_building_id(building_id)].add(
building_id
)
for pre_post in ("pre", "post"):
if len(building_ids_by_pre_post[pre_post]) == 0:
for building_id in root.xpath("h:Building/h:BuildingID/@id", **xpkw):
if get_pre_post_from_building_id(building_id) == pre_post:
building_ids_by_pre_post[pre_post].add(building_id)
# If there are more than one of each pre and post, throw an error
if len(building_ids_by_pre_post["pre"]) == 0:
raise exc.HpxmlTranslationError(
f"Project[{i}] has no references to Building nodes with an 'audit' or 'preconstruction' EventType."
)
elif len(building_ids_by_pre_post["pre"]) > 1:
raise exc.HpxmlTranslationError(
f"Project[{i}] has more than one reference to Building nodes with an "
"'audit' or 'preconstruction' EventType."
)
if len(building_ids_by_pre_post["post"]) == 0:
raise exc.HpxmlTranslationError(
f"Project[{i}] has no references to Building nodes with a post retrofit EventType."
)
elif len(building_ids_by_pre_post["post"]) > 1:
raise exc.HpxmlTranslationError(
f"Project[{i}] has more than one reference to Building nodes with a post retrofit EventType."
)
pre_building_id = building_ids_by_pre_post["pre"].pop()
post_building_id = building_ids_by_pre_post["post"].pop()
# Add the pre building
project.ProjectID.addnext(E.PreBuildingID(id=pre_building_id))
for el in root.xpath(
"h:Building/h:BuildingID[@id=$bldgid]/*", bldgid=pre_building_id, **xpkw
):
project.PreBuildingID.append(deepcopy(el))
# Add the post building
project.PreBuildingID.addnext(E.PostBuildingID(id=post_building_id))
for el in root.xpath(
"h:Building/h:BuildingID[@id=$bldgid]/*", bldgid=post_building_id, **xpkw
):
project.PostBuildingID.append(deepcopy(el))
# Move the ambiguous BuildingID to an extension
if not hasattr(project, "extension"):
project.append(E.extension())
project.extension.append(deepcopy(project.BuildingID))
project.remove(project.BuildingID)
# Move the ProjectSystemIdentifiers to an extension
for psi in project.xpath("h:ProjectDetails/h:ProjectSystemIdentifiers", **xpkw):
project.extension.append(deepcopy(psi))
project.ProjectDetails.remove(psi)
# Green Building Verification
# https://github.com/hpxmlwg/hpxml/pull/66
# This next one is covered here because the BPI-2101 verification didn't exist in v2, so no need to translate it
# https://github.com/hpxmlwg/hpxml/pull/210
energy_score_els = root.xpath(
"h:Building/h:BuildingDetails/h:BuildingSummary/h:BuildingConstruction/h:EnergyScore",
**xpkw,
)
for i, es in enumerate(energy_score_els, 1):
bldg_details = es.getparent().getparent().getparent()
if not hasattr(bldg_details, "GreenBuildingVerifications"):
add_after(
bldg_details,
["BuildingSummary", "ClimateandRiskZones"],
E.GreenBuildingVerifications(),
)
gbv = E.GreenBuildingVerification(
E.SystemIdentifier(id=f"energy-score-{i}"),
E.Type(
{
"US DOE Home Energy Score": "Home Energy Score",
"RESNET HERS": "HERS Index Score",
"other": "other",
}[str(es.ScoreType)]
),
E.Body(
{
"US DOE Home Energy Score": "US DOE",
"RESNET HERS": "RESNET",
"other": "other",
}[str(es.ScoreType)]
),
E.Metric(str(es.Score)),
)
if hasattr(es, "OtherScoreType"):
gbv.Type.addnext(E.OtherType(str(es.OtherScoreType)))
if hasattr(es, "ScoreDate"):
gbv.append(E.Year(dt.datetime.strptime(str(es.ScoreDate), "%Y-%m-%d").year))
if hasattr(es, "extension"):
gbv.append(deepcopy(es.extension))
bldg_details.GreenBuildingVerifications.append(gbv)
es.getparent().remove(es)
for i, prog_cert in enumerate(
root.xpath("h:Project/h:ProjectDetails/h:ProgramCertificate", **xpkw), 1
):
project_details = prog_cert.getparent()
bldg_id = project_details.getparent().PostBuildingID.attrib["id"]
bldg_details = root.xpath(
"h:Building[h:BuildingID/@id=$bldgid]/h:BuildingDetails",
bldgid=bldg_id,
**xpkw,
)[0]
if not hasattr(bldg_details, "GreenBuildingVerifications"):
add_after(
bldg_details,
["BuildingSummary", "ClimateandRiskZones"],
E.GreenBuildingVerifications(),
)
gbv = E.GreenBuildingVerification(
E.SystemIdentifier(id=f"program-certificate-{i}"),
E.Type(
{
"Home Performance with Energy Star": "Home Performance with ENERGY STAR",
"LEED Certified": "LEED For Homes",
"LEED Silver": "LEED For Homes",
"LEED Gold": "LEED For Homes",
"LEED Platinum": "LEED For Homes",
"other": "other",
}[str(prog_cert)]
),
)
if hasattr(project_details, "CertifyingOrganization"):
gbv.append(E.Body(str(project_details.CertifyingOrganization)))
m = re.match(r"LEED (\w+)$", str(prog_cert))
if m:
gbv.append(E.Rating(m.group(1)))
if hasattr(project_details, "CertifyingOrganizationURL"):
gbv.append(E.URL(str(project_details.CertifyingOrganizationURL)))
if hasattr(project_details, "YearCertified"):
gbv.append(E.Year(int(project_details.YearCertified)))
bldg_details.GreenBuildingVerifications.append(gbv)
for i, es_home_ver in enumerate(
root.xpath("h:Project/h:ProjectDetails/h:EnergyStarHomeVersion", **xpkw)
):
bldg_id = es_home_ver.getparent().getparent().PostBuildingID.attrib["id"]
bldg_details = root.xpath(
"h:Building[h:BuildingID/@id=$bldgid]/h:BuildingDetails",
bldgid=bldg_id,
**xpkw,
)[0]
if not hasattr(bldg_details, "GreenBuildingVerifications"):
add_after(
bldg_details,
["BuildingSummary", "ClimateandRiskZones"],
E.GreenBuildingVerifications(),
)
gbv = E.GreenBuildingVerification(
E.SystemIdentifier(id=f"energy-star-home-{i}"),
E.Type("ENERGY STAR Certified Homes"),
E.Version(str(es_home_ver)),
)
bldg_details.GreenBuildingVerifications.append(gbv)
for el_name in (
"CertifyingOrganization",
"CertifyingOrganizationURL",
"YearCertified",
"ProgramCertificate",
"EnergyStarHomeVersion",
):
for el in root.xpath(f"//h:ProjectDetails/h:{el_name}", **xpkw):
el.getparent().remove(el)
# Addressing Inconsistencies
# https://github.com/hpxmlwg/hpxml/pull/124
for el in root.xpath("//h:HeatPump/h:AnnualCoolEfficiency", **xpkw):
el.tag = f"{{{hpxml3_ns}}}AnnualCoolingEfficiency"
for el in root.xpath("//h:HeatPump/h:AnnualHeatEfficiency", **xpkw):
el.tag = f"{{{hpxml3_ns}}}AnnualHeatingEfficiency"
# Replaces Measure/InstalledComponent with Measure/InstalledComponents/InstalledComponent
for i, ic in enumerate(
root.xpath(
"h:Project/h:ProjectDetails/h:Measures/h:Measure/h:InstalledComponent",
**xpkw,
)
):
ms = ic.getparent()
if not hasattr(ms, "InstalledComponents"):
add_before(ms, ["extension"], E.InstalledComponents())
ms.InstalledComponents.append(deepcopy(ic))
ms.remove(ic)
# Replaces WeatherStation/SystemIdentifiersInfo with WeatherStation/SystemIdentifier
for el in root.xpath("//h:WeatherStation/h:SystemIdentifiersInfo", **xpkw):
el.tag = f"{{{hpxml3_ns}}}SystemIdentifier"
# Renames "central air conditioning" to "central air conditioner" for CoolingSystemType
for el in root.xpath("//h:CoolingSystem/h:CoolingSystemType", **xpkw):
if el == "central air conditioning":
el._setText("central air conditioner")
# Renames HeatPump/BackupAFUE to BackupAnnualHeatingEfficiency, accepts 0-1 instead of 1-100
for bkupafue in root.xpath(
"h:Building/h:BuildingDetails/h:Systems/h:HVAC/h:HVACPlant/h:HeatPump/h:BackupAFUE",
**xpkw,
):
heatpump = bkupafue.getparent()
add_before(
heatpump,
[
"BackupHeatingCapacity",
"BackupHeatingSwitchoverTemperature",
"FractionHeatLoadServed",
"FractionCoolLoadServed",
"FloorAreaServed",
"AnnualCoolingEfficiency",
"AnnualHeatingEfficiency",
"extension",
],
E.BackupAnnualHeatingEfficiency(
E.Units("AFUE"), E.Value(f"{float(bkupafue.text) / 100}")
),
)
heatpump.remove(bkupafue)
# Renames FoundationWall/BelowGradeDepth to FoundationWall/DepthBelowGrade
for el in root.xpath("//h:FoundationWall/h:BelowGradeDepth", **xpkw):
el.tag = f"{{{hpxml3_ns}}}DepthBelowGrade"
# Clothes Dryer CEF
# https://github.com/hpxmlwg/hpxml/pull/145
for el in root.xpath("//h:ClothesDryer/h:EfficiencyFactor", **xpkw):
el.tag = f"{{{hpxml3_ns}}}EnergyFactor"
# Enclosure
# https://github.com/hpxmlwg/hpxml/pull/181
for fw in root.xpath(
"h:Building/h:BuildingDetails/h:Enclosure/h:Foundations/h:Foundation/h:FoundationWall",
**xpkw,
):
enclosure = fw.getparent().getparent().getparent()
foundation = fw.getparent()
add_before(
foundation,
["AttachedToFrameFloor", "AttachedToSlab", "AnnualEnergyUse", "extension"],
E.AttachedToFoundationWall(idref=fw.SystemIdentifier.attrib["id"]),
)
if not hasattr(enclosure, "FoundationWalls"):
add_after(
enclosure,
[
"AirInfiltration",
"Attics",
"Foundations",
"Garages",
"Roofs",
"RimJoists",
"Walls",
],
E.FoundationWalls(),
)
enclosure.FoundationWalls.append(deepcopy(fw))
this_fw = enclosure.FoundationWalls.FoundationWall[-1]
if hasattr(this_fw, "AdjacentTo"):
try:
fw_boundary = foundation_location_map[str(fw.AdjacentTo)]
except KeyError:
fw_boundary = str(fw.AdjacentTo) # retain unchanged location name
try:
boundary_v3 = {
"other housing unit": "Exterior",
"ground": "Exterior",
"ambient": "Exterior",
"attic": "Exterior",
"garage": "Exterior",
"living space": "Interior",
"unconditioned basement": "Interior",
"crawlspace": "Interior",
}[str(fw.AdjacentTo)]
if boundary_v3 == "Interior" and hasattr(foundation, "FoundationType"):
# Check that this matches the Foundation/FoundationType if available
if fw.AdjacentTo == "unconditioned basement" and (
foundation.xpath(
'count(h:FoundationType/h:Basement/h:Conditioned[text()="true"])',
**xpkw,
)
> 0
or not hasattr(foundation.FoundationType, "Basement")
):
boundary_v3 = "Exterior"
elif fw.AdjacentTo == "crawlspace" and not hasattr(
foundation.FoundationType, "Crawlspace"
):
boundary_v3 = "Exterior"
add_after(
this_fw,
["SystemIdentifier", "ExternalResource", "AttachedToSpace"],
getattr(E, f"{boundary_v3}AdjacentTo")(fw_boundary),
)
except KeyError:
pass
this_fw.remove(this_fw.AdjacentTo)
foundation.remove(fw)
# Attics
for bldg_const in root.xpath(
"h:Building/h:BuildingDetails/h:BuildingSummary/h:BuildingConstruction", **xpkw
):
if hasattr(bldg_const, "AtticType"):
if bldg_const.AtticType == "vented attic":
bldg_const.AtticType = E.AtticType(E.Attic(E.Vented(True)))
elif bldg_const.AtticType == "unvented attic":
bldg_const.AtticType = E.AtticType(E.Attic(E.Vented(False)))
elif bldg_const.AtticType == "flat roof":
bldg_const.AtticType = E.AtticType(E.FlatRoof())
elif bldg_const.AtticType == "cathedral ceiling":
bldg_const.AtticType = E.AtticType(E.CathedralCeiling())
elif bldg_const.AtticType == "cape cod":
bldg_const.AtticType = E.AtticType(E.Attic(E.CapeCod(True)))
elif bldg_const.AtticType == "other":
bldg_const.AtticType = E.AtticType(E.Other())
elif bldg_const.AtticType == "venting unknown attic":
bldg_const.AtticType = E.AtticType(
E.Attic(E.extension(E.Vented("unknown")))
)
for i, attic in enumerate(
root.xpath(
"h:Building/h:BuildingDetails/h:Enclosure/h:AtticAndRoof/h:Attics/h:Attic",
**xpkw,
)
):
enclosure = attic.getparent().getparent().getparent()
this_attic = deepcopy(attic)
this_attic_type = None
if hasattr(this_attic, "AtticType"):
this_attic_type = this_attic.AtticType
if this_attic.AtticType == "vented attic":
this_attic.AtticType = E.AtticType(E.Attic(E.Vented(True)))
elif this_attic.AtticType == "unvented attic":
this_attic.AtticType = E.AtticType(E.Attic(E.Vented(False)))
elif this_attic.AtticType == "flat roof":
this_attic.AtticType = E.AtticType(E.FlatRoof())
elif this_attic.AtticType == "cathedral ceiling":
this_attic.AtticType = E.AtticType(E.CathedralCeiling())
elif this_attic.AtticType == "cape cod":
this_attic.AtticType = E.AtticType(E.Attic(E.CapeCod(True)))
elif this_attic.AtticType == "other":
this_attic.AtticType = E.AtticType(E.Other())
elif this_attic.AtticType == "venting unknown attic":
this_attic.AtticType = E.AtticType(
E.Attic(E.extension(E.Vented("unknown")))
)
else:
raise exc.HpxmlTranslationError(
f"{hpxml2_file.name} was not able to be translated "
f"because 'AtticType' of {this_attic.SystemIdentifier.attrib['id']} is unknown."
)
if not hasattr(enclosure, "Attics"):
add_after(enclosure, ["AirInfiltration"], E.Attics())
# rearrange AttachedToRoof
if hasattr(this_attic, "AttachedToRoof"):
attached_to_roof = deepcopy(this_attic.AttachedToRoof)
this_attic.remove(
this_attic.AttachedToRoof
) # remove the AttachedToRoof of HPXML v2
add_after(
this_attic,
["SystemIdentifier", "AttachedToSpace", "AtticType", "VentilationRate"],
attached_to_roof,
)
# find the wall with the same id and add AtticWallType = knee wall
if hasattr(this_attic, "AtticKneeWall"):
knee_wall_id = this_attic.AtticKneeWall.attrib["idref"]
try:
knee_wall = root.xpath(
"h:Building/h:BuildingDetails/h:Enclosure/h:Walls/h:Wall[h:SystemIdentifier/@id=$sysid]",
sysid=knee_wall_id,
**xpkw,
)[0]
except IndexError:
warnings.warn(
f"Cannot find a knee wall attached to {this_attic.SystemIdentifier.attrib['id']}."
)
else:
if not hasattr(knee_wall, "AtticWallType"):
add_after(
knee_wall,
[
"SystemIdentifier",
"ExteriorAdjacentTo",
"InteriorAdjacentTo",
],
E.AtticWallType("knee wall"),
)
add_before(
this_attic,
["AttachedToFrameFloor", "AnnualEnergyUse", "extension"],
E.AttachedToWall(idref=knee_wall_id),
)
# create a FrameFloor adjacent to the attic and assign the area below to Area
# and then copy AtticFloorInsulation over to Insulation of the frame floor
if hasattr(this_attic, "AtticFloorInsulation") or (
this_attic_type not in ["cathedral ceiling", "flat roof", "cape cod"]
):
if not hasattr(enclosure, "FrameFloors"):
add_before(
enclosure,
["Slabs", "Windows", "Skylights", "Doors", "extension"],
E.FrameFloors(),
)
attic_floor_el = E.FrameFloor(E.SystemIdentifier(id=f"attic-floor-{i}"))
attic_floor_id = attic_floor_el.SystemIdentifier.attrib["id"]
add_before(
this_attic,
["AnnualEnergyUse", "extension"],
E.AttachedToFrameFloor(idref=attic_floor_id),
)
if hasattr(this_attic, "Area"):
attic_floor_el.append(E.Area(float(this_attic.Area)))
if hasattr(this_attic, "AtticFloorInsulation"):
attic_floor_insulation = deepcopy(this_attic.AtticFloorInsulation)
attic_floor_insulation.tag = f"{{{hpxml3_ns}}}Insulation"
attic_floor_el.append(attic_floor_insulation)
enclosure.FrameFloors.append(attic_floor_el)
# find Roof attached to Attic and move Insulation to Roof
# add insulation to v2 Roofs and these roofs will be converted into hpxml v3 later
if hasattr(this_attic, "AtticRoofInsulation"):
roof_insulation = deepcopy(this_attic.AtticRoofInsulation)
roof_insulation.tag = f"{{{hpxml3_ns}}}Insulation"
roof_idref = this_attic.AttachedToRoof.attrib["idref"]
try:
roof_attached_to_this_attic = root.xpath(
"h:Building/h:BuildingDetails/h:Enclosure/h:AtticAndRoof/\
h:Roofs/h:Roof[h:SystemIdentifier/@id=$sysid]",
sysid=roof_idref,
**xpkw,
)[0]
except IndexError:
warnings.warn(
f"Cannot find a roof attached to {this_attic.SystemIdentifier.attrib['id']}."
)
else:
add_before(roof_attached_to_this_attic, ["extension"], roof_insulation)
# translate v2 Attic/Area to the v3 Roof/Area for "cathedral ceiling" and "flat roof"
if hasattr(this_attic, "Area") and this_attic_type in [
"cathedral ceiling",
"flat roof",
]:
try:
roof_idref = this_attic.AttachedToRoof.attrib["idref"]
roof_attached_to_this_attic = root.xpath(
"h:Building/h:BuildingDetails/h:Enclosure/h:AtticAndRoof/\
h:Roofs/h:Roof[h:SystemIdentifier/@id=$sysid]",
sysid=roof_idref,
**xpkw,
)[0]
except IndexError:
warnings.warn(
f"Cannot find a roof attached to {this_attic.SystemIdentifier.attrib['id']}."
)
else:
if not hasattr(roof_attached_to_this_attic, "RoofArea"):
add_before(
roof_attached_to_this_attic,
["RadiantBarrier", "RadiantBarrierLocation", "extension"],
E.RoofArea(this_attic.Area.text),
)
# move Rafters to v2 Roofs and these roofs will be converted into hpxml v3 later
if hasattr(this_attic, "Rafters"):
rafters = deepcopy(this_attic.Rafters)
roof_idref = this_attic.AttachedToRoof.attrib["idref"]
try:
roof_attached_to_this_attic = root.xpath(
"h:Building/h:BuildingDetails/h:Enclosure/h:AtticAndRoof/\
h:Roofs/h:Roof[h:SystemIdentifier/@id=$sysid]",
sysid=roof_idref,
**xpkw,
)[0]
except IndexError:
warnings.warn(
f"Cannot find a roof attached to {this_attic.SystemIdentifier.attrib['id']}."
)
else:
add_after(
roof_attached_to_this_attic,
[
"SystemIdentifier",
"ExternalResource",
"AttachedToSpace",
"RoofColor",
"SolarAbsorptance",
"Emittance",
],
rafters,
)
if hasattr(this_attic, "InteriorAdjacentTo") and hasattr(
this_attic, "AtticType"
):
if this_attic_type in ["cathedral ceiling", "flat roof", "cape cod"]:
try:
roof_idref = this_attic.AttachedToRoof.attrib["idref"]
roof_attached_to_this_attic = root.xpath(
"h:Building/h:BuildingDetails/h:Enclosure/h:AtticAndRoof/h:Roofs/\
h:Roof[h:SystemIdentifier/@id=$sysid]",
sysid=roof_idref,
**xpkw,
)[0]
except (AttributeError, IndexError):
warnings.warn(
f"Cannot find a roof attached to {this_attic.SystemIdentifier.attrib['id']}."
)
else:
add_after(
roof_attached_to_this_attic,
["SystemIdentifier", "ExternalResource", "AttachedToSpace"],
E.InteriorAdjacentTo(this_attic.InteriorAdjacentTo.text),
)
else:
try:
floor_idref = this_attic.AttachedToFrameFloor.attrib["idref"]
floor_attached_to_this_attic = root.xpath(
"h:Building/h:BuildingDetails/h:Enclosure/h:FrameFloors/\
h:FrameFloor[h:SystemIdentifier/@id=$sysid]",
sysid=floor_idref,
**xpkw,
)[0]
except (AttributeError, IndexError):
warnings.warn(
f"Cannot find a frame floor attached to {this_attic.SystemIdentifier.attrib['id']}."
)
else:
add_after(
floor_attached_to_this_attic,
[
"SystemIdentifier",
"ExternalResource",
"AttachedToSpace",
"ExteriorAdjacentTo",
],
E.InteriorAdjacentTo(this_attic.InteriorAdjacentTo.text),
)
el_not_in_v3 = [
"ExteriorAdjacentTo",
"InteriorAdjacentTo",
"AtticKneeWall",
"AtticFloorInsulation",
"AtticRoofInsulation",
"Area",
"Rafters",
]
for el in el_not_in_v3:
if hasattr(this_attic, el):
this_attic.remove(this_attic[el])
enclosure.Attics.append(this_attic)
# Roofs
for roof in root.xpath(
"h:Building/h:BuildingDetails/h:Enclosure/h:AtticAndRoof/h:Roofs/h:Roof", **xpkw
):
enclosure = roof.getparent().getparent().getparent()
if not hasattr(enclosure, "Roofs"):
add_after(
enclosure,
["AirInfiltration", "Attics", "Foundations", "Garages"],
E.Roofs(),
)
enclosure.Roofs.append(deepcopy(roof))
this_roof = enclosure.Roofs.Roof[-1]
if hasattr(roof, "RoofArea"):
add_after(
this_roof,
[
"SystemIdentifier",
"ExternalResource",
"AttachedToSpace",
"InteriorAdjacentTo",
],
E.Area(float(roof.RoofArea)),
)
this_roof.remove(this_roof.RoofArea)
if hasattr(roof, "RoofType"):
roof_type = str(roof.RoofType)
this_roof.remove(this_roof.RoofType) # remove the RoofType of HPXML v2
add_after(
this_roof,
[
"SystemIdentifier",
"ExternalResource",
"AttachedToSpace",
"InteriorAdjacentTo",
"Area",
"Orientation",
"Azimuth",
],
E.RoofType(roof_type),
)
# remove AtticAndRoof after rearranging all attics and roofs
for enclosure in root.xpath("h:Building/h:BuildingDetails/h:Enclosure", **xpkw):
try:
enclosure.remove(enclosure.AtticAndRoof)
except AttributeError:
pass
# Frame Floors
for ff in root.xpath(
"h:Building/h:BuildingDetails/h:Enclosure/h:Foundations/h:Foundation/h:FrameFloor",
**xpkw,
):
enclosure = ff.getparent().getparent().getparent()
foundation = ff.getparent()
add_before(
foundation,
["AttachedToSlab", "AnnualEnergyUse", "extension"],
E.AttachedToFrameFloor(idref=ff.SystemIdentifier.attrib["id"]),
)
if not hasattr(enclosure, "FrameFloors"):
add_before(
enclosure,
["Slabs", "Windows", "Skylights", "Doors", "extension"],
E.FrameFloors(),
)
this_ff = deepcopy(ff)
enclosure.FrameFloors.append(this_ff)
foundation.remove(ff)
# Slabs
for slab in root.xpath(
"h:Building/h:BuildingDetails/h:Enclosure/h:Foundations/h:Foundation/h:Slab",
**xpkw,
):
enclosure = slab.getparent().getparent().getparent()
foundation = slab.getparent()
add_before(
foundation,
["AnnualEnergyUse", "extension"],
E.AttachedToSlab(idref=slab.SystemIdentifier.attrib["id"]),
)
if not hasattr(enclosure, "Slabs"):
add_before(
enclosure, ["Windows", "Skylights", "Doors", "extension"], E.Slabs()
)
enclosure.Slabs.append(deepcopy(slab))
foundation.remove(slab)
# Allow insulation location to be layer-specific
# https://github.com/hpxmlwg/hpxml/pull/188
for insulation_location in root.xpath(
"//h:Insulation/h:InsulationLocation", **xpkw
):
# Insulation location to be layer-specific
insulation = insulation_location.getparent()
if hasattr(insulation, "Layer"):
for layer in insulation.Layer:
if layer.InstallationType == "continuous":
layer.InstallationType._setText(
f"continuous - {str(insulation.InsulationLocation)}"
)
insulation.remove(insulation.InsulationLocation)
# Windows and Skylights
# Window sub-components
# https://github.com/hpxmlwg/hpxml/pull/202
for i, win in enumerate(root.xpath("//h:Window|//h:Skylight", **xpkw)):
if hasattr(win, "VisibleTransmittance"):
vis_trans = float(win.VisibleTransmittance)
win.remove(
win.VisibleTransmittance
) # remove VisibleTransmittance of HPXML v2
add_after(
win,
[
"SystemIdentifier",
"ExternalResource",
"Area",
"Quantity",
"Azimuth",
"Orientation",
"FrameType",
"GlassLayers",
"GlassType",
"GasFill",
"Condition",
"UFactor",
"SHGC",
],
E.VisibleTransmittance(vis_trans),
)
if hasattr(win, "ExteriorShading"):
ext_shade = str(win.ExteriorShading)
win.remove(win.ExteriorShading) # remove ExteriorShading of HPXML v2
add_after(
win,
[
"SystemIdentifier",
"ExternalResource",
"Area",
"Quantity",
"Azimuth",
"Orientation",
"FrameType",
"GlassLayers",
"GlassType",
"GasFill",
"Condition",
"UFactor",
"SHGC",
"VisibleTransmittance",
"NFRCCertified",
"ThirdPartyCertification",
"WindowFilm",
],
E.ExteriorShading(
E.SystemIdentifier(id=f"exterior-shading-{i}"), E.Type(ext_shade)
),
)
if hasattr(win, "Treatments"):
if win.Treatments in ["shading", "solar screen"]:
treatment_shade = E.ExteriorShading(
E.SystemIdentifier(id=f"treatment-shading-{i}"),
)
if win.Treatments == "solar screen":
treatment_shade.append(E.Type("solar screens"))
add_after(
win,
[
"SystemIdentifier",
"ExternalResource",
"Area",
"Quantity",
"Azimuth",
"Orientation",
"FrameType",
"GlassLayers",
"GlassType",
"GasFill",
"Condition",
"UFactor",
"SHGC",
"VisibleTransmittance",
"NFRCCertified",
"ThirdPartyCertification",
"WindowFilm",
],
treatment_shade,
)
elif win.Treatments == "window film":
add_after(
win,
[
"SystemIdentifier",
"ExternalResource",
"Area",
"Quantity",
"Azimuth",
"Orientation",
"FrameType",
"GlassLayers",
"GlassType",
"GasFill",
"Condition",
"UFactor",
"SHGC",
"VisibleTransmittance",
"NFRCCertified",
"ThirdPartyCertification",
],
E.WindowFilm(E.SystemIdentifier(id=f"window-film-{i}")),
)
win.remove(win.Treatments)
if hasattr(win, "InteriorShading"):
cache_interior_shading_type = str(win.InteriorShading)
win.InteriorShading.clear()
win.InteriorShading.append(E.SystemIdentifier(id=f"interior-shading-{i}"))
win.InteriorShading.append(E.Type(cache_interior_shading_type))
# Window/Skylight Interior Shading Fraction
# https://github.com/hpxmlwg/hpxml/pull/189
if hasattr(win, "InteriorShadingFactor"):
# handles a case where `InteriorShadingFactor` is specified without `InteriorShading`
if not hasattr(win, "InteriorShading"):
add_before(
win,
[
"StormWindow",
"MoveableInsulation",
"Overhangs",
"WeatherStripping",
"Operable",
"LeakinessDescription",
"WindowtoWallRatio",
"AttachedToWall",
"AnnualEnergyUse",
"extension",
],
E.InteriorShading(E.SystemIdentifier(id=f"interior-shading-{i}")),
)
win.InteriorShading.extend(
[
E.SummerShadingCoefficient(float(win.InteriorShadingFactor)),
E.WinterShadingCoefficient(float(win.InteriorShadingFactor)),
]
)
win.remove(win.InteriorShadingFactor)
if hasattr(win, "MovableInsulationRValue"):
add_after(
win,
[
"SystemIdentifier",
"ExternalResource",
"Area",
"Quantity",
"Azimuth",
"Orientation",
"FrameType",
"GlassLayers",
"GlassType",
"GasFill",
"Condition",
"UFactor",
"SHGC",
"VisibleTransmittance",
"NFRCCertified",
"ThirdPartyCertification",
"WindowFilm",
"ExteriorShading",
"InteriorShading",
"StormWindow",
],
E.MoveableInsulation(
E.SystemIdentifier(id=f"moveable-insulation-{i}"),
E.RValue(float(win.MovableInsulationRValue)),
),
)
win.remove(win.MovableInsulationRValue)
if hasattr(win, "GlassLayers"):
if win.GlassLayers in [
"single-paned with low-e storms",
"single-paned with storms",
]:
storm_window = E.StormWindow(E.SystemIdentifier(id=f"storm-window-{i}"))
if win.GlassLayers == "single-paned with low-e storms":
storm_window.append(E.GlassType("low-e"))
win.GlassLayers._setText("single-pane")
add_after(
win,
[
"SystemIdentifier",
"ExternalResource",
"Area",
"Quantity",
"Azimuth",
"Orientation",
"FrameType",
"GlassLayers",
"GlassType",
"GasFill",
"Condition",
"UFactor",
"SHGC",
"VisibleTransmittance",
"NFRCCertified",
"ThirdPartyCertification",
"WindowFilm",
"ExteriorShading",
"InteriorShading",
],
storm_window,
)
# Standardize Locations
# https://github.com/hpxmlwg/hpxml/pull/156
for el in root.xpath(
"//h:InteriorAdjacentTo|//h:ExteriorAdjacentTo|//h:DuctLocation|//h:HVACPlant/h:*/h:UnitLocation|//h:WaterHeatingSystem/h:Location|//h:Measure/h:Location", # noqa E501
**xpkw,
):
try:
el._setText(location_map[el.text])
except (KeyError, AttributeError):
pass
# Lighting Fraction Improvements
# https://github.com/hpxmlwg/hpxml/pull/165
ltgidx = 0
for ltgfracs in root.xpath(
"h:Building/h:BuildingDetails/h:Lighting/h:LightingFractions", **xpkw
):
ltg = ltgfracs.getparent()
for ltgfrac in ltgfracs.getchildren():
ltgidx += 1
ltggroup = E.LightingGroup(
E.SystemIdentifier(id=f"lighting-fraction-{ltgidx}"),
E.FractionofUnitsInLocation(ltgfrac.text),
E.LightingType(),
)
if ltgfrac.tag == f"{{{hpxml3_ns}}}FractionIncandescent":
ltggroup.LightingType.append(E.Incandescent())
elif ltgfrac.tag == f"{{{hpxml3_ns}}}FractionCFL":
ltggroup.LightingType.append(E.CompactFluorescent())
elif ltgfrac.tag == f"{{{hpxml3_ns}}}FractionLFL":
ltggroup.LightingType.append(E.FluorescentTube())
elif ltgfrac.tag == f"{{{hpxml3_ns}}}FractionLED":
ltggroup.LightingType.append(E.LightEmittingDiode())
add_after(ltg, ["LightingGroup"], ltggroup)
ltg.remove(ltgfracs)
# Deprecated items
# https://github.com/hpxmlwg/hpxml/pull/167
# Removes WaterHeaterInsulation/Pipe; use HotWaterDistribution/PipeInsulation instead
for i, pipe in enumerate(root.xpath("//h:WaterHeaterInsulation/h:Pipe", **xpkw), 1):
waterheating = pipe.getparent().getparent().getparent()
waterheatingsystem = pipe.getparent().getparent()
waterheatingsystem_idref = str(waterheatingsystem.SystemIdentifier.attrib["id"])
try:
hw_dist = waterheating.xpath(
"h:HotWaterDistribution[h:AttachedToWaterHeatingSystem/@idref=$sysid]",
sysid=waterheatingsystem_idref,
**xpkw,
)[0]
add_after(
hw_dist,
[
"SystemIdentifier",
"ExternalResource",
"AttachedToWaterHeatingSystem",
"SystemType",
],
E.PipeInsulation(E.PipeRValue(float(pipe.PipeRValue))),
)
except IndexError: # handles when there is no attached hot water distribution system
add_after(
waterheating,
["WaterHeatingSystem", "WaterHeatingControl"],
E.HotWaterDistribution(
E.SystemIdentifier(id=f"hotwater-distribution-{i}"),
E.AttachedToWaterHeatingSystem(idref=waterheatingsystem_idref),
E.PipeInsulation(E.PipeRValue(float(pipe.PipeRValue))),
),
)
waterheaterinsualtion = pipe.getparent()
waterheaterinsualtion.remove(pipe)
if waterheaterinsualtion.countchildren() == 0:
waterheaterinsualtion.getparent().remove(waterheaterinsualtion)
# Removes PoolPump/HoursPerDay; use PoolPump/PumpSpeed/HoursPerDay instead
for poolpump_hour in root.xpath("//h:PoolPump/h:HoursPerDay", **xpkw):
poolpump = poolpump_hour.getparent()
if not hasattr(poolpump, "PumpSpeed"):
add_before(
poolpump,
["extension"],
E.PumpSpeed(E.HoursPerDay(float(poolpump_hour))),
)
else:
add_before(
poolpump.PumpSpeed, ["extension"], E.HoursPerDay(float(poolpump_hour))
)
poolpump.remove(poolpump_hour)
# Removes "indoor water " (note extra trailing space) enumeration from WaterType
for watertype in root.xpath("//h:WaterType", **xpkw):
if watertype == "indoor water ":
watertype._setText(str(watertype).rstrip())
# Adds desuperheater flexibility
# https://github.com/hpxmlwg/hpxml/pull/184
for el in root.xpath("//h:WaterHeatingSystem/h:RelatedHeatingSystem", **xpkw):
el.tag = f"{{{hpxml3_ns}}}RelatedHVACSystem"
for el in root.xpath("//h:WaterHeatingSystem/h:HasGeothermalDesuperheater", **xpkw):
el.tag = f"{{{hpxml3_ns}}}UsesDesuperheater"
# Handle PV inverter efficiency value
# https://github.com/hpxmlwg/hpxml/pull/207
for inverter_efficiency in root.xpath("//h:InverterEfficiency", **xpkw):
if float(inverter_efficiency) > 1:
inverter_efficiency._setText(str(float(inverter_efficiency) / 100.0))
# Write out new file
hpxml3_doc.write(pathobj_to_str(hpxml3_file), pretty_print=True, encoding="utf-8")
hpxml3_schema.assertValid(hpxml3_doc)
def convert_hpxml3_to_4(
hpxml3_file: File, hpxml4_file: File, version: str = "4.0"
) -> None:
"""Convert an HPXML v3 file to HPXML v4
:param hpxml3_file: HPXML v3 input file
:type hpxml3_file: pathlib.Path, str, or file-like
:param hpxml4_file: HPXML v4 output file
:type hpxml4_file: pathlib.Path, str, or file-like
"""
if version not in get_hpxml_versions(major_version=4):
raise exc.HpxmlTranslationError(
"convert_hpxml3_to_4 must have valid target version of 4.x, got {version}."
)
# Load Schemas
schemas_dir = pathlib.Path(__file__).resolve().parent / "schemas"
hpxml3_schema_doc = etree.parse(str(schemas_dir / "v3.0" / "HPXML.xsd"))
hpxml3_ns = hpxml3_schema_doc.getroot().attrib["targetNamespace"]
hpxml3_schema = etree.XMLSchema(hpxml3_schema_doc)
hpxml4_schema_doc = etree.parse(str(schemas_dir / "v4.0" / "HPXML.xsd"))
hpxml4_ns = hpxml4_schema_doc.getroot().attrib["targetNamespace"]
hpxml4_schema = etree.XMLSchema(hpxml4_schema_doc)
E = objectify.ElementMaker(
namespace=hpxml3_ns, nsmap={None: hpxml3_ns}, annotate=False
)
xpkw = {"namespaces": {"h": hpxml3_ns}}
# Ensure we're working with valid HPXML v3.x
hpxml3_doc = objectify.parse(pathobj_to_str(hpxml3_file))
hpxml3_schema.assertValid(hpxml3_doc)
# Change the namespace of every element to use the HPXML v4 namespace
# https://stackoverflow.com/a/51660868/11600307
change_ns_xslt = etree.parse(
str(pathlib.Path(__file__).resolve().parent / "change_namespace.xsl")
)
hpxml4_doc = hpxml3_doc.xslt(
change_ns_xslt, orig_namespace=f"'{hpxml3_ns}'", new_namespace=f"'{hpxml4_ns}'"
)
root = hpxml4_doc.getroot()
# Change version
root.attrib["schemaVersion"] = "4.0"
# Move some FoundationWall/Slab insulation properties into their Layer elements
# https://github.com/hpxmlwg/hpxml/pull/215
for fwall in root.xpath("//h:FoundationWall", **xpkw):
if hasattr(fwall, "DistanceToTopOfInsulation"):
for il in fwall.xpath("h:Insulation/h:Layer", **xpkw):
add_before(
il,
["extension"],
E.DistanceToTopOfInsulation(fwall.DistanceToTopOfInsulation.text),
)
fwall.remove(fwall.DistanceToTopOfInsulation)
if hasattr(fwall, "DistanceToBottomOfInsulation"):
for il in fwall.xpath("h:Insulation/h:Layer", **xpkw):
add_before(
il,
["extension"],
E.DistanceToBottomOfInsulation(
fwall.DistanceToBottomOfInsulation.text
),
)
fwall.remove(fwall.DistanceToBottomOfInsulation)
for slab in root.xpath("//h:Slab", **xpkw):
if hasattr(slab, "PerimeterInsulationDepth"):
for il in slab.xpath("h:PerimeterInsulation/h:Layer", **xpkw):
add_before(
il,
["extension"],
E.InsulationDepth(slab.PerimeterInsulationDepth.text),
)
slab.remove(slab.PerimeterInsulationDepth)
if hasattr(slab, "UnderSlabInsulationWidth"):
for il in slab.xpath("h:UnderSlabInsulation/h:Layer", **xpkw):
add_before(
il,
["extension"],
E.InsulationWidth(slab.UnderSlabInsulationWidth.text),
)
slab.remove(slab.UnderSlabInsulationWidth)
if hasattr(slab, "UnderSlabInsulationSpansEntireSlab"):
for il in slab.xpath("h:UnderSlabInsulation/h:Layer", **xpkw):
add_before(
il,
["extension"],
E.InsulationSpansEntireSlab(
slab.UnderSlabInsulationSpansEntireSlab.text
),
)
slab.remove(slab.UnderSlabInsulationSpansEntireSlab)
# Battery Capacity
# https://github.com/hpxmlwg/hpxml/pull/296
for battery in root.xpath("//h:Battery", **xpkw):
if hasattr(battery, "NominalCapacity"):
value = battery.NominalCapacity.text
battery.NominalCapacity._setText(None)
battery.NominalCapacity.append(E.Units("Ah"))
battery.NominalCapacity.append(E.Value(value))
if hasattr(battery, "UsableCapacity"):
value = battery.UsableCapacity.text
battery.UsableCapacity._setText(None)
battery.UsableCapacity.append(E.Units("Ah"))
battery.UsableCapacity.append(E.Value(value))
# Write out new file
hpxml4_doc.write(pathobj_to_str(hpxml4_file), pretty_print=True, encoding="utf-8")
hpxml4_schema.assertValid(hpxml4_doc)
```
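For orientation, a minimal usage sketch of the converter module above; the input and output paths are placeholders, and only the call signatures shown in this file and its tests are relied upon:

```python
from hpxml_version_translator.converter import (
    convert_hpxml3_to_4,
    convert_hpxml_to_version,
)

# "my_home_v3.xml" is a placeholder for a valid HPXML v3 document on disk.

# Dispatching entry point exercised by the tests; selects the conversion
# path for the requested target version.
with open("my_home_v4.xml", "wb") as f_out:
    convert_hpxml_to_version("4.0", "my_home_v3.xml", f_out)

# Single-step v3 -> v4 translation defined above; paths, Path objects, or
# file-like objects are accepted per the docstring.
convert_hpxml3_to_4("my_home_v3.xml", "my_home_v4_direct.xml")
```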
#### File: hpxml_version_translator/test/test_converter_cli.py
```python
import io
from lxml import objectify
import pathlib
import tempfile
from hpxml_version_translator import main
from hpxml_version_translator.converter import get_hpxml_versions
hpxml_dir = pathlib.Path(__file__).resolve().parent / "hpxml_v2_files"
def test_cli(capsysbinary):
with tempfile.TemporaryDirectory() as tmpdir:
tmppath = pathlib.Path(tmpdir).resolve()
input_filename = str(hpxml_dir / "version_change.xml")
output_filename = str(tmppath / "out.xml")
main([input_filename, "-o", output_filename])
root = objectify.parse(output_filename).getroot()
assert root.attrib["schemaVersion"] == "3.0"
main([input_filename])
f = io.BytesIO(capsysbinary.readouterr().out)
root = objectify.parse(f).getroot()
assert root.attrib["schemaVersion"] == "3.0"
def test_cli_to_v2(capsysbinary):
input_filename = str(
pathlib.Path(__file__).resolve().parent
/ "hpxml_v1_files"
/ "version_change.xml"
)
main([input_filename, "-v", "2.3"])
f = io.BytesIO(capsysbinary.readouterr().out)
root = objectify.parse(f).getroot()
assert root.attrib["schemaVersion"] == "2.3"
def test_schema_versions():
hpxml_versions = get_hpxml_versions()
assert "3.0" in hpxml_versions
assert "2.3" in hpxml_versions
assert "1.1.1" not in hpxml_versions
hpxml_versions = get_hpxml_versions(major_version=3)
assert "3.0" in hpxml_versions
assert "2.3" not in hpxml_versions
assert "1.1.1" not in hpxml_versions
```
#### File: hpxml_version_translator/test/test_converter_v3.py
```python
import io
from lxml import objectify
import pathlib
import pytest
import tempfile
from hpxml_version_translator.converter import (
convert_hpxml3_to_4,
convert_hpxml_to_version,
)
from hpxml_version_translator import exceptions as exc
hpxml_dir = pathlib.Path(__file__).resolve().parent / "hpxml_v3_files"
def convert_hpxml_and_parse(input_filename, version="4.0"):
with tempfile.NamedTemporaryFile("w+b") as f_out:
convert_hpxml_to_version(version, input_filename, f_out)
f_out.seek(0)
root = objectify.parse(f_out).getroot()
return root
def test_version_change_to_4():
root = convert_hpxml_and_parse(hpxml_dir / "version_change.xml")
assert root.attrib["schemaVersion"] == "4.0"
def test_enclosure_foundation():
root = convert_hpxml_and_parse(hpxml_dir / "enclosure_foundation.xml")
for i in (0, 1):
fw1 = root.Building[i].BuildingDetails.Enclosure.FoundationWalls.FoundationWall[
0
]
assert not hasattr(fw1, "DistanceToTopOfInsulation")
assert not hasattr(fw1, "DistanceToBottomOfInsulation")
fw2 = root.Building[i].BuildingDetails.Enclosure.FoundationWalls.FoundationWall[
1
]
assert not hasattr(fw2, "DistanceToTopOfInsulation")
assert not hasattr(fw2, "DistanceToBottomOfInsulation")
assert fw2.Insulation.Layer[0].DistanceToTopOfInsulation == 1.0
assert fw2.Insulation.Layer[1].DistanceToTopOfInsulation == 1.0
assert fw2.Insulation.Layer[0].DistanceToBottomOfInsulation == 5.0
assert fw2.Insulation.Layer[1].DistanceToBottomOfInsulation == 5.0
sl1 = root.Building[i].BuildingDetails.Enclosure.Slabs.Slab[0]
        assert not hasattr(sl1, "PerimeterInsulationDepth")
        assert not hasattr(sl1, "UnderSlabInsulationWidth")
        assert not hasattr(sl1, "UnderSlabInsulationSpansEntireSlab")
assert sl1.PerimeterInsulation.Layer[0].InsulationDepth == 2.0
assert sl1.UnderSlabInsulation.Layer[0].InsulationWidth == 1.0
assert not sl1.UnderSlabInsulation.Layer[0].InsulationSpansEntireSlab
def test_battery():
root = convert_hpxml_and_parse(hpxml_dir / "battery.xml")
b1 = root.Building[0].BuildingDetails.Systems.Batteries.Battery[0]
assert b1.NominalCapacity.Units == "Ah"
assert b1.NominalCapacity.Value == 1000
assert b1.UsableCapacity.Units == "Ah"
assert b1.UsableCapacity.Value == 800
b2 = root.Building[0].BuildingDetails.Systems.Batteries.Battery[1]
assert b2.NominalCapacity.Units == "Ah"
assert b2.NominalCapacity.Value == 2000
assert b2.UsableCapacity.Units == "Ah"
assert b2.UsableCapacity.Value == 1600
def test_mismatch_version():
f_out = io.BytesIO()
with pytest.raises(
exc.HpxmlTranslationError,
match=r"convert_hpxml3_to_4 must have valid target version of 4\.x",
):
convert_hpxml3_to_4(hpxml_dir / "version_change.xml", f_out, "2.0")
```
|
{
"source": "jeffg2k/avclass",
"score": 2
}
|
#### File: avclass/avclass/common.py
```python
import logging
import operator
import re
import string
import sys
from avclass import util
from collections import defaultdict, namedtuple
from typing import AnyStr, Callable, Collection, Dict, List, Optional, Set, Tuple, Union
logger = logging.getLogger(__name__)
# Prefix to identify platform tags
platform_prefix = "FILE:os:"
# Default category for tags in taxonomy with no category
uncategorized_cat = "UNC"
SampleInfo = namedtuple("SampleInfo", ["md5", "sha1", "sha256", "labels", "vt_tags"])
# AVs to use in suffix removal
suffix_removal_av_set = {
"Norman",
"Avast",
"Avira",
"Kaspersky",
"ESET-NOD32",
"Fortinet",
"Jiangmin",
"Comodo",
"GData",
"Avast",
"Sophos",
"BitDefenderTheta",
"Alibaba",
"Tencent",
"Cyren",
"Arcabit",
"TrendMicro-HouseCall",
"TrendMicro",
"NANO-Antivirus",
"Microsoft",
}
class Tag:
""" A Tag in the taxonomy """
def __init__(self, s):
word_list = s.strip().split(":")
if len(word_list) > 1:
self._name = word_list[-1].lower()
self._cat = word_list[0].upper()
self._prefix_l = [x.lower() for x in word_list[1:-1]]
path = self._cat
for x in self._prefix_l:
path = path + ":" + x
self._path = path + ":" + self._name
else:
self._name = word_list[0].lower()
self._cat = uncategorized_cat
self._prefix_l = []
self._path = self._name
def __hash__(self):
""" Return hash """
return hash((self._path))
@property
def name(self):
""" Return tag name """
return self._name
@property
def cat(self):
""" Return tag category """
return self._cat
@property
def path(self):
""" Return tag path """
return self._path
@property
def prefix_l(self):
""" Return tag prefix list """
return self._prefix_l
class Taxonomy:
"""
Contains tags and generic tokens read from filesystem
"""
def __init__(self, filepath: Optional[AnyStr]):
"""
Initialize and populate the Tag map from ``filepath``
:param filepath: Path to taxonomy data
"""
self._tags = set()
self._tag_map = {}
if filepath:
self.read_taxonomy(filepath)
def __len__(self) -> int:
"""
        The number of tags in the taxonomy
:return: The length (int) of the Taxonomy
"""
return len(self._tags)
def __iter__(self):
""" Iterator over the alphabetically sorted tags in the taxonomy """
return (t for t in sorted(self._tags))
def is_generic(self, tag: AnyStr) -> bool:
"""
Whether or not the input ``tag`` is generic
:param tag: The tag
:return: Boolean
"""
t = self._tag_map.get(tag, None)
return getattr(t, "cat", None) == "GEN"
def is_tag(self, tag: AnyStr) -> bool:
"""
Whether this Taxonomy is aware of ``tag``
:param tag: The tag
:return: Boolean
"""
return tag in self._tag_map
def add_tag(self, s: AnyStr, override: bool = False):
"""
Add a tag (``s``) to the Taxonomy. Collisions are only replaced if ``override`` is truthy.
:param s: A string to create a Tag from
:param override: Whether or not to replace a duplicate if present
:return: None
"""
tag = Tag(s)
t = self._tag_map.get(tag.name, None)
if t and (t.path != tag.path):
if override:
logger.warning("[Taxonomy] Replacing %s with %s\n" % t.path, tag.path)
del self._tag_map[t.path]
else:
return
logger.debug("[Taxonomy] Adding tag %s" % s)
self._tag_map[tag.name] = tag
self._tag_map[tag.path] = tag
def remove_tag(self, tag: AnyStr) -> bool:
"""
Remove a Tag from the Taxonomy.
:param tag: The tag to remove
:return: Whether or not the tag was present
"""
t = self._tag_map.get(tag, None)
        if t:
logger.debug("[Taxonomy] Removing tag: %s" % t.path)
del self._tag_map[t.name]
del self._tag_map[t.path]
            self._tags.discard(tag)
return t is not None
def get_category(self, tag: AnyStr) -> AnyStr:
"""
Return the tag's category or "UNK" if it's not a tag.
:param tag: The tag
:return: The category
"""
t = self._tag_map.get(tag, None)
return getattr(t, "cat", "UNK")
def get_path(self, tag: AnyStr) -> AnyStr:
"""
Get a tag's full path.
:param tag: The tag
:return: The tag's path
"""
t = self._tag_map.get(tag, None)
return getattr(t, "path", f"UNK:{tag}")
def get_prefix_l(self, tag: AnyStr) -> List[AnyStr]:
"""
Get a tag's prefix list.
:param tag: The tag
:return: The tag's prefix list
"""
t = self._tag_map.get(tag, None)
return getattr(t, "prefix_l", [])
def get_prefix(self, tag: AnyStr) -> List[AnyStr]:
"""
Get a tag's prefixes.
:param tag: The tag
:return: String representation of the tag's full prefix
"""
t = self._tag_map.get(tag, None)
        tag_pfx = tag.split(":")[:-1]
return t.prefix_l if t else tag_pfx
def get_depth(self, tag: AnyStr) -> int:
"""
Determine the "depth" (token count) of the tag
:param tag: The tag
:return: The depth (int) of the tag
"""
t = self._tag_map.get(tag, None)
if t:
            return len(t.prefix_l) + 2
return 0
def get_info(self, tag: AnyStr) -> Tuple[AnyStr, AnyStr]:
"""
Get tag info (path, category) or "UNK:tag"
:param tag: The tag
:return: Tuple containing tag.path and tag.cat
"""
t = self._tag_map.get(tag, None)
if t:
return t.path, t.cat
return f"UNK:{tag}", "UNK"
def expand(self, tag: AnyStr) -> List[AnyStr]:
"""
Return tag prefixes that are leaf-nodes
:param tag: The tag
:return: A list of prefixes
"""
t = self._tag_map.get(tag, None)
if t:
return [x for x in t.prefix_l if x in self._tag_map]
return []
def platform_tags(self) -> Set[AnyStr]:
"""
Returns a set of platform tags in the Taxonomy
        :return: Set of platform tags
"""
return {
tag.name
for _, tag in self._tag_map.items()
if tag.path.startswith(platform_prefix)
}
def overlaps(self, t1: AnyStr, t2: AnyStr) -> bool:
"""
Whether or not the two tags overlap
:param t1: The first Tag
:param t2: The second Tag
:return: Boolean
"""
m1 = self.get_prefix_l(t1)
m2 = self.get_prefix_l(t2)
return t1 in m2 or t2 in m1
def remove_overlaps(
self, l: Collection[AnyStr]
) -> Union[Collection[AnyStr], List[AnyStr]]:
"""
Returns list with overlapping tags removed
:param l: The list
:return: Deduped list
"""
# TODO - code smell
if not l:
return l
pair_l = sorted([(self.get_depth(t), t) for t in l])
out_l = [pair_l.pop()[1]]
while pair_l:
t = pair_l.pop()[1]
if not any(self.overlaps(t, e) for e in out_l):
out_l.append(t)
return out_l
def read_taxonomy(self, filepath: AnyStr):
"""
Create Taxonomy from file (tab-separated lines)
:param filepath: The path of the file to read
:return: None
"""
with open(filepath, "r") as fd:
for line in fd:
line = line.strip()
if not line.startswith("#") and line:
self.add_tag(line)
def to_file(self, filepath: AnyStr):
"""
Write sorted Taxonomy to a file (tab-separated lines)
:param filepath: The path to write
:return: None
"""
with open(filepath, "w") as fd:
tag_l = sorted(self._tag_map.items(), key=lambda item: item[1].path)
idx = 0
for name, tag in tag_l:
if (idx % 2) == 0:
fd.write(tag.path + "\n")
idx += 1
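# Illustrative sketch (not part of the original module): how Tag strings are
# parsed and indexed. A taxonomy line such as "FILE:os:windows" becomes a Tag
# with cat == "FILE", prefix_l == ["os"], name == "windows", and
# path == "FILE:os:windows"; add_tag() stores it under both its name and path:
#   tax = Taxonomy(None)
#   tax.add_tag("FILE:os:windows")
#   tax.get_category("windows")   # -> "FILE"
#   tax.get_path("windows")       # -> "FILE:os:windows"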
class Rules:
"""
Map a single source with one or more destinations
"""
def __init__(self, filepath: Optional[AnyStr]):
"""
Initialize the rule-map and read rules from ``filepath``
:param filepath: The file to read from
"""
self._src_map = {}
if filepath:
self.read_rules(filepath)
def __len__(self):
"""
The number of rules/src in the rule-map
:return: Number of rules
"""
return len(self._src_map)
def add_rule(
self, src: AnyStr, dst_l: Collection[AnyStr] = None, overwrite: bool = False
):
"""
Add a rule to the map. On duplicate, append destinations. If ``overwrite`` is set, replace rule src/dst.
:param src: The source tag
:param dst_l: The destination list
:param overwrite: Whether or not to overwrite duplicates
:return: None
"""
# Remove src from dst_l if it exists
        dst_l = [x for x in dst_l if x != src]
if not dst_l:
return
logger.debug("[Rules] Adding %s -> %s" % (src, dst_l))
src_tag = Tag(src)
if overwrite:
target_l = [Tag(dst).name for dst in dst_l]
self._src_map[src_tag.name] = set(target_l)
else:
curr_dst = self._src_map.get(src_tag.name, set())
for dst in dst_l:
dst_tag = Tag(dst)
curr_dst.add(dst_tag.name)
self._src_map[src_tag.name] = curr_dst
def remove_rule(self, src: AnyStr) -> bool:
dst = self._src_map.get(src, [])
if dst:
logger.debug("[Rules] Removing rule: %s -> %s" % (src, dst))
del self._src_map[src]
return True
return False
def get_dst(self, src: AnyStr) -> List[AnyStr]:
"""
        Return the destinations mapped to ``src``, or an empty list.
:param src: The source rule
:return: List of dst
"""
return list(self._src_map.get(src, []))
def read_rules(self, filepath: AnyStr):
"""
Read rules from a file and create the rule-map.
:param filepath: The path of the file to read
:return: None
"""
with open(filepath, "r") as fd:
for line in fd:
line = line.strip()
if not line.startswith("#") and line:
word_list = line.split()
if len(word_list) > 1:
self.add_rule(word_list[0], word_list[1:])
def to_file(self, filepath: AnyStr, taxonomy: Taxonomy = None):
"""
Write current rules to the file at ``filepath``.
:param filepath: The path of the file to write
:param taxonomy: A Taxonomy to optionally resolve full tag paths
:return: None
"""
with open(filepath, "w") as fd:
for src, dst_set in sorted(self._src_map.items()):
dst_l = sorted(dst_set)
if taxonomy:
src_path = taxonomy.get_path(src)
path_l = [taxonomy.get_path(t) for t in dst_l]
dst_str = "\t".join(path_l)
fd.write("%s\t%s\n" % (src_path, dst_str))
else:
dst_str = "\t".join(dst_l)
fd.write("%s\t%s\n" % (src, dst_str))
def expand_src_destinations(self, src: AnyStr) -> Set[AnyStr]:
"""
        Return a set of all expanded destinations for ``src``
        :param src: The source
        :return: Set of expanded destinations
        """
        # Follow chains of rules iteratively until only terminal destinations remain
        dst_set = set(self._src_map.get(src, set()))
out = set()
while dst_set:
dst = dst_set.pop()
dst_l = self._src_map.get(dst, [])
if dst_l:
for d in dst_l:
if d not in out and d != dst:
dst_set.add(d)
else:
out.add(dst)
return out
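    # Illustrative sketch (not part of the original module): with rules
    #   r = Rules(None); r.add_rule("a", ["b"]); r.add_rule("b", ["c"])
    # expand_src_destinations("a") follows the chain a -> b -> c and returns {"c"},
    # keeping only destinations that have no further rules of their own.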
def expand_all_destinations(self):
"""
Expand/resolve all sources in the rule-map
:return: None
"""
src_l = self._src_map.keys()
for src in src_l:
dst_l = self.expand_src_destinations(src)
self._src_map[src] = dst_l
class Translation(Rules):
"""
Translations are a set of rules that convert between unknown labels and labels that are in our Taxonomy
"""
def __init__(self, filepath: AnyStr):
super().__init__(filepath)
def validate(self, taxonomy: Taxonomy):
"""
Ensure all "destination" labels are in the Taxonomy.
:param taxonomy: The Taxonomy to use for checking
:return: None
"""
for tok, tag_l in self._src_map.items():
if taxonomy.is_tag(tok):
sys.stdout.write("[Tagging] SRC %s in taxonomy\n" % tok)
for t in tag_l:
if not taxonomy.is_tag(t):
sys.stdout.write("[Tagging] %s not in taxonomy\n" % t)
# TODO - raise or return False?
class Expansion(Rules):
"""
Expansions are rules that allow us to map a single label (src) to all explicit and implicit labels
"""
def __init__(self, filepath: AnyStr):
super().__init__(filepath)
def validate(self, taxonomy: Taxonomy):
"""
Ensure all "source" and "destination" labels are in the Taxonomy.
:param taxonomy: The Taxonomy to use for checking
:return: None
"""
for src, dst_set in self._src_map.items():
if not taxonomy.is_tag(src):
sys.stdout.write("[Expansion] %s not in taxonomy\n" % src)
# TODO - raise or return False?
for dst in dst_set:
if not taxonomy.is_tag(dst):
sys.stdout.write("[Expansion] %s not in taxonomy\n" % dst)
# TODO - raise or return False?
class AvLabels:
"""
Primary class used to interpret AV Labels
"""
def __init__(
self,
tag_file: AnyStr = util.DEFAULT_TAG_PATH,
exp_file: AnyStr = util.DEFAULT_EXP_PATH,
tax_file: AnyStr = util.DEFAULT_TAX_PATH,
av_file: AnyStr = None,
alias_detect: bool = False,
):
self.taxonomy = Taxonomy(tax_file)
self.translations = Translation(tag_file)
self.expansions = Expansion(exp_file)
self.avs = self.read_avs(av_file) if av_file else None
# Alias statistics initialization
self.alias_detect = alias_detect
def get_sample_call(self, data_type: AnyStr) -> Callable:
"""
Return the correct parser for the report type
:param data_type: the type of file vt2, vt3, lb, md
:return: Callable function that returns SampleInfo
"""
if data_type == "lb":
return self.get_sample_info_lb
elif data_type == "vt" or data_type == "vt2":
return self.get_sample_info_vt_v2
elif data_type == "vt3":
return self.get_sample_info_vt_v3
elif data_type == "md":
return self.get_sample_info_md
else:
sys.stderr.write(
"Invalid data type for sample: %s (should be vt, vt2, vt3, lb, md)"
% data_type
)
return self.get_sample_info_vt_v3
@staticmethod
def read_avs(avs_file: AnyStr) -> Set[AnyStr]:
"""
Read AV engines from ``avs_file``
:param avs_file: The file to read
:return: A set containing the engines
"""
with open(avs_file) as fd:
avs = set(map(str.strip, fd.readlines()))
return avs
@staticmethod
def get_sample_info_lb(record: Dict) -> SampleInfo:
"""
Convert simplified JSON to a SampleInfo object
:param record: The JSON record
:return: An instance of SampleInfo
"""
return SampleInfo(
record["md5"], record["sha1"], record["sha256"], record["av_labels"], []
)
@staticmethod
def get_sample_info_vt_v2(record: Dict) -> SampleInfo:
"""
Convert VirusTotal (v2) JSON to a SampleInfo object
:param record: The JSON record
:return: An instance of SampleInfo
"""
try:
scans = record["scans"]
md5 = record["md5"]
sha1 = record["sha1"]
sha256 = record["sha256"]
except KeyError:
return None
# Obtain labels from scan results
label_pairs = []
for av, res in scans.items():
if res["detected"]:
label = res["result"]
clean_label = "".join(
filter(lambda x: x in string.printable, label)
).strip()
label_pairs.append((av, clean_label))
vt_tags = record.get("tags", [])
return SampleInfo(md5, sha1, sha256, label_pairs, vt_tags)
@staticmethod
def get_sample_info_vt_v3(record: Dict) -> SampleInfo:
"""
Convert VirusTotal (v3) JSON to a SampleInfo object
:param record: The JSON record
:return: An instance of SampleInfo
"""
if "data" in record:
record = record["data"]
try:
scans = record["attributes"]["last_analysis_results"]
md5 = record["attributes"]["md5"]
sha1 = record["attributes"]["sha1"]
sha256 = record["attributes"]["sha256"]
except KeyError:
return None
# Obtain labels from scan results
label_pairs = []
for av, res in scans.items():
label = res["result"]
if label is not None:
clean_label = "".join(
filter(lambda x: x in string.printable, label)
).strip()
label_pairs.append((av, clean_label))
vt_tags = record["attributes"].get("tags", [])
return SampleInfo(md5, sha1, sha256, label_pairs, vt_tags)
@staticmethod
def get_sample_info_md(record: Dict) -> SampleInfo:
"""
Convert OPSWAT MetaDefender JSON to a SampleInfo object
:param record: The JSON record
:return: An instance of SampleInfo
"""
try:
scans = record["scan_results"]["scan_details"]
md5 = record["file_info"]["md5"]
sha1 = record["file_info"]["sha1"]
sha256 = record["file_info"]["sha256"]
except KeyError:
return None
# Obtain labels from scan results
label_pairs = []
for av, res in scans.items():
label = res["threat_found"]
if label is not None and res["scan_result_i"] == 1:
clean_label = "".join(
filter(lambda x: x in string.printable, label)
).strip()
label_pairs.append((av, clean_label))
return SampleInfo(md5, sha1, sha256, label_pairs, [])
@staticmethod
def is_pup(tag_pairs: List[Tuple], taxonomy: Taxonomy) -> Optional[bool]:
"""
Attempts to classify a sample (represented by ``tag_pairs``) as a PUP. We accomplish this by checking for the
"grayware" label in the highest ranked CLASS.
:param tag_pairs: List of tuples containing a label, and rank (int)
:param taxonomy: The Taxonomy
:return: bool or None
"""
threshold = 0.5
if len(tag_pairs) < 1:
return None
max_ctr = tag_pairs[0][1]
for tag, ctr in tag_pairs:
path, cat = taxonomy.get_info(tag)
if cat == "CLASS":
if "grayware" in path:
return float(ctr) >= float(max_ctr) * threshold
else:
return False
return False
@staticmethod
def _remove_suffixes(av_name: AnyStr, label: AnyStr) -> AnyStr:
"""
Remove vendor-specific suffixes from the label
:param av_name: The AV name to remove
:param label: The label to change
:return: The new label
"""
# Truncate after last '.'
if av_name in suffix_removal_av_set:
label = label.rsplit(".", 1)[0]
# Truncate after last '.'
# if suffix only contains digits or uppercase (no lowercase) chars
if av_name == "AVG":
tokens = label.rsplit(".", 1)
if len(tokens) > 1 and re.match("^[A-Z0-9]+$", tokens[1]):
label = tokens[0]
# Truncate after last '!'
if av_name == "Agnitum":
label = label.rsplit("!", 1)[0]
return label
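    # Illustrative sketch (not part of the original module):
    #   _remove_suffixes("Avast", "Win32:Malware-gen.Xyz") -> "Win32:Malware-gen"
    #   _remove_suffixes("AVG", "Zbot.ABC12")              -> "Zbot"
    #   _remove_suffixes("Agnitum", "Trojan.Zbot!xyz")     -> "Trojan.Zbot"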
def get_label_tags(self, label: AnyStr, hashes: Collection[AnyStr]) -> Set[AnyStr]:
"""
        Tokenize, translate, and filter a label into tags. ``hashes`` provide a dynamic filter:
        tokens that are prefixes of any of the sample's hashes are discarded, since some AV
        vendors embed hash fragments in their labels.
:param label: The label to convert
:param hashes: A list of hashes to be used as dynamic filters
:return: A set of tags that were extracted from the label
"""
tags = set()
# If empty label, nothing to do
if not label:
return tags
# Split label into tokens and process each token
for token in re.split("[^0-9a-zA-Z]", label):
# Convert token to lowercase
token = token.lower()
# Remove digits at the end
end_len = len(re.findall("\d*$", token)[0])
if end_len:
token = token[:-end_len]
# Ignore token if prefix of a hash of the sample
# Most AVs use MD5 prefixes in labels,
# but we check SHA1 and SHA256 as well
if any([h.startswith(token) for h in hashes]):
continue
# Ignore generic tokens
if self.taxonomy.is_generic(token):
continue
# Apply tagging rule
dst_l = self.translations.get_dst(token)
if dst_l:
# Ignore generic tokens
for t in dst_l:
if not self.taxonomy.is_generic(t):
tags.add(t)
# Add token if longer than 3 characters and no tagging rule
elif len(token) > 3:
tags.add(token)
# Return tags
return tags
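    # Illustrative sketch (not part of the original module): for a label such as
    # "Win32.Zbot.AB" the tokenizer yields ["win32", "zbot", "ab"]; trailing digits
    # are stripped ("win32" -> "win"), hash prefixes and generic tokens are dropped,
    # and what survives is either mapped through a tagging rule or kept verbatim if
    # longer than 3 characters. The exact output depends on the loaded taxonomy and
    # tagging rules; with the default data this typically yields {"zbot"}.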
def _expand(self, tag_set: Set[AnyStr]) -> Set[AnyStr]:
"""
Expand tags into more tags using expansion rules and the Taxonomy
:param tag_set: Starting set of tags
:return: Expanded set of tags
"""
ret = set()
for t in tag_set:
# Include tag
ret.add(t)
# Include target of expansion rule in output
ret.update(self.expansions.get_dst(t))
# Include implicit expansions in taxonomy
ret.update(self.taxonomy.expand(t))
return ret
def get_sample_tags(self, sample_info: SampleInfo) -> Dict[AnyStr, List[AnyStr]]:
"""
Get a dictionary where the key is a tag and the value is a list of AV engines that confirmed that tag.
:param sample_info: The SampleInfo object to inspect
:return: A dictionary where k,v = tag,[av, ...]
"""
duplicates = set()
av_dict = defaultdict(list)
# Process each AV label
for av_name, label in sample_info.labels:
if not label or (self.avs and av_name not in self.avs):
continue
# Emsisoft uses same label as
# GData/ESET-NOD32/BitDefender/Ad-Aware/MicroWorld-eScan,
# but suffixes ' (B)' to their label. Remove the suffix.
            if label.endswith(" (B)"):
                label = label[: -len(" (B)")]
# F-Secure uses Avira's engine since Nov. 2018
# but prefixes 'Malware.' to Avira's label. Remove the prefix.
            if label.startswith("Malware."):
                label = label[len("Malware."):]
# Other engines often use exactly the same label, e.g.,
# AVG/Avast
# K7Antivirus/K7GW
# Kaspersky/ZoneAlarm
if label in duplicates:
continue
duplicates.add(label)
label = self._remove_suffixes(av_name, label)
hashes = [sample_info.md5, sample_info.sha1, sample_info.sha256]
tags = self.get_label_tags(label, hashes)
# NOTE: Avoid expansion when aliases are set
expanded_tags = tags if self.alias_detect else self._expand(tags)
# store av vendors for each tag
for t in expanded_tags:
av_dict[t].append(av_name)
return av_dict
@staticmethod
def rank_tags(
av_dict: Dict[AnyStr, List[AnyStr]], threshold: int = 1
) -> List[Tuple[AnyStr, int]]:
"""
        Get a list of (tag, count) tuples, where count is the number of AV engines that confirmed
        the tag, sorted by count (descending).
        :param av_dict: The AV dictionary (from ``get_sample_tags()``)
        :param threshold: Tags confirmed by ``threshold`` or fewer AV engines are excluded
:return: A sorted list of tag, av-count pairs
"""
pairs = ((t, len(avs)) for t, avs in av_dict.items() if len(avs) > threshold)
return sorted(pairs, key=operator.itemgetter(1, 0), reverse=True)
```
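A minimal end-to-end sketch of the classes above; the record is fabricated, the default data files shipped with the package are assumed to be present, and the exact tags returned depend on the taxonomy and tagging rules that are loaded:

```python
from avclass.common import AvLabels

# Uses the default taxonomy/tagging/expansion files configured in avclass.util.
av_labels = AvLabels()

# A made-up record in the simplified "lb" input format.
record = {
    "md5": "a" * 32,
    "sha1": "b" * 40,
    "sha256": "c" * 64,
    "av_labels": [
        ["Kaspersky", "Trojan-Spy.Win32.Zbot.aamv"],
        ["Microsoft", "PWS:Win32/Zbot"],
    ],
}

sample_info = AvLabels.get_sample_info_lb(record)
av_dict = av_labels.get_sample_tags(sample_info)   # tag -> list of AV engines
ranking = AvLabels.rank_tags(av_dict)               # [(tag, count), ...] with count > 1
print(ranking)  # with the default data, typically includes a ('zbot', 2)-style entry
```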
#### File: avclass/avclass/labeler.py
```python
import argparse
import gzip
import os
import json
import sys
import traceback
from io import StringIO
from operator import itemgetter
from pathlib import Path
from typing import AnyStr, Dict, List, NamedTuple, Optional, Tuple, Union
try:
from avclass.common import AvLabels, Taxonomy
from avclass import clustering as ec, util
except ModuleNotFoundError:
# Helps find the avclasses when run from console
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from avclass.common import AvLabels, Taxonomy
from avclass import clustering as ec, util
class AVClassLabeler:
def __init__(self, av_labels: AvLabels = AvLabels()):
self.av_labels = av_labels
self.output = {"labels": []}
self.hash_type = None
self.ground_truth = None
self.get_sample_info = None
self.console = False
self.av_tags = False
self.stats_export = False
self.family_only = False
self.pup_classify = False
self.path_export = False
self.vt_tags = False
self.vt_all = 0
self.first_token_dict = {}
self.token_count_map = {}
self.pair_count_map = {}
self.avtags_dict = {}
self.gt_dict = {}
self.stats = {
"samples": 0,
"noscans": 0,
"tagged": 0,
"maltagged": 0,
"FAM": 0,
"CLASS": 0,
"BEH": 0,
"FILE": 0,
"UNK": 0,
}
def run(
self,
files: Union[
AnyStr,
List[AnyStr],
Path,
List[Path],
StringIO,
List[StringIO],
Dict,
List[Dict],
],
data_type: Optional[AnyStr] = "vt3",
hash_type: Optional[AnyStr] = "md5",
ground_truth: Optional[AnyStr] = None,
stats_export: bool = False,
vt_tags: bool = False,
av_tags: bool = False,
pup_classify: bool = False,
path_export: bool = False,
family_only: bool = False,
console: bool = False,
) -> List[Dict]:
# Set class arguments
self.console = console
self.ground_truth = ground_truth
self.av_tags = av_tags
self.stats_export = stats_export
self.family_only = family_only
self.pup_classify = pup_classify
self.path_export = path_export
self.vt_tags = vt_tags
# Select hash used to identify sample, by default MD5
self.hash_type = self.get_hash_type(hash_type)
# Select file type used for sampling
data_type = data_type if data_type else "vt3"
self.get_sample_info = self.av_labels.get_sample_call(data_type)
# Select output prefix
if isinstance(files, list) and isinstance(files[0], str):
out_prefix = os.path.basename(os.path.splitext(files[0])[0])
else:
out_prefix = None
# Process each input file
if not isinstance(files, list):
files = [files]
for ifile in files:
# Open file
if isinstance(ifile, dict):
self.process_line(ifile)
continue
elif isinstance(ifile, StringIO):
fd = ifile
else:
if self.is_gz_file(ifile):
fd = gzip.open(ifile, "rt")
else:
fd = open(ifile, "r")
# Debug info, file processed
self.print_error("[-] Processing input file %s\n" % ifile)
# Process all lines in file
try:
for line in fd:
self.process_line(line)
except json.decoder.JSONDecodeError:
if isinstance(ifile, str):
self.print_error(
"Error parsing %s (possible incorrect file type\n" % ifile
)
continue
# Debug info
self.print_error("\r[-] %d JSON read" % self.vt_all, flush=True)
self.print_error("\n")
# Close file
fd.close()
# Print statistics
self.print_statistics()
# If ground truth, print precision, recall, and F1-measure
if self.ground_truth:
self.ground_truth_print()
# Output stats
if self.stats_export:
self.out_stats(out_prefix)
# Output av vendor info
if self.av_tags:
self.out_avtags(out_prefix)
# If alias detection, print map
if self.av_labels.alias_detect:
self.alias_detection(out_prefix, path_export)
return self.output
def process_line(self, line: Union[AnyStr, Dict]):
if isinstance(line, str):
# If blank line, skip
if line == "\n":
return
# Debug info
if self.vt_all % 100 == 0:
self.print_error("\r[-] %d JSON read\n" % self.vt_all, flush=True)
self.vt_all += 1
# Read JSON line
vt_rep = json.loads(line)
else:
vt_rep = line
# Extract sample info
sample_info = self.get_sample_info(vt_rep)
# If no sample info, log error and continue
if sample_info is None:
try:
name = vt_rep["md5"]
self.print_error("\nNo scans for %s\n" % name, flush=True)
except KeyError:
self.print_error("\nCould not process: %s\n" % line, flush=True)
self.stats["noscans"] += 1
return
# Get the distinct tokens from all the av labels in the report
# And print them.
try:
self.get_tokens(sample_info)
except Exception:
traceback.print_exc(file=sys.stderr)
return
def get_tokens(self, sample_info: NamedTuple):
# Sample's name is selected hash type (md5 by default)
name = getattr(sample_info, self.hash_type)
# If the VT report has no AV labels, output and continue
if not sample_info.labels:
self.print_output("%s\t-\t[]\n" % (name))
# self.print_error('\nNo AV labels for %s\n' % name, flush=True)
return
# AV VENDORS PER TOKEN
av_tmp = self.av_labels.get_sample_tags(sample_info)
if self.av_tags:
self.av_vender_tags(av_tmp)
tags = self.av_labels.rank_tags(av_tmp)
if self.av_labels.alias_detect:
self.av_vender_tokens(tags)
# Compute VT_Count
vt_count = len(sample_info.labels)
# Collect stats
# TODO: should iterate once over tags,
# for both stats and aliasdetect
if tags:
self.collect_stats(tags, vt_count)
# Select family for sample if needed,
# i.e., for compatibility mode or for ground truth
fam, gt_family = self.get_family(name, tags)
# Check if sample is PUP, if requested
pup_val = self.is_pup(self.pup_classify, tags)
# Print family (and ground truth if available)
if self.family_only:
class_entry = self.avclass1_output(
name=name,
family=fam,
ground_truth=gt_family,
pup_val=pup_val,
vt_count=vt_count,
)
self.output["labels"].append(class_entry)
else:
class_entry = self.avclass2_output(
name=name,
tags=tags,
sample_info=sample_info,
ground_truth=gt_family,
pup_val=pup_val,
vt_count=vt_count,
)
self.output["labels"].append(class_entry)
def avclass1_output(
self,
name: AnyStr,
family: AnyStr,
ground_truth: AnyStr,
pup_val: Optional[bool],
vt_count: int,
) -> Dict:
"""
Build the v1 classification entry
:param name: Hash
:param family: family classification
:param ground_truth:
:param pup_val: is a pup
:param vt_count:
:return: Dict of classification
"""
self.print_output(
"%s\t%s%s%s\n" % (name, family, ground_truth, self.get_pup_str(pup_val))
)
# Build json output
values = {"hash": name, "av_count": vt_count, "family": family}
if self.ground_truth:
values["ground_truth"] = ground_truth
if self.pup_classify:
values["pup"] = pup_val
return values
def avclass2_output(
self,
name: AnyStr,
ground_truth: AnyStr,
pup_val: Optional[bool],
vt_count: int,
tags: List[Tuple],
sample_info: NamedTuple,
) -> Dict:
"""
Build the v2 classification entry
:param name: Hash
:param ground_truth:
:param pup_val: is a pup
:param vt_count:
:param tags: List of tags and their count
:param sample_info:
:return: Dict of classification
"""
# Build string output
if self.vt_tags:
vtt = self.list_str(sample_info.vt_tags, prefix="\t")
else:
vtt = ""
tag_str = self.format_tag_pairs_str(
tags=tags, taxonomy=self.av_labels.taxonomy, path_export=self.path_export
)
self.print_output(
"%s\t%d\t%s%s%s%s\n"
% (name, vt_count, tag_str, ground_truth, self.get_pup_str(pup_val), vtt)
)
# Build json output
tag_dict = self.format_tag_pairs_list(
tags=tags, taxonomy=self.av_labels.taxonomy, path_export=self.path_export
)
values = {"hash": name, "av_count": vt_count, "tags": tag_dict}
if self.ground_truth:
values["ground_truth"] = self.gt_dict.get(name, "")
if self.pup_classify:
values["pup"] = pup_val
if self.vt_tags:
values["vt_tags"] = sample_info.vt_tags
return values
def get_family(self, name: AnyStr, tags: List[Tuple]) -> Tuple:
if self.family_only or self.ground_truth:
fam = "SINGLETON:" + name
# fam = ''
for (t, s) in tags:
cat = self.av_labels.taxonomy.get_category(t)
if (cat == "UNK") or (cat == "FAM"):
fam = t
break
else:
fam = ""
# Get ground truth family, if available
if self.ground_truth:
self.first_token_dict[name] = fam
gt_family = "\t" + self.gt_dict.get(name, "")
else:
gt_family = ""
return (fam, gt_family)
def collect_stats(self, tags: List[Tuple], vt_count: int):
self.stats["tagged"] += 1
if self.stats_export and vt_count > 3:
self.stats["maltagged"] += 1
cat_map = {
"FAM": False,
"CLASS": False,
"BEH": False,
"FILE": False,
"UNK": False,
}
for t in tags:
cat = self.av_labels.taxonomy.get_info(t[0])[1]
cat_map[cat] = True
for c in cat_map:
if cat_map[c]:
self.stats[c] += 1
def av_vender_tags(self, av_tmp: Dict):
for t in av_tmp:
tmap = self.avtags_dict.get(t, {})
for av in av_tmp[t]:
ctr = tmap.get(av, 0)
tmap[av] = ctr + 1
self.avtags_dict[t] = tmap
def av_vender_tokens(self, tags: List[Tuple]):
prev_tokens = set()
for entry in tags:
curr_tok = entry[0]
curr_count = self.token_count_map.get(curr_tok, 0)
self.token_count_map[curr_tok] = curr_count + 1
for prev_tok in prev_tokens:
if prev_tok < curr_tok:
pair = (prev_tok, curr_tok)
else:
pair = (curr_tok, prev_tok)
pair_count = self.pair_count_map.get(pair, 0)
self.pair_count_map[pair] = pair_count + 1
prev_tokens.add(curr_tok)
def get_pup_str(self, is_pup: Optional[bool] = None) -> AnyStr:
if is_pup is True:
return "\t1"
elif is_pup is False:
return "\t0"
else:
return ""
def is_pup(self, pup_classify: bool, tags: List[Tuple]) -> Optional[bool]:
if pup_classify:
if self.av_labels.is_pup(tags, self.av_labels.taxonomy):
is_pup = True
else:
is_pup = False
else:
is_pup = None
return is_pup
def get_hash_type(self, hash_type: Optional[AnyStr] = None) -> AnyStr:
if self.ground_truth:
with open(self.ground_truth, "r") as gt_fd:
for line in gt_fd:
gt_hash, family = map(str, line.strip().split("\t", 1))
self.gt_dict[gt_hash] = family
# Guess type of hash in ground truth file
return self.guess_hash(list(self.gt_dict.keys())[0])
else:
return hash_type if hash_type else "md5"
def print_statistics(self):
self.print_error(
"[-] Samples: %d NoScans: %d NoTags: %d GroundTruth: %d\n"
% (
self.vt_all,
self.stats["noscans"],
self.vt_all - self.stats["tagged"],
len(self.gt_dict),
)
)
def ground_truth_print(self):
# If ground truth, print precision, recall, and F1-measure
precision, recall, fmeasure = ec.eval_precision_recall_fmeasure(
self.gt_dict, self.first_token_dict
)
self.print_error(
"Precision: %.2f\tRecall: %.2f\tF1-Measure: %.2f\n"
% (precision, recall, fmeasure)
)
self.output["ground_truth"] = {
"precision": "%.2f" % precision,
"recall": "%.2f" % recall,
"f1-measure": "%.2f" % fmeasure,
}
def alias_detection(self, out_prefix: AnyStr, path_export: bool = False):
self.output["alias"] = []
alias_fd = None
alias_filename = None
# Sort token pairs by number of times they appear together
sorted_pairs = sorted(self.pair_count_map.items(), key=itemgetter(1))
# sorted_pairs = sorted(self.pair_count_map.items())
# Open alias file
if out_prefix:
alias_filename = out_prefix + ".alias"
alias_fd = open(alias_filename, "w+")
# Output header line
alias_fd.write(
"# t1\tt2\t|t1|\t|t2|\t|t1^t2|\t|t1^t2|/|t1|\t|t1^t2|/|t2|\n"
)
# Compute token pair statistic and output to alias file
for (t1, t2), c in sorted_pairs:
n1 = self.token_count_map[t1]
n2 = self.token_count_map[t2]
if n1 < n2:
x = t1
y = t2
xn = n1
yn = n2
else:
x = t2
y = t1
xn = n2
yn = n1
f = float(c) / float(xn)
finv = float(c) / float(yn)
if path_export:
x = self.av_labels.taxonomy.get_path(x)
y = self.av_labels.taxonomy.get_path(y)
self.output["alias"].append(
{
"alias_token": x,
"alias_tag": y,
"count_token": xn,
"count_tag": yn,
"ratio": c,
"ratio_token": f,
"raiio_tag": finv,
}
)
if out_prefix:
alias_fd.write(
"%s\t%s\t%d\t%d\t%d\t%0.2f\t%0.2f\n" % (x, y, xn, yn, c, f, finv)
)
if out_prefix:
# Close alias file
alias_fd.close()
self.print_error("[-] Alias data in %s\n" % (alias_filename))
def out_avtags(self, out_prefix: AnyStr):
if out_prefix:
avtags_fd = open("%s.avtags" % out_prefix, "w")
for t in sorted(self.avtags_dict.keys()):
avtags_fd.write("%s\t" % t)
pairs = sorted(
self.avtags_dict[t].items(), key=lambda pair: pair[1], reverse=True
)
for pair in pairs:
avtags_fd.write("%s|%d," % (pair[0], pair[1]))
avtags_fd.write("\n")
avtags_fd.close()
self.output["av_tags"] = {}
for tag in sorted(self.avtags_dict.keys()):
self.output["av_tags"][tag] = []
pairs = sorted(
self.avtags_dict[tag].items(), key=lambda pair: pair[1], reverse=True
)
for pair in pairs:
self.output["av_tags"][tag].append({"name": pair[0], "count": pair[1]})
def out_stats(self, out_prefix: AnyStr):
# Output stats
num_samples = self.vt_all
num_tagged = self.stats["tagged"]
tag_frac = float(num_tagged) / float(num_samples) * 100
num_maltagged = self.stats["maltagged"]
maltag_frac = float(num_maltagged) / float(num_samples) * 100
if out_prefix:
stats_fd = open("%s.stats" % out_prefix, "w")
stats_fd.write("Samples: %d\n" % num_samples)
stats_fd.write("Tagged (all): %d (%.01f%%)\n" % (num_tagged, tag_frac))
stats_fd.write(
"Tagged (VT>3): %d (%.01f%%)\n" % (num_maltagged, maltag_frac)
)
for c in ["FILE", "CLASS", "BEH", "FAM", "UNK"]:
count = self.stats[c]
frac = float(count) / float(num_maltagged) * 100
stats_fd.write("%s: %d (%.01f%%)\n" % (c, self.stats[c], frac))
stats_fd.close()
self.output["stats"] = {
"samples": num_samples,
"tagged_all": {"count": num_tagged, "ratio": "%.01f%%" % tag_frac},
"tagged_vt3": {"count": num_maltagged, "ratio": "%.01f%%" % maltag_frac},
"category": [],
}
for cat in ["FILE", "CLASS", "BEH", "FAM", "UNK"]:
count = self.stats[cat]
frac = float(count) / float(num_maltagged) * 100
self.output["stats"]["category"].append(
{cat: {"count": count, "ratio": "%.01f%%" % frac}}
)
def guess_hash(self, h: AnyStr) -> Optional[AnyStr]:
"""
Guess hash type based on ``len(h)``
:param h: The hash
:return: The hash type (str)
"""
hlen = len(h)
if hlen == 32:
return "md5"
elif hlen == 40:
return "sha1"
elif hlen == 64:
return "sha256"
return None
def format_tag_pairs_str(
self, tags: List[Tuple], taxonomy: Taxonomy = None, path_export: bool = False
) -> AnyStr:
"""
Get ranked tags as a string.
:param tags:
:param taxonomy:
:return: List of tags
"""
if not tags:
return ""
if path_export and taxonomy is not None:
p = taxonomy.get_path(tags[0][0])
else:
p = tags[0][0]
out = "%s|%d" % (p, tags[0][1])
for (t, s) in tags[1:]:
if path_export and taxonomy is not None:
p = taxonomy.get_path(t)
else:
p = t
out += ",%s|%d" % (p, s)
return out
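    # Illustrative sketch (not part of the original module): given ranked tags
    # [("zbot", 14), ("grayware", 3)], this returns "zbot|14,grayware|3"; with
    # path_export and a taxonomy it would look like "FAM:zbot|14,CLASS:grayware|3".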
def format_tag_pairs_list(
self, tags: List[Tuple], taxonomy: Taxonomy = None, path_export: bool = False
) -> List[Dict]:
"""
Get ranked tags as a list dictionary.
:param tags:
:param taxonomy:
:return: List of tags
"""
out = []
for (tag, count) in tags:
values = {"tag": tag, "count": count}
if path_export and taxonomy is not None:
values["category"] = taxonomy.get_category(tag)
values["path"] = taxonomy.get_path(tag)
out.append(values)
return out
def list_str(
self, vt_tags: Optional[Dict], sep: AnyStr = ", ", prefix: AnyStr = ""
) -> AnyStr:
"""
Return list as a string
:param vt_tags: The list of virus total tags
:param sep: The separator
:param prefix: The prefix
:return: A string representation of the list
"""
if not vt_tags or len(vt_tags) == 0:
return ""
out = prefix + vt_tags[0]
for s in vt_tags[1:]:
out = out + sep + s
return out
def is_gz_file(self, filepath):
with open(filepath, "rb") as test_f:
return test_f.read(2) == b"\x1f\x8b"
def print_error(self, output: AnyStr = "", flush=False):
if self.console:
# TODO - would this be better? print(output, file=sys.stderr, flush=flush, end="")
sys.stderr.write(output)
if flush:
sys.stderr.flush()
def print_output(self, output: AnyStr = ""):
if self.console:
sys.stdout.write(output)
def main():
args = parse_args()
# Create AvLabels object
av_labels = AvLabels(
tag_file=args.tag,
tax_file=args.tax,
exp_file=args.exp,
av_file=args.av,
alias_detect=args.aliasdetect,
)
# Build list of input files
files = get_arg_files(
file_input=args.input,
)
av_class = AVClassLabeler(av_labels=av_labels)
result = av_class.run(
files=files,
data_type=args.type,
hash_type=args.hash,
stats_export=args.stats,
vt_tags=args.vtt,
av_tags=args.avtags,
ground_truth=args.gt,
pup_classify=args.pup,
path_export=args.path,
family_only=args.c,
console=not args.json,
)
if args.json:
print(json.dumps(result))
def get_arg_files(
file_input: List[AnyStr],
) -> List[AnyStr]:
"""
Return List of the files to process
:param file_input: file(s) or directory to process
:return: List of type str
"""
ifile_l = []
for fi in file_input:
if os.path.isdir(fi):
for f in os.listdir(fi):
dir_file = os.path.join(fi, f)
if dir_file not in ifile_l:
ifile_l.append(dir_file)
elif fi not in ifile_l:
ifile_l.append(fi)
return ifile_l
def parse_args():
argparser = argparse.ArgumentParser(
prog="avclass",
description="Extracts tags for a set of samples. Also calculates precision and"
" recall if ground truth available",
)
argparser.add_argument(
"-i",
"--input",
action="append",
help="input report file (plain or gzip) or directory. (Can be provided multiple times)",
)
argparser.add_argument(
"-t", "--type", help="the type of report file (vt2, vt3, lb, md)"
)
argparser.add_argument(
"-gt",
help="file with ground truth. If provided it evaluates clustering accuracy. "
"Prints precision, recall, F1-measure.",
)
argparser.add_argument(
"-vtt", help="Include VT tags in the output.", action="store_true"
)
argparser.add_argument(
"-tag", help="file with tagging rules.", default=util.DEFAULT_TAG_PATH
)
argparser.add_argument(
"-tax", help="file with taxonomy.", default=util.DEFAULT_TAX_PATH
)
argparser.add_argument(
"-exp", help="file with expansion rules.", default=util.DEFAULT_EXP_PATH
)
argparser.add_argument("-av", help="file with list of AVs to use")
argparser.add_argument(
"-avtags", help="extracts tags per av vendor", action="store_true"
)
argparser.add_argument(
"-pup",
action="store_true",
help="if used each sample is classified as PUP or not",
)
argparser.add_argument(
"-p", "--path", help="output.full path for tags", action="store_true"
)
argparser.add_argument(
"-hash",
help="hash used to name samples. Should match ground truth",
choices=["md5", "sha1", "sha256"],
)
argparser.add_argument(
"-c",
help="Compatibility mode. Outputs results in AVClass format.",
action="store_true",
)
argparser.add_argument(
"-aliasdetect", action="store_true", help="if used produce aliases file at end"
)
argparser.add_argument(
"-json", "--json", action="store_true", help="output console to json"
)
argparser.add_argument(
"-stats",
action="store_true",
help="if used produce 1 file with stats per category "
"(File, Class, Behavior, Family, Unclassified)",
)
args = argparser.parse_args()
if not args.input:
sys.stderr.write("Input file / directory is required: " "-i\n")
exit(1)
if not args.type:
sys.stderr.write(
"[-] No type defined, using file type of VirusTotal v3: '-t vt3'\n"
)
devnull = "/dev/null"
# TODO - consider letting argparse handle this?
if args.tag:
if args.tag == devnull:
sys.stderr.write("[-] Using no tagging rules\n")
else:
sys.stderr.write("[-] Using tagging rules in %s\n" % args.tag)
else:
sys.stderr.write(
"[-] Using default tagging rules in %s\n" % util.DEFAULT_TAG_PATH
)
# TODO - consider letting argparse handle this?
if args.tax:
if args.tax == devnull:
sys.stderr.write("[-] Using no taxonomy\n")
else:
sys.stderr.write("[-] Using taxonomy in %s\n" % args.tax)
else:
sys.stderr.write("[-] Using default taxonomy in %s\n" % util.DEFAULT_TAX_PATH)
# TODO - consider letting argparse handle this?
if args.exp:
if args.exp == devnull:
sys.stderr.write("[-] Using no expansion tags\n")
else:
sys.stderr.write("[-] Using expansion tags in %s\n" % args.exp)
else:
sys.stderr.write(
"[-] Using default expansion tags in %s\n" % util.DEFAULT_EXP_PATH
)
return args
if __name__ == "__main__":
main()
```
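For reference, a tiny standalone illustration of the ranked-tag string format that `format_tag_pairs_str` builds (the tag names below are made up):

```python
# Illustration only: mimics the "tag|count" comma-joined serialization above.
tags = [("FAM:zeus", 12), ("CLASS:banker", 5)]
print(",".join("%s|%d" % (tag, count) for tag, count in tags))
# -> FAM:zeus|12,CLASS:banker|5
```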
|
{
"source": "jeffg2k/RootTheBox",
"score": 2
}
|
#### File: RootTheBox/models/FlagAttachment.py
```python
import os
from uuid import uuid4
from sqlalchemy import Column, ForeignKey
from sqlalchemy.types import Unicode, String, Integer
from models.BaseModels import DatabaseObject
from libs.StringCoding import encode, decode
from builtins import str
from tornado.options import options
class FlagAttachment(DatabaseObject):
"""
These are files that the administrator wants to
distribute alongside a flag.
"""
uuid = Column(String(36), unique=True, nullable=False, default=lambda: str(uuid4()))
flag_id = Column(Integer, ForeignKey("flag.id"), nullable=False)
_file_name = Column(Unicode(64), nullable=False)
@property
def file_name(self):
return self._file_name
@file_name.setter
def file_name(self, value):
fname = value.replace("\n", "").replace("\r", "")
self._file_name = str(os.path.basename(fname))[:64]
@property
def data(self):
with open(options.flag_attachment_dir + "/" + self.uuid, "rb") as fp:
return decode(fp.read(), "base64")
@data.setter
def data(self, value):
if self.uuid is None:
self.uuid = str(uuid4())
self.byte_size = len(value)
with open(options.flag_attachment_dir + "/" + self.uuid, "wb") as fp:
fp.write(str(encode(value, "base64")).encode())
def delete_data(self):
""" Remove the file from the file system, if it exists """
fpath = options.flag_attachment_dir + "/" + self.uuid
if os.path.exists(fpath) and os.path.isfile(fpath):
os.unlink(fpath)
```
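The attachment bytes are stored base64-encoded in a file named after the record's UUID. A minimal standalone sketch of the same write/read round trip, using only the standard library and a placeholder directory (the real model goes through `libs.StringCoding` and `options.flag_attachment_dir`):

```python
import base64
import os
import uuid

attachment_dir = "/tmp/flag_attachments"  # placeholder for options.flag_attachment_dir
os.makedirs(attachment_dir, exist_ok=True)
file_uuid = str(uuid.uuid4())
payload = b"example attachment bytes"
# Write the base64-encoded payload under the UUID, as the data setter does.
with open(os.path.join(attachment_dir, file_uuid), "wb") as fp:
    fp.write(base64.b64encode(payload))
# Read it back and decode, as the data property does.
with open(os.path.join(attachment_dir, file_uuid), "rb") as fp:
    assert base64.b64decode(fp.read()) == payload
```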
|
{
"source": "jeffgabhart/django-permissions-auditor",
"score": 2
}
|
#### File: test_app/views/function_based.py
```python
from permissions_auditor.core import get_views
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth.decorators import permission_required, user_passes_test
from django.contrib.auth import get_user_model
from django.shortcuts import render
User = get_user_model()
@permission_required('auth.view_user')
def user_index(request):
context = {
'users': User.objects.filter(is_superuser=False)
}
return render(request, 'users.html', context)
@user_passes_test(lambda u: u.is_superuser)
def superuser_index(request):
context = {
'users': User.objects.filter(is_superuser=True)
}
return render(request, 'users.html', context)
@staff_member_required
def permissions_index(request):
context = {
'views': get_views()
}
return render(request, 'permissions_list.html', context)
@permission_required('perm.does_not_exist')
def invalid_permission_view(request):
return render(request, 'base.html', {})
```
#### File: django-permissions-auditor/permissions_auditor/admin.py
```python
from django.contrib import admin, messages
from django.contrib.admin import helpers
from django.contrib.auth import get_user_model
from django.contrib.auth.admin import GroupAdmin
from django.contrib.auth.models import Group, Permission
from django.db import models
from django.db.models import Prefetch
from django.http import HttpResponseRedirect
from django.template.response import TemplateResponse
from django.urls import path, reverse
from django.utils.html import mark_safe
from django.utils.translation import gettext_lazy as _
from permissions_auditor.core import get_views, _get_setting
from permissions_auditor.forms import AuditorAdminPermissionForm
class View(models.Model):
"""Dummy model to display views index pages in the admin."""
class Meta:
managed = False
verbose_name = 'permission'
verbose_name_plural = _('Site Views')
app_label = 'permissions_auditor'
class ViewsIndexAdmin(admin.ModelAdmin):
"""
Index containing all of the views found on the django site,
and the permissions needed to access them.
"""
form = AuditorAdminPermissionForm
fieldsets = (
(_('Permission Info'), {
'fields': ('name', 'content_type', 'codename'),
}),
(_('Objects with this Permission'), {
'fields': ('users', 'groups'),
}),
)
def get_urls(self):
info = self.model._meta.app_label, self.model._meta.model_name
return [
path('', self.admin_site.admin_view(self.index), name='%s_%s_changelist' % info),
path('<str:permission>/',
self.admin_site.admin_view(self.permission_detail),
name='%s_%s_permissiondetail' % info),
]
def get_object(self, request, permission, from_field=None):
try:
app_label, codename = permission.split('.')
return Permission.objects.get(content_type__app_label=app_label, codename=codename)
except (Permission.DoesNotExist, ValueError):
return None
def get_form(self, request, obj, change=False, **kwargs):
defaults = {
'users': obj.user_set.filter(is_active=True),
'groups': obj.group_set.all()
}
return self.form(request.POST or defaults, instance=obj)
def index(self, request):
context = dict(self.admin_site.each_context(request))
context.update({
'views': get_views(),
'group_by': request.GET.get('group_by', 'module')
})
return TemplateResponse(request, "permissions_auditor/admin/views_index.html", context)
def permission_detail(self, request, permission, obj=None):
try:
obj = self.get_object(request, permission)
except Permission.MultipleObjectsReturned:
return self._get_obj_multiple_exist_redirect(request, permission)
if obj is None:
return self._get_obj_does_not_exist_redirect(request, self.model._meta, permission)
opts = self.model._meta
adminForm = helpers.AdminForm(
self.get_form(request, obj),
list(self.get_fieldsets(request, obj)),
{},
model_admin=self
)
media = self.media + adminForm.media
if (request.method == 'POST' and
adminForm.form.is_valid() and
self.has_auditor_change_permission(request)):
obj.user_set.set(adminForm.form.cleaned_data['users'])
obj.group_set.set(adminForm.form.cleaned_data['groups'])
return self.response_change(request, obj)
context = {
**self.admin_site.each_context(request),
'adminform': adminForm,
'errors': helpers.AdminErrorList(adminForm.form, []),
'media': media,
'views': get_views(),
'permission': '{}.{}'.format(obj.content_type.app_label, obj.codename),
'opts': opts,
'add': False,
'change': True,
'is_popup': False,
'save_as': self.save_as,
'has_editable_inline_admin_formsets': False,
'has_view_permission': self.has_view_permission(request, obj),
'has_add_permission': self.has_add_permission(request, obj),
'has_change_permission': self.has_auditor_change_permission(request),
'has_delete_permission': self.has_delete_permission(request, obj),
'app_label': opts.app_label,
}
return TemplateResponse(
request, "permissions_auditor/admin/permission_detail.html", context
)
def has_view_permission(self, request, obj=None):
return request.user.is_staff
def has_add_permission(self, request, obj=None):
return False
def has_change_permission(self, request, obj=None):
return False
def has_delete_permission(self, request, obj=None):
return False
def has_module_permission(self, request):
return self.has_view_permission(request)
def has_auditor_change_permission(self, request):
return request.user.has_perms(['auth.change_user', 'auth.change_group'])
def _get_obj_multiple_exist_redirect(self, request, permission):
"""
Create a message informing the user that multiple permissions were found
for the specified permission string, and return to the admin index page.
"""
msg = _('Found multiple permissions when looking up “%(permission)s”. '
'Please ensure only a single permission exists with this name.') % {
'permission': permission
}
self.message_user(request, msg, messages.WARNING)
url = reverse('admin:index', current_app=self.admin_site.name)
return HttpResponseRedirect(url)
class AuditorGroupAdmin(GroupAdmin):
list_display = ['name', 'permissions_display', 'users_display']
def permissions_display(self, obj):
result = ''
for perm in obj.permissions.all():
perm_str = '{}.{}'.format(perm.content_type.app_label, perm.codename)
url = reverse('admin:permissions_auditor_view_permissiondetail', args=(perm_str,))
result += '<a href="{}">{}</a><br/>'.format(url, perm_str)
return mark_safe(result)
permissions_display.short_description = 'Permissions'
def users_display(self, obj):
result = ''
for user in obj.active_users:
url = reverse(
'admin:{}_{}_change'.format(user._meta.app_label, user._meta.model_name),
args=(user.pk,)
)
result += '<a href="{}">{}</a><br/>'.format(url, user)
return mark_safe(result)
users_display.short_description = 'Active Users'
def get_queryset(self, request):
qs = super().get_queryset(request)
return qs.prefetch_related(
'permissions',
'permissions__content_type',
Prefetch(
'user_set',
queryset=get_user_model()._default_manager.filter(is_active=True),
to_attr='active_users'
)
)
if _get_setting('PERMISSIONS_AUDITOR_ADMIN'):
admin.site.register(View, ViewsIndexAdmin)
if _get_setting('PERMISSIONS_AUDITOR_ADMIN_OVERRIDE_GROUPS'):
admin.site.unregister(Group)
admin.site.register(Group, AuditorGroupAdmin)
```
#### File: permissions_auditor/processors/auth_decorators.py
```python
import inspect
from .base import BaseDecoratorProcessor
class PermissionRequiredDecoratorProcessor(BaseDecoratorProcessor):
"""
Process ``@permission_required()`` decorator.
"""
def can_process(self, view):
if inspect.isclass(view):
for func in self._get_method_decorators(view.dispatch):
if 'user_passes_test' not in (func.__name__, func.__qualname__.split('.')[0]):
continue
test_func = inspect.getclosurevars(func).nonlocals['test_func']
if test_func.__name__ == 'check_perms':
return True
elif inspect.isfunction(view):
# Unwrap the function and look for the has_perms property.
return self._has_func_decorator(view, 'has_perms')
return False
def get_permission_required(self, view):
permissions = []
if inspect.isclass(view):
for func in self._get_method_decorators(view.dispatch):
if 'user_passes_test' not in (func.__name__, func.__qualname__.split('.')[0]):
continue
test_func = inspect.getclosurevars(func).nonlocals['test_func']
if test_func.__name__ == 'check_perms':
closures = inspect.getclosurevars(test_func).nonlocals
if 'perm' in closures:
perm = closures['perm']
# Ensure perm is not a function
if not inspect.isfunction(perm):
if isinstance(perm, str):
permissions.append(perm)
else:
permissions.extend(perm)
elif inspect.isfunction(view) and self._has_test_func(view):
for closure in self._get_test_func_closures(view):
if 'perm' in closure.nonlocals:
perm = closure.nonlocals['perm']
# Ensure perm is not a function
if not inspect.isfunction(perm):
if isinstance(perm, str):
permissions.append(perm)
else:
permissions.extend(perm)
return permissions
def get_login_required(self, view):
return True
class LoginRequiredDecoratorProcessor(BaseDecoratorProcessor):
"""
Process ``@login_required`` decorator.
"""
def can_process(self, view):
if inspect.isclass(view):
return self._has_method_decorator(view.dispatch, 'login_required')
elif inspect.isfunction(view):
# Unwrap the function and look for the is_authenticated property.
return self._has_func_decorator(view, 'is_authenticated')
return False
def get_login_required(self, view):
return True
class StaffMemberRequiredDecoratorProcessor(BaseDecoratorProcessor):
"""
Process Django admin's ``@staff_member_required`` decorator.
"""
def can_process(self, view):
if inspect.isclass(view):
return self._has_method_decorator(view.dispatch, 'staff_member_required')
elif inspect.isfunction(view):
# Unwrap the function and look for the is_staff property.
return self._has_func_decorator(view, 'is_staff')
return False
def get_login_required(self, view):
return True
def get_docstring(self, view):
return 'Staff member required'
class ActiveUserRequiredDecoratorProcessor(BaseDecoratorProcessor):
"""
Process ``@user_passes_test(lambda u: u.is_active)`` decorator.
"""
def can_process(self, view):
if inspect.isclass(view) and self._has_method_decorator(view.dispatch, 'user_passes_test'):
return self._has_test_func_lambda(view.dispatch, 'is_active')
elif inspect.isfunction(view):
# Unwrap the function and look for the is_active property.
return self._has_func_decorator(view, 'is_active')
return False
def get_login_required(self, view):
return True
def get_docstring(self, view):
return 'Active user required'
class AnonymousUserRequiredDecoratorProcessor(BaseDecoratorProcessor):
"""
Process ``@user_passes_test(lambda u: u.is_anonymous)`` decorator.
"""
def can_process(self, view):
if inspect.isclass(view) and self._has_method_decorator(view.dispatch, 'user_passes_test'):
return self._has_test_func_lambda(view.dispatch, 'is_anonymous')
elif inspect.isfunction(view):
# Unwrap the function and look for the is_anonymous property.
return self._has_func_decorator(view, 'is_anonymous')
return False
def get_docstring(self, view):
return 'Anonymous user required'
class SuperUserRequiredDecoratorProcessor(BaseDecoratorProcessor):
"""
Process ``@user_passes_test(lambda u: u.is_superuser)`` decorator.
"""
def can_process(self, view):
if inspect.isclass(view) and self._has_method_decorator(view.dispatch, 'user_passes_test'):
return self._has_test_func_lambda(view.dispatch, 'is_superuser')
elif inspect.isfunction(view):
# Unwrap the function and look for the is_superuser property.
return self._has_func_decorator(view, 'is_superuser')
return False
def get_login_required(self, view):
return True
def get_docstring(self, view):
return 'Superuser required'
class UserPassesTestDecoratorProcessor(BaseDecoratorProcessor):
"""
Process ``@user_passes_test()`` decorator.
.. note::
the ``@user_passes_test`` decorator does not automatically check
that the User is not anonymous. This means they don't necessarily need
to be authenticated for the check to pass, so this processor returns
``None`` (unknown) for the login_required attribute.
"""
def can_process(self, view):
# Some decorators use user_passes_test() internally, so we need to filter
# them out since they are processed elsewhere.
blacklist = (
'is_authenticated', 'has_perms', 'is_staff', 'is_active', 'is_anonymous',
'is_superuser',
)
if inspect.isclass(view):
for func in self._get_method_decorators(view.dispatch):
if 'user_passes_test' not in (func.__name__, func.__qualname__.split('.')[0]):
continue
if not any([self._has_test_func_lambda(func, tag) for tag in blacklist]):
return True
if inspect.isfunction(view) and self._has_test_func(view):
for closure in self._get_test_func_closures(view):
if not any([tag in closure.unbound for tag in blacklist]):
return True
return False
def get_login_required(self, view):
return None
def get_docstring(self, view):
return 'Custom user test'
```
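These processors rely on `inspect.getclosurevars` to peel back the `user_passes_test` wrapper and read the permission string out of its closure. A self-contained sketch of that idea, using simplified stand-in decorators rather than Django's real ones:

```python
import inspect

def user_passes_test(test_func):
    def decorator(view_func):
        def _wrapped_view(request, *args, **kwargs):
            if test_func(request.user):
                return view_func(request, *args, **kwargs)
        return _wrapped_view
    return decorator

def permission_required(perm):
    # Mirrors the shape of Django's decorator: a check_perms closure over perm.
    def check_perms(user):
        perms = (perm,) if isinstance(perm, str) else perm
        return user.has_perms(perms)
    return user_passes_test(check_perms)

@permission_required('tests.test_perm')
def my_view(request):
    pass

closures = inspect.getclosurevars(my_view).nonlocals
test_func = closures['test_func']                      # the check_perms closure
perm = inspect.getclosurevars(test_func).nonlocals['perm']
print(perm)  # tests.test_perm
```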
#### File: permissions_auditor/processors/base.py
```python
import inspect
from django.core.exceptions import ImproperlyConfigured
class BaseProcessor:
def can_process(self, view):
"""
Can this processor process the provided view?
:param view: the view being processed.
:type view: function or class
:return: whether this processor can process the view.
Default: ``False``
:rtype: boolean
"""
return True
def get_permission_required(self, view):
"""
Returns permissions required on the provided view.
Must return an iterable.
:param view: the view being processed.
:type view: function or class
:return: the permissions required to access the view. Default: ``[]``
:rtype: list(str)
"""
return []
def get_login_required(self, view):
"""
Returns if a user needs to be logged in to access the view.
:param view: the view being processed.
:type view: function or class
:return: whether a user must be logged in to access this view.
Default: ``False``
:rtype: boolean or None (if unknown)
"""
return False
def get_docstring(self, view):
"""
Returns any additional information that should be displayed when
        showing permission information.
:param view: the view being processed.
:type view: function or class
:return: the string to display in the additional info column. Default: ``None``
:rtype: str or None
"""
return None
class BaseFuncViewProcessor(BaseProcessor):
"""Base class for processing function based views."""
def can_process(self, view):
return inspect.isfunction(view)
class BaseCBVProcessor(BaseProcessor):
"""Base class for processing class based views."""
def can_process(self, view):
return inspect.isclass(view)
class BaseDecoratorProcessor(BaseProcessor):
"""Base class with utilities for unwrapping decorators."""
def _has_method_decorator(self, function, func_name):
"""
Checks if a function with the name `func_name` (str) is present within the
``@method_decorator`` on the provided function.
"""
closures = inspect.getclosurevars(function).nonlocals
if 'decorators' in closures:
for func in closures['decorators']:
if func.__name__ == func_name or func.__qualname__.split('.')[0] == func_name:
return True
if 'method' in closures:
return self._has_method_decorator(closures['method'], func_name)
return False
def _get_method_decorators(self, function):
"""
Returns a generator of functions that decorate the provided function using
``@method_decorator``.
"""
closures = inspect.getclosurevars(function).nonlocals
if 'decorators' in closures:
for func in closures['decorators']:
yield func
if 'method' in closures:
yield from self._get_method_decorators(closures['method'])
def _has_test_func(self, function):
"""
Checks if the provided function is decorated with the ``user_passes_test`` decorator.
"""
closures = inspect.getclosurevars(function).nonlocals
if 'test_func' in closures:
return True
if 'view_func' in closures:
return self._has_test_func(closures['view_func'])
return False
def _has_test_func_lambda(self, function, name):
"""
Checks if the provided function's test_func contains the lambda expression ``name`` (str).
"""
closures = inspect.getclosurevars(function).nonlocals
if 'test_func' in closures:
if name in inspect.getclosurevars(closures['test_func']).unbound:
return True
if 'decorators' in closures:
for func in closures['decorators']:
if self._has_test_func_lambda(func, name):
return True
if 'method' in closures:
return self._has_test_func_lambda(closures['method'], name)
return False
def _get_test_func_closures(self, function):
closures = inspect.getclosurevars(function).nonlocals
if 'test_func' in closures:
yield inspect.getclosurevars(closures['test_func'])
if 'view_func' in closures:
yield from self._get_test_func_closures(closures['view_func'])
def _has_func_decorator(self, function, func_name):
closures = inspect.getclosurevars(function).nonlocals
if 'test_func' in closures:
test_closures = inspect.getclosurevars(closures['test_func']).unbound
if func_name in test_closures:
return True
if 'view_func' in closures:
return self._has_func_decorator(closures['view_func'], func_name)
return False
class BaseFileredMixinProcessor(BaseCBVProcessor):
"""
Base class for parsing mixins on class based views.
Set ``class_filter`` to filter the class names the processor applies to.
ONLY checks top level base classes.
:var class_filter: initial value: ``None``
"""
class_filter = None
def can_process(self, view):
if not super().can_process(view):
return False
view_bases = [cls.__module__ + '.' + cls.__name__ for cls in view.__bases__]
for cls_filter in self.get_class_filter():
if cls_filter in view_bases:
return True
return False
def get_class_filter(self):
"""
        Override this method to override the class_filter attribute.
        Must return an iterable.
        :return: a list of strings containing the full paths of mixins to detect.
        :raises ImproperlyConfigured: if the ``class_filter`` attribute is ``None``.
"""
if self.class_filter is None:
raise ImproperlyConfigured(
'{0} is missing the class_filter attribute. Define {0}.class_filter, or override '
'{0}.get_class_filter().'.format(self.__class__.__name__)
)
if isinstance(self.class_filter, str):
cls_filter = (self.class_filter, )
else:
cls_filter = self.class_filter
return cls_filter
```
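As an illustration of the processor API above, a hypothetical custom processor for a project-specific mixin; the dotted mixin path and docstring below are made up:

```python
from permissions_auditor.processors.base import BaseFileredMixinProcessor
class OwnerRequiredMixinProcessor(BaseFileredMixinProcessor):
    class_filter = 'myapp.mixins.OwnerRequiredMixin'  # made-up mixin path
    def get_login_required(self, view):
        return True
    def get_docstring(self, view):
        return 'Object owner required'
```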
#### File: permissions_auditor/tests/base.py
```python
from django.test import SimpleTestCase
class ProcessorTestCase(SimpleTestCase):
processor = None
def assertCannotProcess(self, views):
for view in views:
self.assertFalse(self.processor.can_process(view))
def assertCanProcessView(self, view, permissions=[], login_required=False, docstring=None):
self.assertTrue(self.processor.can_process(view))
self.assertCountEqual(self.processor.get_permission_required(view), permissions)
self.assertEqual(self.processor.get_login_required(view), login_required)
self.assertEqual(self.processor.get_docstring(view), docstring)
```
#### File: tests/fixtures/decorator_views.py
```python
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth.decorators import (
login_required, permission_required, user_passes_test
)
from django.utils.decorators import method_decorator
from django.views.generic import View
@login_required
def login_required_view(request):
pass
class LoginRequiredMethodDecoratorView(View):
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
return super().dispatch(request, *args, **kwargs)
@permission_required('tests.test_perm')
def permission_required_view(request):
pass
@permission_required(('tests.test_perm', 'tests.test_perm2'))
def permission_required_multi_view(request):
pass
class PermissionRequiredMethodDecoratorView(View):
@method_decorator(permission_required('tests.test_perm'))
def dispatch(self, request, *args, **kwargs):
return super().dispatch(request, *args, **kwargs)
@staff_member_required
def staff_member_required_view(request):
pass
class StaffMemberRequiredMethodDecoratorView(View):
@method_decorator(staff_member_required)
def dispatch(self, request, *args, **kwargs):
return super().dispatch(request, *args, **kwargs)
@user_passes_test(lambda u: u.is_active)
def active_user_required_view(request):
pass
class ActiveUserRequiredMethodDecoratorView(View):
@method_decorator(user_passes_test(lambda u: u.is_active))
def dispatch(self, request, *args, **kwargs):
return super().dispatch(request, *args, **kwargs)
@user_passes_test(lambda u: u.is_anonymous)
def anonymous_user_required_view(request):
pass
class AnonymousUserRequiredMethodDecoratorView(View):
@method_decorator(user_passes_test(lambda u: u.is_anonymous))
def dispatch(self, request, *args, **kwargs):
return super().dispatch(request, *args, **kwargs)
@user_passes_test(lambda u: u.is_superuser)
def superuser_required_view(request):
pass
class SuperUserRequiredMethodDecoratorView(View):
@method_decorator(user_passes_test(lambda u: u.is_superuser))
def dispatch(self, request, *args, **kwargs):
return super().dispatch(request, *args, **kwargs)
@user_passes_test(lambda u: u.email is not None)
def user_passes_test_view(request):
pass
class UserPassesTestMethodDecoratorView(View):
@method_decorator(user_passes_test(lambda u: u.email is not None))
def dispatch(self, request, *args, **kwargs):
return super().dispatch(request, *args, **kwargs)
@login_required
@user_passes_test(lambda u: u.is_active)
@user_passes_test(lambda u: u.email is not None)
def nested_decorator_view(request):
pass
class NestedMethodDecoratorView(View):
@method_decorator(login_required)
@method_decorator(user_passes_test(lambda u: u.is_active))
@method_decorator(user_passes_test(lambda u: u.email is not None))
def dispatch(self, request, *args, **kwargs):
pass
```
#### File: permissions_auditor/tests/test_auth_mixin_processors.py
```python
from permissions_auditor.processors import auth_mixins
from permissions_auditor.tests.base import ProcessorTestCase
from permissions_auditor.tests.fixtures import views
class MixinProcessorTestCaseMixin:
"""Mixins should never be able to process function based views."""
def assert_cannot_process_non_cbvs(self):
self.assertCannotProcess([
views.base_view, views.BaseView,
views.login_required_view,
views.staff_member_required_view,
views.active_user_required_view,
views.anonymous_user_required_view,
views.superuser_required_view,
views.user_passes_test_view
])
class TestLoginRequiredMixinProcessor(MixinProcessorTestCaseMixin, ProcessorTestCase):
def setUp(self):
self.processor = auth_mixins.LoginRequiredMixinProcessor()
self.expected_results = {'permissions': [], 'login_required': True, 'docstring': None}
def test_cannot_process(self):
self.assertCannotProcess([
views.PermissionRequiredView, views.PermissionRequiredMultiView,
views.PermissionRequiredViewNoPerm,
views.PermissionRequiredViewDocstring, views.PermissionRequiredViewNoDocstring,
views.UserPassesTestView, views.UserPassesTestViewCustomFunc,
views.UserPassesTestViewDocstring, views.UserPassesTestViewNoDocstring
])
def test_cb_loginrequiredview(self):
self.assertCanProcessView(views.LoginRequiredView, **self.expected_results)
class TestPermissionRequiredMixinProcessor(MixinProcessorTestCaseMixin, ProcessorTestCase):
def setUp(self):
self.processor = auth_mixins.PermissionRequiredMixinProcessor()
def test_cannot_process(self):
self.assertCannotProcess([
views.LoginRequiredView,
views.UserPassesTestView, views.UserPassesTestViewCustomFunc,
views.UserPassesTestViewDocstring, views.UserPassesTestViewNoDocstring
])
def test_cb_permissionsrequiredview(self):
self.assertCanProcessView(
views.PermissionRequiredView,
permissions=['tests.test_perm'], login_required=True, docstring=None
)
def test_cb_permissionsrequiredview_no_perm(self):
"""
Views that override has_permission() and do not set permission_required should be processed.
"""
self.assertCanProcessView(
views.PermissionRequiredViewNoPerm,
permissions=[], login_required=True, docstring='The user\'s first name must be Bob'
)
def test_cb_permissionsrequiredview_docstring(self):
"""Views that implement has_permission() and have a docstring should be retrieved."""
self.assertCanProcessView(
views.PermissionRequiredViewDocstring,
permissions=['tests.test_perm'], login_required=True,
docstring='Custom docstrings should be detected.'
)
def test_cb_permissionsrequiredview_no_docstring(self):
"""
Views that implement has_permission() and do not have a docstring
        should return a default message.
"""
self.assertCanProcessView(
views.PermissionRequiredViewNoDocstring,
permissions=['tests.test_perm'], login_required=True,
docstring='Custom (no docstring found)'
)
def test_cb_permissionrequiredview_multi(self):
"""
Views with multiple permissions should return all permissions.
"""
self.assertCanProcessView(
views.PermissionRequiredMultiView,
permissions=['tests.test_perm', 'tests.test_perm2'], login_required=True, docstring=None
)
class TestUserPassesTestMixinProcessor(MixinProcessorTestCaseMixin, ProcessorTestCase):
def setUp(self):
self.processor = auth_mixins.UserPassesTestMixinProcessor()
def test_cannot_process(self):
self.assertCannotProcess([
views.LoginRequiredView,
views.PermissionRequiredView, views.PermissionRequiredMultiView,
views.PermissionRequiredViewNoPerm,
views.PermissionRequiredViewDocstring, views.PermissionRequiredViewNoDocstring
])
def test_cb_userpassestestview(self):
self.assertCanProcessView(
views.UserPassesTestView,
permissions=[], login_required=None, docstring='Custom (no docstring found)'
)
def test_cb_userpassestestview_docstring(self):
"""Views that implement test_func() and have a docstring should be retrieved."""
self.assertCanProcessView(
views.UserPassesTestViewDocstring,
permissions=[], login_required=None, docstring='Custom docstrings should be detected.'
)
def test_cb_userpassestestview_no_docstring(self):
"""
Views that implement test_func() and do not have a docstring
        should return a default message.
"""
self.assertCanProcessView(
views.UserPassesTestViewNoDocstring,
permissions=[], login_required=None, docstring='Custom (no docstring found)'
)
def test_cb_userpassestestview_custom_func(self):
"""
Views that override get_test_func() should check the new function returned
instead of the default test_func() function.
"""
self.assertCanProcessView(
views.UserPassesTestViewCustomFunc,
permissions=[], login_required=None, docstring='Custom docstrings should be detected.'
)
```
|
{
"source": "Jeffgan99/LMPC_Dynamics",
"score": 2
}
|
#### File: LMPC_Dynamics/src/main.py
```python
import sys
sys.path.append('schemes')
from SysModel import Simulator, PID
from Classes import ClosedLoopData, LMPCprediction
from LTVMPC import LTV_MPC
from LTIMPC import LTI_MPC
from Track import Map, unityTestChangeOfCoordinates
from LMPC import LMPCplus
from Utilities import Regression
from plot import plotTrajectory, plotClosedLoopLMPC, animation_xy, animation_states, saveGif_xyResults, Save_statesAnimation, plotCost, plotC, plotQC, plotQt, plotQcost
import numpy as np
import matplotlib.pyplot as plt
import pdb
import pickle
def main():
# Parameter initialization
dt = 1.0 / 10.0 # Controller discretization time
Time = 100 # Simulation time for PID
TimeMPC = 100 # Time for LTI-MPC
TimeMPC_tv = 100 # Time for LTV-MPC
TimeLMPC = 400 # Time for LMPC
vt = 0.8 # Reference velocity for path controllers
v0 = 0.5 # Initial velocity at lap 0
N = 12 # Horizon
dim_state = 6 # State dimension
dim_input = 2 # Input dimension
Q = np.diag([1.0, 1.0, 1, 1, 0.0, 100.0]) # vx, vy, wz, epsi, s, ey
R = np.diag([1.0, 10.0]) # delta, a
Q_lmpc = np.diag([0.0, 0.0, 0.0, 0.0, 0.0, 0.0]) * 0 # vx, vy, wz, epsi, s, ey
R_lmpc = np.diag([1.0, 1.0]) * 0 # delta, a
Qf = np.array([0, 10]) * 1
QterminalSlack = np.diag([10, 1, 1, 1, 10, 1]) * 20
dR_LMPC = np.array([1.0, 10.0]) * 10
inputConstr = np.array([[0.5, 0.5],
[10.0, 10.0]])
LMPC_Solver = "CVX" # Can pick CVX for cvxopt or OSQP. For OSQP uncomment line 14 in LMPC.py
numSS_it = 4 # Number of trajectories used at each iteration to build the safe set
numSS_Points = 40 # Number of points to select from each trajectory to build the safe set
Laps = 46 + numSS_it # Total LMPC laps (50 laps)
map = Map(0.4) # Initialize the map
model = Simulator(map) # Initialize the MPC model
LMPCmodel = Simulator(map, 1, 1) # Initialize the LMPC model
# State constraints for LTI-MPC and LTV-MPC
Fx_MPC = np.array([[1., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 1.],
[0., 0., 0., 0., 0., -1.]])
bx_MPC = np.array([[10],
[2.],
[2.]])
# Input constraints for LTI-MPC and LTV-MPC
Fu_MPC = np.array([[1., 0.],
[-1., 0.],
[0., 1.],
[0., -1.]])
bu_MPC = np.array([[inputConstr[0, 0]],
[inputConstr[0, 1]],
[inputConstr[1, 0]],
[inputConstr[1, 1]]])
# State constraints for LMPC
Fx = np.array([[0., 0., 0., 0., 0., 1.],
[0., 0., 0., 0., 0., -1.]])
bx = np.array([[map.halfWidth],
[map.halfWidth]])
# Input constraints for LMPC
Fu = np.array([[1., 0.],
[-1., 0.],
[0., 1.],
[0., -1.]])
bu = np.array([[inputConstr[0,0]],
[inputConstr[0,1]],
[inputConstr[1,0]],
[inputConstr[1,1]]])
print("Starting PID")
ClosedLoopDataPID = ClosedLoopData(dt, Time, v0)
PIDController = PID(vt)
model.Sim(ClosedLoopDataPID, PIDController)
file_data = open(sys.path[0]+'\data\ClosedLoopDataPID.obj', 'wb')
pickle.dump(ClosedLoopDataPID, file_data)
file_data.close()
print("===== PID terminated")
print("Starting LTI-MPC")
lamb = 0.0000001
A, B, Error = Regression(ClosedLoopDataPID.x, ClosedLoopDataPID.u, lamb)
ClosedLoopDataLTI_MPC = ClosedLoopData(dt, TimeMPC, v0)
LTIMPC = LTI_MPC(A, B, Q, R, N, vt, Fx_MPC, bx_MPC, Fu_MPC, bu_MPC)
model.Sim(ClosedLoopDataLTI_MPC, LTIMPC)
file_data = open(sys.path[0] + '\data\ClosedLoopDataLTI_MPC.obj', 'wb')
pickle.dump(ClosedLoopDataLTI_MPC, file_data)
file_data.close()
print("===== LTI-MPC terminated")
print("Starting LTV-MPC")
ClosedLoopDataLTV_MPC = ClosedLoopData(dt, TimeMPC_tv, v0)
LTVMPC = LTV_MPC(Q, R, N, vt, dim_state, dim_input, ClosedLoopDataPID.x, ClosedLoopDataPID.u, dt, map, Fx_MPC, bx_MPC, Fu_MPC, bu_MPC)
model.Sim(ClosedLoopDataLTV_MPC, LTVMPC)
    file_data = open(sys.path[0] + '\data\ClosedLoopDataLTV_MPC.obj', 'wb')
pickle.dump(ClosedLoopDataLTV_MPC, file_data)
file_data.close()
print("===== LTV-MPC terminated")
print("Starting LMPC")
ClosedLoopLMPC = ClosedLoopData(dt, TimeLMPC, v0)
LMPCOpenLoopData = LMPCprediction(N, dim_state, dim_input, TimeLMPC, numSS_Points, Laps)
LMPC = LMPCplus(numSS_Points, numSS_it, N, QterminalSlack, Qf, Q_lmpc, R_lmpc, dR_LMPC, dt, map, Laps, TimeLMPC, LMPC_Solver, Fx, bx, Fu, bu)
LMPC.addTrajectory(ClosedLoopDataPID)
LMPC.addTrajectory(ClosedLoopDataLTV_MPC)
LMPC.addTrajectory(ClosedLoopDataPID)
LMPC.addTrajectory(ClosedLoopDataLTI_MPC)
x0 = np.zeros((1, dim_state))
x0_glob = np.zeros((1, dim_state))
x0[0, :] = ClosedLoopLMPC.x[0, :]
x0_glob[0, :] = ClosedLoopLMPC.x_glob[0, :]
for it in range(numSS_it, Laps):
ClosedLoopLMPC.updateInitialConditions(x0, x0_glob)
LMPCmodel.Sim(ClosedLoopLMPC, LMPC, LMPCOpenLoopData)
LMPC.addTrajectory(ClosedLoopLMPC)
if LMPC.feasible == 0:
break
else:
# Reset Initial Conditions
x0[0, :] = ClosedLoopLMPC.x[ClosedLoopLMPC.SimTime, :] - np.array([0, 0, 0, 0, map.TrackLength, 0])
x0_glob[0, :] = ClosedLoopLMPC.x_glob[ClosedLoopLMPC.SimTime, :]
file_data = open(sys.path[0] + '\data\LMPController.obj', 'wb')
pickle.dump(ClosedLoopLMPC, file_data)
pickle.dump(LMPC, file_data)
pickle.dump(LMPCOpenLoopData, file_data)
file_data.close()
print("===== LMPC terminated")
laptimes = np.zeros((50, 2))
# Laptime Plot
for i in range(0, LMPC.it):
print("Lap time at iteration ", i, " is ", LMPC.Qfun[0, i] * dt, "s")
laptimes[i, 0] = LMPC.Qfun[0, i] * dt
laptimes[i, 1] = i
plt.figure(3)
plt.plot(laptimes[:, 1], laptimes[:, 0], '-o')
plt.ylabel('Lap Time (sec)')
plt.xlabel('Lap Number')
print("===== Start Plotting")
plotTrajectory(map, ClosedLoopDataPID.x, ClosedLoopDataPID.x_glob, ClosedLoopDataPID.u)
plotTrajectory(map, ClosedLoopDataLTI_MPC.x, ClosedLoopDataLTI_MPC.x_glob, ClosedLoopDataLTI_MPC.u)
plotTrajectory(map, ClosedLoopDataLTV_MPC.x, ClosedLoopDataLTV_MPC.x_glob, ClosedLoopDataLTV_MPC.u)
plotCost(LMPC.Qfun, int(TimeLMPC / dt) + 1)
plotC(LMPC.Qfun_SelectedTot, numSS_it)
plotQC(LMPC.Qcost, numSS_Points)
plotQt(LMPC.qq)
plotQcost(LMPC.costSolved)
plotClosedLoopLMPC(LMPC, map)
animation_xy(map, LMPCOpenLoopData, LMPC, Laps - 2)
animation_states(map, LMPCOpenLoopData, LMPC, 10)
unityTestChangeOfCoordinates(map, ClosedLoopDataPID)
unityTestChangeOfCoordinates(map, ClosedLoopDataLTI_MPC)
unityTestChangeOfCoordinates(map, ClosedLoopLMPC)
saveGif_xyResults(map, LMPCOpenLoopData, LMPC, Laps-1)
Save_statesAnimation(map, LMPCOpenLoopData, LMPC, 5)
plt.show()
if __name__ == "__main__":
main()
```
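The LTI model used above comes from `Regression(ClosedLoopDataPID.x, ClosedLoopDataPID.u, lamb)`. As a rough sketch of what such a ridge-regularized least-squares identification typically does (the actual `Utilities.Regression` may differ in detail):

```python
import numpy as np

def regression_sketch(x, u, lamb):
    """Fit x_{k+1} ~ A x_k + B u_k by ridge-regularized least squares."""
    Y = x[1:, :]                           # successor states
    X = np.hstack([x[:-1, :], u[:-1, :]])  # regressors [x_k, u_k]
    n = x.shape[1]
    W = np.linalg.solve(X.T @ X + lamb * np.eye(X.shape[1]), X.T @ Y)
    A, B = W[:n, :].T, W[n:, :].T
    error = Y - X @ W
    return A, B, error
```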
|
{
"source": "JeffGe05/Qrobots-Python",
"score": 3
}
|
#### File: JeffGe05/Qrobots-Python/baseplayer.py
```python
import random
class BasePlayer:
ROLE = {"unassigned": "未分配身份"}
def __init__(self, sender):
self.user_id = sender["user_id"]
self.name = sender.get("card") or sender["nickname"]
self.role = "unassigned"
self.player_id = None
# def __hash__(self):
# return hash(self.user_id)
def assignrole(self, role):
self.role = role
@property
def rolename(self):
return self.ROLE[self.role]
@property
def string(self):
return f"[{self.player_id}] {self.name}"
@classmethod
def updaterolenames(cls, roles: dict):
cls.ROLE.update(roles)
class BaseCampaign:
PlayerConfig = dict()
def __init__(self, group_id):
self.group_id = group_id
self.players = []
self._game = self._start()
self.messages = []
self.acceptedplayers = ()
self.commands = dict()
self.commandparsers = dict()
self.gameended = False
def addplayer(self, sender):
raise NotImplementedError
@property
def playernum(self):
"""返回玩家数量"""
return len(self.players)
@property
def allplayerset(self):
"""返回是否已收到所有玩家的指令"""
return all((cmd is not None for cmd in self.commands.values()))
def assignroles(self):
"""给玩家分配角色,并打乱座次"""
if self.playernum not in self.PlayerConfig:
print("玩家数量不够或超出。") # TODO
return
roles = self.PlayerConfig[self.playernum].copy()
player_ids = list(range(1, self.playernum + 1))
random.shuffle(roles)
random.shuffle(player_ids)
for i, p in enumerate(self.players):
p.assignrole(roles[i])
p.player_id = player_ids[i]
self.players.sort(key=lambda p: p.player_id)
def _start(self):
yield NotImplemented
raise NotImplementedError
def resume(self):
try:
return next(self._game)
except StopIteration:
self.gameended = True
return None
def addprivatemsg(self, player, msg):
if isinstance(player, int):
user_id = player
elif isinstance(player, BasePlayer):
user_id = player.user_id
else:
raise ValueError
        # Debug: prepend the player's QQ user id to the message
# msg = f"[{user_id}] {msg}"
self.messages.append(({"user_id": user_id}, msg))
def addprivatemsgforall(self, msg):
for p in self.players:
self.addprivatemsg(p, msg)
def addgroupmsg(self, msg):
self.messages.append(({"group_id": self.group_id}, msg))
def yieldmessages(self):
messages = self.messages
self.messages = []
return messages
def acceptcommandfrom(self, acceptedplayers, commandparsers):
"""设置允许接收指令的玩家。
Arguments:
acceptedplayers {str, iterable[Player]} -- 允许发送指令的玩家,'all'代表所有玩家
commandparsers {callable, iterable[callable]} -- 设置玩家指令的解析器,单个代表解析器设置给所有玩家
"""
if isinstance(acceptedplayers, str) and acceptedplayers == "all": # 所有玩家
acceptedplayers = self.players
elif isinstance(acceptedplayers, (set, tuple, list)): # 部分玩家
# acceptedplayers = acceptedplayers
pass
else:
raise ValueError
        self.acceptedplayers = tuple(p.user_id for p in acceptedplayers)  # convert to QQ user ids (int)
        acceptedplayers = None  # Debug: prevent accidental use of the local variable below
        self.commands = dict.fromkeys(self.acceptedplayers, None)  # clear the command cache
        if callable(commandparsers):  # a single parser is assigned to all acceptedplayers
self.commandparsers = dict.fromkeys(self.acceptedplayers, commandparsers)
        elif isinstance(commandparsers, (tuple, list)):  # a list of parsers is assigned to acceptedplayers in order
            if len(self.acceptedplayers) != len(commandparsers):  # length mismatch, raise ValueError
                raise ValueError
            if isinstance(self.acceptedplayers, set):  # sets are unordered, raise ValueError
raise ValueError
self.commandparsers = dict(zip(self.acceptedplayers, commandparsers))
else:
raise ValueError
def handlemessage(self, context):
raise NotImplementedError
@property
def allplayerstring(self):
return "\n".join((p.string for p in self.players))
def getplayerbyid(self, id):
for p in self.players:
if p.player_id == id:
return p
def playerstringat(self, player_ids):
if isinstance(player_ids, int):
return self.getplayerbyid(player_ids).string
player_ids = list(player_ids)
player_ids.sort()
return "\n".join(
(self.getplayerbyid(player_id).string for player_id in player_ids)
)
def __contains__(self, user_id):
"""判断QQ号是否是本局游戏的玩家。"""
return user_id in (p.user_id for p in self.players)
```
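A hypothetical concrete player, just to show how the BasePlayer API above is meant to be used (the role names and sender dict are made up for illustration):

```python
class DemoPlayer(BasePlayer):
    ROLE = dict(BasePlayer.ROLE)  # copy so the base mapping stays untouched

DemoPlayer.updaterolenames({"wolf": "Werewolf", "villager": "Villager"})
p = DemoPlayer({"user_id": 12345, "nickname": "Alice"})
p.assignrole("wolf")
p.player_id = 1
print(p.string, p.rolename)  # [1] Alice Werewolf
```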
#### File: JeffGe05/Qrobots-Python/commandparser.py
```python
import re
class ErrorString(str):
pass
def getnumberparser(include=None, exclude=None, commandnumber=1, maxnum=None):
"""返回一个数字解析器。
Keyword Arguments:
include {list, tuple, set} -- 允许范围内的数字 (default: {None})
exclude {list, tuple, set} -- 从允许范围内排除的数字 (default: {None})
commandnumber {int} -- 需要的指令数量 (default: {1})
maxnum {int} -- 若 include 不给定,自动设置 include 为 range(1, maxnum + 1) (default: {None})
"""
if include is None:
include = set(range(1, maxnum + 1))
if exclude is None:
exclude = set()
# if exclude is not None:
# include -= set(exclude)
def commandparser(message: str):
pattern = (
r"^[^\d]*"
+ r"[,.\s\u3000\u3001\u3002\uFF0C\uFF1B\u548C]".join(
[r"(\d+)"] * commandnumber
)
+ r"[^\d]*$"
)
m = re.match(pattern, message)
try:
if m is None:
return ErrorString("INVALID_MESSAGE")
res = set(map(int, m.groups()))
if len(res) != commandnumber:
return ErrorString("DUPLICATED_NUMBERS")
if any((i not in include for i in res)):
return ErrorString("INVALID_MESSAGE")
if any((i in exclude for i in res)):
return ErrorString("EXCLUDED_NUMBER")
except ValueError:
return ErrorString("INVALID_MESSAGE")
return res
return commandparser
def getboolparser(yes=None, no=None):
"""返回一个二值(布尔)解析器。
Keyword Arguments:
yes {set, tuple, list} -- 真值描述集合 (default: {None})
no {set, tuple, list} -- 假值描述集合 (default: {None})
"""
if yes is None:
yes = {"y"}
if no is None:
no = {"n"}
def commandparser(message):
msgyes = any((y in message for y in yes))
msgno = any((n in message for n in no))
if msgno:
return False
elif msgyes:
return True
else:
return ErrorString("INVALID_MESSAGE")
return commandparser
if __name__ == "__main__":
commandparser = getnumberparser(exclude=(3,), maxnum=10, commandnumber=3)
try:
while True:
print(commandparser(input()))
except KeyboardInterrupt:
pass
```
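A short illustration of `getboolparser`; the yes/no word sets below are arbitrary examples, not the ones used by the game:

```python
parser = getboolparser(yes={"yes", "confirm"}, no={"no", "cancel"})
print(parser("please confirm"))  # True
print(parser("cancel it"))       # False
print(parser("???"))             # INVALID_MESSAGE (an ErrorString)
```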
|
{
"source": "JeffGebhart/leanix-python-library",
"score": 3
}
|
#### File: leanix-python-library/LeanIX/__init__.py
```python
import requests
import json
from .Graph import Graph
from .FactSheets import FactSheets
from .Users import Users
from .Metrics import Metrics
from .Polls import Polls
class LeanIX:
def __init__(self,api_token="",workspaceid="",baseurl="https://us.leanix.net/"):
""" Authenticates to LeanIX with the given API Token and returns the Authorization header for use in future calls
Retuns a class with subclasses pointing to the other options:
.factsheets
.users
.graph
"""
self.__api_token = api_token
self.workspaceid = workspaceid
self.baseurl = baseurl
if not self.baseurl.endswith("/"):
self.baseurl += "/" # If URL is not passed in with a trailing /, add it
self.auth()
self.graph = Graph(self)
self.factsheets = FactSheets(self)
self.users = Users(self)
self.metrics = Metrics(self)
self.polls = Polls(self)
def __repr__(self):
return f"LeanIX Object for {self.workspaceid}"
def auth(self):
"""Authenticate to LeanIX using the API token in the class"""
auth_url = f"{self.baseurl}/services/mtm/v1/oauth2/token"
response = requests.post(auth_url, auth=('apitoken', self.__api_token),
data={'grant_type': 'client_credentials'})
response.raise_for_status()
self._access_token = response.json()['access_token']
self._auth_header = 'Bearer ' + self._access_token
self.header = {'Authorization': self._auth_header,"Content-Type":"application/json"}
def _sendrequest(self,method,parameters=None,data=None,verb="get"):
api_url =f'{self.baseurl}{method}'
allrows = []
if verb.lower() == "get":
response = requests.get(api_url,headers=self.header,params=parameters)
jresp = response.json()
if jresp['total'] == len(jresp['data']):
allrows = jresp['data']
else:
allrows+=jresp['data']
while jresp['total'] > len(allrows):
parameters['page']+=1
allrows += requests.get(api_url,headers=self.header,params=parameters).json()['data']
elif verb.lower() == "post":
            return requests.post(api_url, headers=self.header, data=json.dumps(data), params=parameters)
        return allrows
```
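A minimal usage sketch; the token and workspace id below are placeholders, and constructing the object immediately authenticates against the API:

```python
lix = LeanIX(api_token="YOUR_API_TOKEN",
             workspaceid="YOUR_WORKSPACE_ID",
             baseurl="https://us.leanix.net/")
# Sub-clients are attached to the instance:
#   lix.factsheets, lix.users, lix.graph, lix.metrics, lix.polls
```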
|
{
"source": "jeffgeee/congress-legislators",
"score": 3
}
|
#### File: congress-legislators/scripts/house_contacts.py
```python
import requests
import lxml
import re
from datetime import datetime
from utils import load_data, save_data, parse_date
def run():
today = datetime.now().date()
y = load_data("legislators-current.yaml")
# TODO use download util?
xml = requests.get("http://clerk.house.gov/xml/lists/MemberData.xml")
root=lxml.etree.fromstring(xml.content)
for moc in y:
try:
term = moc["terms"][-1]
except IndexError:
print("Member has no terms", moc)
continue
if term["type"] != "rep": continue
if today < parse_date(term["start"]) or today > parse_date(term["end"]):
print("Member's last listed term is not current", moc, term["start"])
continue
if "class" in term: del term["class"]
ssdd = "%s%02d" % (term["state"], term["district"])
query_str = "./members/member/[statedistrict='%s']" % ssdd
# TODO: Follow up
query_str = query_str.replace("AS00", "AQ00")
#print(query_str)
mi = root.findall(query_str)[0].find('member-info')
if (mi.find('bioguideID').text != moc['id'].get('bioguide')):
print("Warning: Bioguide ID did not match for %s%02d (%s != %s)" % (
term["state"], term["district"],
mi.find('bioguideID').text, moc['id']['bioguide']))
        # for now, no automatic name updates since there is disagreement on how to handle
# firstname = mi.find('firstname').text
# middlename = mi.find('middlename').text #could be empty
# lastname = mi.find('lastname').text
#TODO: follow up, why no official name?
if mi.find('official-name') is None or mi.find('official-name').text is None:
print("Warning: No official-name tag for %s" % ssdd)
officialname = None
else:
officialname = re.sub("'", "’", mi.find('official-name').text)
office_room = mi.find('office-room').text
office_building = mi.find('office-building').text
office_building_full = office_building.replace("RHOB", "Rayburn House Office Building")
office_building_full = office_building_full.replace("CHOB", "Cannon House Office Building")
office_building_full = office_building_full.replace("LHOB", "Longworth House Office Building")
office_zip = mi.find('office-zip').text
office_zip_suffix = mi.find('office-zip-suffix').text
office = "{} {}".format(office_room, office_building_full)
address = "{} {} Washington DC {}-{}".format(office_room, office_building_full, office_zip, office_zip_suffix)
phone = mi.find('phone').text
phone_parsed = re.sub("^\((\d\d\d)\) ", lambda m : m.group(1) + "-", phone) # replace (XXX) area code with XXX- for compatibility w/ existing format
        # for now, no automatic name updates since there is disagreement on how to handle
# moc["name"]["first"] = firstname
# if (middlename):
# moc["name"]["middle"] = middlename
# else:
# if ("middle" in moc["name"]):
# del moc["name"]["middle"]
# moc["name"]["last"] = lastname
# TODO: leave if none?
if (officialname):
moc["name"]["official_full"] = officialname
term["address"] = address
term["office"] = office
term["phone"] = phone_parsed
save_data(y, "legislators-current.yaml")
if __name__ == '__main__':
run()
```
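The `re.sub` above just rewrites the Clerk's "(XXX) " area-code prefix into the dash-separated form used in the data files. A quick illustration with a made-up number:

```python
import re
phone = "(202) 225-0000"  # made-up example
print(re.sub(r"^\((\d\d\d)\) ", lambda m: m.group(1) + "-", phone))  # 202-225-0000
```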
|
{
"source": "jeffgerhard/iatp",
"score": 3
}
|
#### File: jeffgerhard/iatp/iatp.py
```python
import sys
import internetarchive as ia
import os
from shutil import rmtree
from tempfile import mkdtemp
def assert_title_page(identifier, titlepage, silent=False):
tp = str(titlepage)
result = list()
# first download scandata.xml file from the item
    try:
        item = ia.get_item(identifier)
    except Exception:
        raise ValueError('IA identifier not found.')
scandata = identifier + '_scandata.xml'
for f in item.files:
if f['name'] == scandata:
ia.download(identifier, files=scandata, silent=silent)
with open(os.path.join(identifier, scandata), 'r') as fh:
xml = fh.read()
nochange = True
match = False
final = list()
for line in xml.splitlines():
newline = line
if 'leafNum' in line: # like: <page leafNum="0">
leafnum = line.split('"')[1]
if leafnum == tp:
match = True
if 'pageType' in line: # like: <pageType>Normal</pageType>
if match is True:
if 'Title' in line:
result.append('Title page is already declared.')
else:
newline = line.replace('Normal', 'Title')
nochange = False
match = False # don't match in the rest of this document
elif 'Title' in line: # erroneous title page from IA
newline = line.replace('Title', 'Normal')
nochange = False
final.append(newline)
if nochange is True:
result.append('No changes detected.')
else:
with open(os.path.join(identifier, scandata), 'w') as fh:
fh.write('\n'.join(final))
result.append('Generated new scandata.xml file and uploading...')
ia.upload(identifier, files=[os.path.join(identifier, scandata)])
result.append('Success!')
rmtree(identifier)
return '\n'.join(result)
if __name__ == "__main__":
if len(sys.argv) > 1:
identifier = sys.argv[1]
titlepage = sys.argv[2]
d = mkdtemp()
os.chdir(d)
print(assert_title_page(identifier, titlepage))
rmtree(d, ignore_errors=True)
```
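Typical invocation, based on the `__main__` block above; the identifier and leaf number are placeholders for a real Internet Archive item:

```python
# From the command line:
#   python iatp.py some_ia_identifier 7
# Or from Python, inside a scratch working directory:
from iatp import assert_title_page
print(assert_title_page("some_ia_identifier", 7, silent=True))
```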
|