ext | sha | content |
---|---|---|
py | b40c248c01fc5e7a57c5ffd9aeacca8040d63eb5 | from django.apps import apps
from django.contrib.auth.models import Permission
from django.db.models import Q
from ..utils.auth import GROUP_ADMIN_PK, GROUP_DEFAULT_PK
from .models import Group, User
def get_permission_change_data(sender, permissions=None, **kwargs):
"""
Yields all necessary collections if the 'users.can_see_name' permission changes.
"""
users_app = apps.get_app_config(app_label="users")
for permission in permissions:
# There can be only one 'users.can_see_name' permission. If it is among the changed permissions, return the data.
if (
permission.content_type.app_label == users_app.label
and permission.codename == "can_see_name"
):
yield from users_app.get_startup_elements()
def create_builtin_groups_and_admin(**kwargs):
"""
Creates the builtin groups: Default, Admin, Delegates, Staff and Committees.
Creates the builtin user: admin.
"""
# Check whether there are groups in the database.
if Group.objects.exists():
# Do completely nothing if there are already some groups in the database.
return
permission_strings = (
"agenda.can_be_speaker",
"agenda.can_manage",
"agenda.can_manage_list_of_speakers",
"agenda.can_see",
"agenda.can_see_internal_items",
"assignments.can_manage",
"assignments.can_nominate_other",
"assignments.can_nominate_self",
"assignments.can_see",
"core.can_manage_config",
"core.can_manage_logos_and_fonts",
"core.can_manage_projector",
"core.can_manage_tags",
"core.can_manage_chat",
"core.can_see_frontpage",
"core.can_see_projector",
"core.can_use_chat",
"mediafiles.can_manage",
"mediafiles.can_see",
"mediafiles.can_see_hidden",
"mediafiles.can_upload",
"motions.can_create",
"motions.can_create_amendments",
"motions.can_manage",
"motions.can_manage_metadata",
"motions.can_see",
"motions.can_support",
"users.can_manage",
"users.can_see_extra_data",
"users.can_see_name",
)
permission_query = Q()
permission_dict = {}
# Load all permissions
for permission_string in permission_strings:
app_label, codename = permission_string.split(".")
query_part = Q(content_type__app_label=app_label) & Q(codename=codename)
permission_query = permission_query | query_part
for permission in Permission.objects.select_related("content_type").filter(
permission_query
):
permission_string = ".".join(
(permission.content_type.app_label, permission.codename)
)
permission_dict[permission_string] = permission
# Default (pk 1 == GROUP_DEFAULT_PK)
base_permissions = (
permission_dict["agenda.can_see"],
permission_dict["agenda.can_see_internal_items"],
permission_dict["assignments.can_see"],
permission_dict["core.can_see_frontpage"],
permission_dict["core.can_see_projector"],
permission_dict["mediafiles.can_see"],
permission_dict["motions.can_see"],
permission_dict["users.can_see_name"],
)
group_default = Group(pk=GROUP_DEFAULT_PK, name="Default")
group_default.save(skip_autoupdate=True)
group_default.permissions.add(*base_permissions)
# Admin (pk 2 == GROUP_ADMIN_PK)
group_admin = Group(pk=GROUP_ADMIN_PK, name="Admin")
group_admin.save(skip_autoupdate=True)
# Delegates (pk 3)
delegates_permissions = (
permission_dict["agenda.can_see"],
permission_dict["agenda.can_see_internal_items"],
permission_dict["agenda.can_be_speaker"],
permission_dict["assignments.can_see"],
permission_dict["assignments.can_nominate_other"],
permission_dict["assignments.can_nominate_self"],
permission_dict["core.can_see_frontpage"],
permission_dict["core.can_see_projector"],
permission_dict["mediafiles.can_see"],
permission_dict["motions.can_see"],
permission_dict["motions.can_create"],
permission_dict["motions.can_create_amendments"],
permission_dict["motions.can_support"],
permission_dict["users.can_see_name"],
)
group_delegates = Group(pk=3, name="Delegates")
group_delegates.save(skip_autoupdate=True)
group_delegates.permissions.add(*delegates_permissions)
# Staff (pk 4)
staff_permissions = (
permission_dict["agenda.can_see"],
permission_dict["agenda.can_see_internal_items"],
permission_dict["agenda.can_be_speaker"],
permission_dict["agenda.can_manage"],
permission_dict["agenda.can_manage_list_of_speakers"],
permission_dict["assignments.can_see"],
permission_dict["assignments.can_manage"],
permission_dict["assignments.can_nominate_other"],
permission_dict["assignments.can_nominate_self"],
permission_dict["core.can_see_frontpage"],
permission_dict["core.can_see_projector"],
permission_dict["core.can_manage_projector"],
permission_dict["core.can_manage_tags"],
permission_dict["core.can_use_chat"],
permission_dict["mediafiles.can_see"],
permission_dict["mediafiles.can_manage"],
permission_dict["mediafiles.can_upload"],
permission_dict["motions.can_see"],
permission_dict["motions.can_create"],
permission_dict["motions.can_create_amendments"],
permission_dict["motions.can_manage"],
permission_dict["motions.can_manage_metadata"],
permission_dict["users.can_see_name"],
permission_dict["users.can_manage"],
permission_dict["users.can_see_extra_data"],
permission_dict["mediafiles.can_see_hidden"],
)
group_staff = Group(pk=4, name="Staff")
group_staff.save(skip_autoupdate=True)
group_staff.permissions.add(*staff_permissions)
# Committees (pk 5)
committees_permissions = (
permission_dict["agenda.can_see"],
permission_dict["agenda.can_see_internal_items"],
permission_dict["assignments.can_see"],
permission_dict["core.can_see_frontpage"],
permission_dict["core.can_see_projector"],
permission_dict["mediafiles.can_see"],
permission_dict["motions.can_see"],
permission_dict["motions.can_create"],
permission_dict["motions.can_create_amendments"],
permission_dict["motions.can_support"],
permission_dict["users.can_see_name"],
)
group_committee = Group(pk=5, name="Committees")
group_committee.save(skip_autoupdate=True)
group_committee.permissions.add(*committees_permissions)
# Create or reset admin user
User.objects.create_or_reset_admin_user()
# After each group was created, the permissions (many-to-many fields) were
# added to the group. But we do not have to update the cache by calling
# inform_changed_data() because the cache is updated on server start.
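# Wiring sketch (assumed, not part of this file): both receivers are normally
# connected in the users AppConfig, e.g.
#
#     permission_change.connect(get_permission_change_data)   # signal name is an assumption
#     post_migrate.connect(create_builtin_groups_and_admin)    # or any post-migration hook
#
# Only the receiver signatures come from this module.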
|
py | b40c24f14a7b25f32a6cde9b0584ee0eb5be0941 | import csv
from typing import Generator, Optional, List, Tuple
from sklearn.preprocessing import LabelBinarizer
from dostoevsky.tokenization import BaseTokenizer
from dostoevsky.word_vectors import BaseWordVectorsContainer
class BaseCorpusContainer:
def get_prepared_data(self) -> Generator[Tuple[List[List[float]], List[int]], None, None]:
raise NotImplementedError
class RusentimentCorpus(BaseCorpusContainer):
CSV_DELIMITER: str = ','
CSV_QUOTECHAR: str = '"'
UNKNOWN_LABEL: str = 'unknown'
LABELS: List[str] = [
'positive',
'negative',
'neutral',
'skip',
'speech',
UNKNOWN_LABEL,
]
def __init__(
self,
data_path: Optional[str],
tokenizer: BaseTokenizer,
word_vectors_container: BaseWordVectorsContainer,
lemmatize: bool = True,
):
self.data_path = data_path
self.tokenizer = tokenizer
self.lemmatize = lemmatize
self.word_vectors_container = word_vectors_container
self.label_encoder = self.get_label_encoder()
def get_label_encoder(self) -> LabelBinarizer:
label_encoder = LabelBinarizer()
return label_encoder.fit(self.LABELS)
def get_prepared_data(self) -> Generator[Tuple[List[List[float]], List[int]], None, None]:
if not self.data_path:
raise ValueError('data_path is None')
with open(self.data_path) as source:
reader = csv.reader(
source,
delimiter=self.CSV_DELIMITER,
quotechar=self.CSV_QUOTECHAR,
)
for i, (label, text) in enumerate(reader):
if i == 0: # skip headers
continue
encoded_label, *_ = self.label_encoder.transform([label])
tokens = self.tokenizer.split(text, lemmatize=self.lemmatize)
word_vectors = self.word_vectors_container.get_word_vectors(tokens)
if not any(vector.any() for vector in word_vectors): # type: ignore
# FIXME: find better embeddings
encoded_label, *_ = self.label_encoder.transform([
self.UNKNOWN_LABEL
])
yield word_vectors, encoded_label
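# Usage sketch (file name and concrete tokenizer are assumptions; only the
# constructor signature and get_prepared_data() come from this module):
#
#     from dostoevsky.tokenization import RegexTokenizer
#     corpus = RusentimentCorpus(
#         data_path='rusentiment_random_posts.csv',
#         tokenizer=RegexTokenizer(),
#         word_vectors_container=word_vectors,  # any BaseWordVectorsContainer
#     )
#     for vectors, encoded_label in corpus.get_prepared_data():
#         ...  # feed into a classifier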
|
py | b40c2537bf110f76453c6c761da4b86546a7ad7a | #!/usr/bin/env python3
# Copyright (c) 2017-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test getblockstats rpc call
#
from test_framework.test_framework import MAGATestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
import json
import os
TESTSDIR = os.path.dirname(os.path.realpath(__file__))
class GetblockstatsTest(MAGATestFramework):
start_height = 101
max_stat_pos = 2
def add_options(self, parser):
parser.add_argument('--gen-test-data', dest='gen_test_data',
default=False, action='store_true',
help='Generate test data')
parser.add_argument('--test-data', dest='test_data',
default='data/rpc_getblockstats.json',
action='store', metavar='FILE',
help='Test data file')
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
self.supports_cli = False
def get_stats(self):
return [self.nodes[0].getblockstats(hash_or_height=self.start_height + i) for i in range(self.max_stat_pos+1)]
def generate_test_data(self, filename):
mocktime = 1525107225
self.nodes[0].setmocktime(mocktime)
self.nodes[0].generate(101)
address = self.nodes[0].get_deterministic_priv_key().address
self.nodes[0].sendtoaddress(address=address, amount=10, subtractfeefromamount=True)
self.nodes[0].generate(1)
self.sync_all()
self.nodes[0].sendtoaddress(address=address, amount=10, subtractfeefromamount=True)
self.nodes[0].sendtoaddress(address=address, amount=10, subtractfeefromamount=False)
self.nodes[0].settxfee(amount=0.003)
self.nodes[0].sendtoaddress(address=address, amount=1, subtractfeefromamount=True)
self.sync_all()
self.nodes[0].generate(1)
self.expected_stats = self.get_stats()
blocks = []
tip = self.nodes[0].getbestblockhash()
blockhash = None
height = 0
while tip != blockhash:
blockhash = self.nodes[0].getblockhash(height)
blocks.append(self.nodes[0].getblock(blockhash, 0))
height += 1
to_dump = {
'blocks': blocks,
'mocktime': int(mocktime),
'stats': self.expected_stats,
}
with open(filename, 'w', encoding="utf8") as f:
json.dump(to_dump, f, sort_keys=True, indent=2)
def load_test_data(self, filename):
with open(filename, 'r', encoding="utf8") as f:
d = json.load(f)
blocks = d['blocks']
mocktime = d['mocktime']
self.expected_stats = d['stats']
# Set the timestamps from the file so that the nodes can get out of Initial Block Download
self.nodes[0].setmocktime(mocktime)
self.sync_all()
for b in blocks:
self.nodes[0].submitblock(b)
def run_test(self):
test_data = os.path.join(TESTSDIR, self.options.test_data)
if self.options.gen_test_data:
self.generate_test_data(test_data)
else:
self.load_test_data(test_data)
self.sync_all()
stats = self.get_stats()
# Make sure all valid statistics are included but nothing else is
expected_keys = self.expected_stats[0].keys()
assert_equal(set(stats[0].keys()), set(expected_keys))
assert_equal(stats[0]['height'], self.start_height)
assert_equal(stats[self.max_stat_pos]['height'], self.start_height + self.max_stat_pos)
for i in range(self.max_stat_pos+1):
self.log.info('Checking block %d\n' % (i))
assert_equal(stats[i], self.expected_stats[i])
# Check selecting block by hash too
blockhash = self.expected_stats[i]['blockhash']
stats_by_hash = self.nodes[0].getblockstats(hash_or_height=blockhash)
assert_equal(stats_by_hash, self.expected_stats[i])
# Make sure each stat can be queried on its own
for stat in expected_keys:
for i in range(self.max_stat_pos+1):
result = self.nodes[0].getblockstats(hash_or_height=self.start_height + i, stats=[stat])
assert_equal(list(result.keys()), [stat])
if result[stat] != self.expected_stats[i][stat]:
self.log.info('result[%s] (%d) failed, %r != %r' % (
stat, i, result[stat], self.expected_stats[i][stat]))
assert_equal(result[stat], self.expected_stats[i][stat])
# Make sure only the selected statistics are included (more than one)
some_stats = {'minfee', 'maxfee'}
stats = self.nodes[0].getblockstats(hash_or_height=1, stats=list(some_stats))
assert_equal(set(stats.keys()), some_stats)
# Test invalid parameters raise the proper json exceptions
tip = self.start_height + self.max_stat_pos
assert_raises_rpc_error(-8, 'Target block height %d after current tip %d' % (tip+1, tip),
self.nodes[0].getblockstats, hash_or_height=tip+1)
assert_raises_rpc_error(-8, 'Target block height %d is negative' % (-1),
self.nodes[0].getblockstats, hash_or_height=-1)
# Make sure invalid stats are not allowed
inv_sel_stat = 'asdfghjkl'
inv_stats = [
[inv_sel_stat],
['minfee' , inv_sel_stat],
[inv_sel_stat, 'minfee'],
['minfee', inv_sel_stat, 'maxfee'],
]
for inv_stat in inv_stats:
assert_raises_rpc_error(-8, 'Invalid selected statistic %s' % inv_sel_stat,
self.nodes[0].getblockstats, hash_or_height=1, stats=inv_stat)
# Make sure we aren't always returning inv_sel_stat as the culprit stat
assert_raises_rpc_error(-8, 'Invalid selected statistic aaa%s' % inv_sel_stat,
self.nodes[0].getblockstats, hash_or_height=1, stats=['minfee' , 'aaa%s' % inv_sel_stat])
# Mainchain's genesis block shouldn't be found on regtest
assert_raises_rpc_error(-5, 'Block not found', self.nodes[0].getblockstats,
hash_or_height='000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f')
# Invalid number of args
assert_raises_rpc_error(-1, 'getblockstats hash_or_height ( stats )', self.nodes[0].getblockstats, '00', 1, 2)
assert_raises_rpc_error(-1, 'getblockstats hash_or_height ( stats )', self.nodes[0].getblockstats)
if __name__ == '__main__':
GetblockstatsTest().main()
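# Regenerating the cached fixture (invocation path is an assumption, depending on
# the test runner layout):
#
#     test/functional/rpc_getblockstats.py --gen-test-data --test-data=data/rpc_getblockstats.json
#
# Without --gen-test-data the test replays the blocks stored in that JSON file and
# compares getblockstats output against the recorded stats.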
|
py | b40c26069e18276e4fcf4d9d16b15d7eec76a455 | #!/usr/bin/env python
import os, sys, subprocess, argparse, shutil, glob, re
import logging as log
import xml.etree.ElementTree as ET
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
class Fail(Exception):
def __init__(self, text=None):
self.t = text
def __str__(self):
return "ERROR" if self.t is None else self.t
def execute(cmd, shell=False):
try:
log.debug("Executing: %s" % cmd)
log.info('Executing: ' + ' '.join(cmd))
retcode = subprocess.call(cmd, shell=shell)
if retcode < 0:
raise Fail("Child was terminated by signal:" %s -retcode)
elif retcode > 0:
raise Fail("Child returned: %s" % retcode)
except OSError as e:
raise Fail("Execution failed: %d / %s" % (e.errno, e.strerror))
def rm_one(d):
d = os.path.abspath(d)
if os.path.exists(d):
if os.path.isdir(d):
log.info("Removing dir: %s", d)
shutil.rmtree(d)
elif os.path.isfile(d):
log.info("Removing file: %s", d)
os.remove(d)
def check_dir(d, create=False, clean=False):
d = os.path.abspath(d)
log.info("Check dir %s (create: %s, clean: %s)", d, create, clean)
if os.path.exists(d):
if not os.path.isdir(d):
raise Fail("Not a directory: %s" % d)
if clean:
for x in glob.glob(os.path.join(d, "*")):
rm_one(x)
else:
if create:
os.makedirs(d)
return d
def determine_engine_version(manifest_path):
with open(manifest_path, "rt") as f:
return re.search(r'android:versionName="(\d+\.\d+)"', f.read(), re.MULTILINE).group(1)
def determine_opencv_version(version_hpp_path):
# version in 2.4 - CV_VERSION_EPOCH.CV_VERSION_MAJOR.CV_VERSION_MINOR.CV_VERSION_REVISION
# version in master - CV_VERSION_MAJOR.CV_VERSION_MINOR.CV_VERSION_REVISION-CV_VERSION_STATUS
with open(version_hpp_path, "rt") as f:
data = f.read()
major = re.search(r'^#define\W+CV_VERSION_MAJOR\W+(\d+)$', data, re.MULTILINE).group(1)
minor = re.search(r'^#define\W+CV_VERSION_MINOR\W+(\d+)$', data, re.MULTILINE).group(1)
revision = re.search(r'^#define\W+CV_VERSION_REVISION\W+(\d+)$', data, re.MULTILINE).group(1)
version_status = re.search(r'^#define\W+CV_VERSION_STATUS\W+"([^"]*)"$', data, re.MULTILINE).group(1)
return "%(major)s.%(minor)s.%(revision)s%(version_status)s" % locals()
# shutil.move fails if dst exists
def move_smart(src, dst):
def move_recurse(subdir):
s = os.path.join(src, subdir)
d = os.path.join(dst, subdir)
if os.path.exists(d):
if os.path.isdir(d):
for item in os.listdir(s):
move_recurse(os.path.join(subdir, item))
elif os.path.isfile(s):
shutil.move(s, d)
else:
shutil.move(s, d)
move_recurse('')
# shutil.copytree fails if dst exists
def copytree_smart(src, dst):
def copy_recurse(subdir):
s = os.path.join(src, subdir)
d = os.path.join(dst, subdir)
if os.path.exists(d):
if os.path.isdir(d):
for item in os.listdir(s):
copy_recurse(os.path.join(subdir, item))
elif os.path.isfile(s):
shutil.copy2(s, d)
else:
if os.path.isdir(s):
shutil.copytree(s, d)
elif os.path.isfile(s):
shutil.copy2(s, d)
copy_recurse('')
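# Merge behaviour sketch (paths are illustrative): with src/a.txt, src/sub/b.txt
# and an already existing dst/sub/, copytree_smart('src', 'dst') copies a.txt
# into dst/ and b.txt into dst/sub/ instead of failing the way a plain
# shutil.copytree('src', 'dst') would; move_smart does the same but moves files.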
#===================================================================================================
class ABI:
def __init__(self, platform_id, name, toolchain, ndk_api_level = None, cmake_vars = dict()):
self.platform_id = platform_id # platform code to add to apk version (for cmake)
self.name = name # general name (official Android ABI identifier)
self.toolchain = toolchain # toolchain identifier (for cmake)
self.cmake_vars = dict(
ANDROID_STL="gnustl_static",
ANDROID_ABI=self.name,
ANDROID_TOOLCHAIN_NAME=toolchain,
ANDROID_PLATFORM_ID=platform_id,
)
if ndk_api_level:
self.cmake_vars['ANDROID_NATIVE_API_LEVEL'] = ndk_api_level
self.cmake_vars.update(cmake_vars)
def __str__(self):
return "%s (%s)" % (self.name, self.toolchain)
def haveIPP(self):
return self.name == "x86" or self.name == "x86_64"
#===================================================================================================
class Builder:
def __init__(self, workdir, opencvdir, config):
self.workdir = check_dir(workdir, create=True)
self.opencvdir = check_dir(opencvdir)
self.config = config
self.libdest = check_dir(os.path.join(self.workdir, "o4a"), create=True, clean=True)
self.resultdest = check_dir(os.path.join(self.workdir, 'OpenCV-android-sdk'), create=True, clean=True)
self.docdest = check_dir(os.path.join(self.workdir, 'OpenCV-android-sdk', 'sdk', 'java', 'javadoc'), create=True, clean=True)
self.extra_packs = []
self.opencv_version = determine_opencv_version(os.path.join(self.opencvdir, "modules", "core", "include", "opencv2", "core", "version.hpp"))
self.engine_version = determine_engine_version(os.path.join(self.opencvdir, "platforms", "android", "service", "engine", "AndroidManifest.xml"))
self.use_ccache = False if config.no_ccache else True
def get_toolchain_file(self):
if not self.config.force_opencv_toolchain:
toolchain = os.path.join(os.environ['ANDROID_NDK'], 'build', 'cmake', 'android.toolchain.cmake')
if os.path.exists(toolchain):
return toolchain
toolchain = os.path.join(SCRIPT_DIR, "android.toolchain.cmake")
if os.path.exists(toolchain):
return toolchain
else:
raise Fail("Can't find toolchain")
def get_engine_apk_dest(self, engdest):
return os.path.join(engdest, "platforms", "android", "service", "engine", ".build")
def add_extra_pack(self, ver, path):
if path is None:
return
self.extra_packs.append((ver, check_dir(path)))
def clean_library_build_dir(self):
for d in ["CMakeCache.txt", "CMakeFiles/", "bin/", "libs/", "lib/", "package/", "install/samples/"]:
rm_one(d)
def build_library(self, abi, do_install):
cmd = ["cmake", "-GNinja"]
cmake_vars = dict(
CMAKE_TOOLCHAIN_FILE=self.get_toolchain_file(),
WITH_OPENCL="OFF",
WITH_IPP=("ON" if abi.haveIPP() else "OFF"),
WITH_TBB="ON",
BUILD_EXAMPLES="OFF",
BUILD_TESTS="OFF",
BUILD_PERF_TESTS="OFF",
BUILD_DOCS="OFF",
BUILD_ANDROID_EXAMPLES="ON",
INSTALL_ANDROID_EXAMPLES="ON",
)
if self.config.extra_modules_path is not None:
cmd.append("-DOPENCV_EXTRA_MODULES_PATH='%s'" % self.config.extra_modules_path)
if self.use_ccache == True:
cmd.append("-DNDK_CCACHE=ccache")
if do_install:
cmd.extend(["-DBUILD_TESTS=ON", "-DINSTALL_TESTS=ON"])
cmake_vars.update(abi.cmake_vars)
cmd += [ "-D%s='%s'" % (k, v) for (k, v) in cmake_vars.items() if v is not None]
cmd.append(self.opencvdir)
execute(cmd)
if do_install:
execute(["ninja"])
for c in ["libs", "dev", "java", "samples"]:
execute(["cmake", "-DCOMPONENT=%s" % c, "-P", "cmake_install.cmake"])
else:
execute(["ninja", "install/strip"])
def build_engine(self, abi, engdest):
cmd = ["cmake", "-GNinja"]
cmake_vars = dict(
CMAKE_TOOLCHAIN_FILE=self.get_toolchain_file(),
WITH_OPENCL="OFF",
WITH_IPP="OFF",
BUILD_ANDROID_SERVICE = 'ON'
)
cmake_vars.update(abi.cmake_vars)
cmd += [ "-D%s='%s'" % (k, v) for (k, v) in cmake_vars.items() if v is not None]
cmd.append(self.opencvdir)
execute(cmd)
apkdest = self.get_engine_apk_dest(engdest)
assert os.path.exists(apkdest), apkdest
# Add extra data
apkxmldest = check_dir(os.path.join(apkdest, "res", "xml"), create=True)
apklibdest = check_dir(os.path.join(apkdest, "libs", abi.name), create=True)
for ver, d in self.extra_packs + [("3.4.1", os.path.join(self.libdest, "lib"))]:
r = ET.Element("library", attrib={"version": ver})
log.info("Adding libraries from %s", d)
for f in glob.glob(os.path.join(d, abi.name, "*.so")):
log.info("Copy file: %s", f)
shutil.copy2(f, apklibdest)
if "libnative_camera" in f:
continue
log.info("Register file: %s", os.path.basename(f))
n = ET.SubElement(r, "file", attrib={"name": os.path.basename(f)})
if len(list(r)) > 0:
xmlname = os.path.join(apkxmldest, "config%s.xml" % ver.replace(".", ""))
log.info("Generating XML config: %s", xmlname)
ET.ElementTree(r).write(xmlname, encoding="utf-8")
execute(["ninja", "opencv_engine"])
execute(["ant", "-f", os.path.join(apkdest, "build.xml"), "debug"],
shell=(sys.platform == 'win32'))
# TODO: Sign apk
def build_javadoc(self):
classpaths = []
for dir, _, files in os.walk(os.environ["ANDROID_SDK"]):
for f in files:
if f == "android.jar" or f == "annotations.jar":
classpaths.append(os.path.join(dir, f))
cmd = [
"javadoc",
"-header", "OpenCV %s" % self.opencv_version,
"-nodeprecated",
"-footer", '<a href="http://docs.opencv.org">OpenCV %s Documentation</a>' % self.opencv_version,
"-public",
'-sourcepath', os.path.join(self.resultdest, 'sdk', 'java', 'src'),
"-d", self.docdest,
"-classpath", ":".join(classpaths),
'-subpackages', 'org.opencv',
]
execute(cmd)
def gather_results(self, engines):
# Copy all files
root = os.path.join(self.libdest, "install")
for item in os.listdir(root):
src = os.path.join(root, item)
dst = os.path.join(self.resultdest, item)
if os.path.isdir(src):
log.info("Copy dir: %s", item)
if self.config.force_copy:
copytree_smart(src, dst)
else:
move_smart(src, dst)
elif os.path.isfile(src):
log.info("Copy file: %s", item)
if self.config.force_copy:
shutil.copy2(src, dst)
else:
shutil.move(src, dst)
# Copy engines for all platforms
for abi, engdest in engines:
log.info("Copy engine: %s (%s)", abi, engdest)
f = os.path.join(self.get_engine_apk_dest(engdest), "bin", "opencv_engine-debug.apk")
resname = "OpenCV_%s_Manager_%s_%s.apk" % (self.opencv_version, self.engine_version, abi)
dst = os.path.join(self.resultdest, "apk", resname)
if self.config.force_copy:
shutil.copy2(f, dst)
else:
shutil.move(f, dst)
# Clean samples
path = os.path.join(self.resultdest, "samples")
for item in os.listdir(path):
item = os.path.join(path, item)
if os.path.isdir(item):
for name in ["build.xml", "local.properties", "proguard-project.txt"]:
rm_one(os.path.join(item, name))
#===================================================================================================
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Build OpenCV for Android SDK')
parser.add_argument("work_dir", nargs='?', default='.', help="Working directory (and output)")
parser.add_argument("opencv_dir", nargs='?', default=os.path.join(SCRIPT_DIR, '../..'), help="Path to OpenCV source dir")
parser.add_argument('--config', default='ndk-10.config.py', type=str, help="Package build configuration", )
parser.add_argument('--ndk_path', help="Path to Android NDK to use for build")
parser.add_argument('--sdk_path', help="Path to Android SDK to use for build")
parser.add_argument("--extra_modules_path", help="Path to extra modules to use for build")
parser.add_argument('--sign_with', help="Certificate to sign the Manager apk")
parser.add_argument('--build_doc', action="store_true", help="Build javadoc")
parser.add_argument('--no_ccache', action="store_true", help="Do not use ccache during library build")
parser.add_argument('--extra_pack', action='append', help="provide extra OpenCV libraries for Manager apk in form <version>:<path-to-native-libs>, for example '2.4.11:unpacked/sdk/native/libs'")
parser.add_argument('--force_copy', action="store_true", help="Do not use file move during library build (useful for debug)")
parser.add_argument('--force_opencv_toolchain', action="store_true", help="Do not use toolchain from Android NDK")
args = parser.parse_args()
log.basicConfig(format='%(message)s', level=log.DEBUG)
log.debug("Args: %s", args)
if args.ndk_path is not None:
os.environ["ANDROID_NDK"] = args.ndk_path
if args.sdk_path is not None:
os.environ["ANDROID_SDK"] = args.sdk_path
if os.path.realpath(args.work_dir) == os.path.realpath(SCRIPT_DIR):
raise Fail("Specify workdir (building from script directory is not supported)")
if os.path.realpath(args.work_dir) == os.path.realpath(args.opencv_dir):
raise Fail("Specify workdir (building from OpenCV source directory is not supported)")
cpath = args.config
if not os.path.exists(cpath):
cpath = os.path.join(SCRIPT_DIR, cpath)
if not os.path.exists(cpath):
raise Fail('Config "%s" is missing' % args.config)
with open(cpath, 'r') as f:
cfg = f.read()
print("Package configuration:")
print('=' * 80)
print(cfg.strip())
print('=' * 80)
exec(compile(cfg, cpath, 'exec'))
log.info("Android NDK path: %s", os.environ["ANDROID_NDK"])
log.info("Android SDK path: %s", os.environ["ANDROID_SDK"])
builder = Builder(args.work_dir, args.opencv_dir, args)
log.info("Detected OpenCV version: %s", builder.opencv_version)
log.info("Detected Engine version: %s", builder.engine_version)
if args.extra_pack:
for one in args.extra_pack:
i = one.find(":")
if i > 0 and i < len(one) - 1:
builder.add_extra_pack(one[:i], one[i+1:])
else:
raise Fail("Bad extra pack provided: %s, should be in form '<version>:<path-to-native-libs>'" % one)
engines = []
for i, abi in enumerate(ABIs):
do_install = (i == 0)
engdest = check_dir(os.path.join(builder.workdir, "build_service_%s" % abi.name), create=True, clean=True)
log.info("=====")
log.info("===== Building library for %s", abi)
log.info("=====")
os.chdir(builder.libdest)
builder.clean_library_build_dir()
builder.build_library(abi, do_install)
log.info("=====")
log.info("===== Building engine for %s", abi)
log.info("=====")
os.chdir(engdest)
builder.build_engine(abi, engdest)
engines.append((abi.name, engdest))
builder.gather_results(engines)
if args.build_doc:
builder.build_javadoc()
log.info("=====")
log.info("===== Build finished")
log.info("=====")
log.info("SDK location: %s", builder.resultdest)
log.info("Documentation location: %s", builder.docdest)
|
py | b40c28d8f7924ac23206c69252e5651626448ebd | from pypresto import *
|
py | b40c295a5a00e1eb3fd559e5e4b80951d72b346d | import logging
import os
import urllib.request
from os.path import join
import boto3
# Map from `filename to cache as` -> `location of file`
URLS_TO_CACHE = {
'business_types.json': 'https://data.cityofgainesville.org/resource/i9px-haju.json',
'business_permits.json': 'https://data.cityofgainesville.org/resource/mfe4-6q3g.json'
}
BUCKET = os.environ['S3_BUCKET']
PREFIX = os.environ['S3_PREFIX']
def lambda_handler(event, context):
main()
def main():
setup_logging()
for filename, url in URLS_TO_CACHE.items():
cache_file(filename, url)
def setup_logging():
logging.getLogger('botocore').setLevel(logging.WARNING)
logging.getLogger().setLevel(logging.INFO)
def cache_file(filename, url):
data = get_bytes(url)
logging.info('Downloaded {}'.format(url))
s3_path = save_bytes_to_s3(data, filename)
logging.info('Uploaded {} to {}'.format(url, s3_path))
return s3_path
def get_bytes(url):
with urllib.request.urlopen(url) as resp:
data = resp.read()
return data
def save_bytes_to_s3(data, filename):
key = join(PREFIX, filename)
bucket = boto3.resource('s3').Bucket(BUCKET)
bucket.put_object(Key=key, Body=data)
s3_path = 's3://{}/{}'.format(BUCKET, key)
return s3_path
if __name__ == '__main__':
main()
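# Local run sketch (file name, bucket and prefix are assumptions):
#
#     S3_BUCKET=my-data-bucket S3_PREFIX=gainesville/cache python cache_open_data.py
#
# In AWS Lambda, set the same environment variables and use lambda_handler as the
# handler; every invocation re-downloads both JSON feeds and overwrites the
# objects under s3://$S3_BUCKET/$S3_PREFIX/.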
|
py | b40c29750d1f4ed42411a7d3c9b22f3d9fe817b7 | import subprocess
import unittest
from semver_range import Version, Range
class VersionTestCase(unittest.TestCase):
# largely taken from https://github.com/npm/node-semver/blob/master/test/index.js
def test_invalid(self):
data = [
'1.2.3.4',
'NOT VALID',
1.2,
None,
'',
]
for version in data:
with self.assertRaises(ValueError, msg='Version should be invalid %s' % version):
Version(version)
def test_loose(self):
data = [
['=1.2.3', '1.2.3'],
['01.02.03', '1.2.3'],
['1.2.3-beta.01', '1.2.3-beta.1'],
[' =1.2.3', '1.2.3'],
['1.2.3foo', '1.2.3-foo'],
]
for loose, strict in data:
with self.assertRaises(ValueError, msg='Version should be strictly invalid %s' % loose):
Version(loose)
loose = Version(loose, loose=True)
self.assertEqual(loose, strict)
self.assertTrue(loose.has_same_precedence(strict))
def test_versions(self):
data = [
['0.0.0', '0.0.0-foo'],
['0.0.1', '0.0.0'],
['1.0.0', '0.9.9'],
['0.10.0', '0.9.0'],
['0.99.0', '0.10.0'],
['2.0.0', '1.2.3'],
['1.2.3', '1.2.3-asdf'],
['1.2.3', '1.2.3-4'],
['1.2.3', '1.2.3-4-foo'],
['1.2.3-5-foo', '1.2.3-5'],
['1.2.3-5', '1.2.3-4'],
['1.2.3-5-foo', '1.2.3-5-Foo'],
['3.0.0', '2.7.2+asdf'],
['1.2.3-a.10', '1.2.3-a.5'],
['1.2.3-a.b', '1.2.3-a.5'],
['1.2.3-a.b', '1.2.3-a'],
['1.2.3-a.b.c.10.d.5', '1.2.3-a.b.c.5.d.100'],
['1.2.3-r2', '1.2.3-r100'],
['1.2.3-r100', '1.2.3-R2'],
]
for v1, v2 in data:
v1 = Version(v1)
v2 = Version(v2)
self.assertGreater(v1, v2)
self.assertGreaterEqual(v1, v2)
self.assertLess(v2, v1)
self.assertLessEqual(v2, v1)
self.assertNotEqual(v1, v2)
# self.assertTrue(v2.precedes(v1), msg='%s should precede %s' % (v2, v1))
def test_loose_comparison(self):
data = [
['v0.0.0', '0.0.0-foo'],
['v0.0.1', '0.0.0'],
['v1.0.0', '0.9.9'],
['v0.10.0', '0.9.0'],
['v0.99.0', '0.10.0'],
['v2.0.0', '1.2.3'],
['0.0.0', 'v0.0.0-foo'],
['0.0.1', 'v0.0.0'],
['1.0.0', 'v0.9.9'],
['0.10.0', 'v0.9.0'],
['0.99.0', 'v0.10.0'],
['2.0.0', 'v1.2.3'],
]
for v1, v2 in data:
v1 = Version(v1, loose=True)
v2 = Version(v2, loose=True)
self.assertGreater(v1, v2)
self.assertGreaterEqual(v1, v2)
self.assertLess(v2, v1)
self.assertLessEqual(v2, v1)
self.assertNotEqual(v1, v2)
self.assertTrue(v2.precedes(v1))
def test_loosely_matching_precedence(self):
data = [
['1.2.3', 'v1.2.3'],
['1.2.3', '=1.2.3'],
['1.2.3', 'v 1.2.3'],
['1.2.3', '= 1.2.3'],
['1.2.3', ' v1.2.3'],
['1.2.3', ' =1.2.3'],
['1.2.3', ' v 1.2.3'],
['1.2.3', ' = 1.2.3'],
['1.2.3-0', 'v1.2.3-0'],
['1.2.3-0', '=1.2.3-0'],
['1.2.3-0', 'v 1.2.3-0'],
['1.2.3-0', '= 1.2.3-0'],
['1.2.3-0', ' v1.2.3-0'],
['1.2.3-0', ' =1.2.3-0'],
['1.2.3-0', ' v 1.2.3-0'],
['1.2.3-0', ' = 1.2.3-0'],
['1.2.3-1', 'v1.2.3-1'],
['1.2.3-1', '=1.2.3-1'],
['1.2.3-1', 'v 1.2.3-1'],
['1.2.3-1', '= 1.2.3-1'],
['1.2.3-1', ' v1.2.3-1'],
['1.2.3-1', ' =1.2.3-1'],
['1.2.3-1', ' v 1.2.3-1'],
['1.2.3-1', ' = 1.2.3-1'],
['1.2.3-beta', 'v1.2.3-beta'],
['1.2.3-beta', '=1.2.3-beta'],
['1.2.3-beta', 'v 1.2.3-beta'],
['1.2.3-beta', '= 1.2.3-beta'],
['1.2.3-beta', ' v1.2.3-beta'],
['1.2.3-beta', ' =1.2.3-beta'],
['1.2.3-beta', ' v 1.2.3-beta'],
['1.2.3-beta', ' = 1.2.3-beta'],
]
for v1, v2 in data:
v1 = Version(v1, loose=True)
v2 = Version(v2, loose=True)
self.assertEqual(v1, v2)
self.assertTrue(v1.has_same_precedence(v2))
data += [
['1.2.3-beta+build', ' = 1.2.3-beta+otherbuild'],
['1.2.3+build', ' = 1.2.3+otherbuild'],
['1.2.3-beta+build', '1.2.3-beta+otherbuild'],
['1.2.3+build', '1.2.3+otherbuild'],
[' v1.2.3+build', '1.2.3+otherbuild'],
]
for v1, v2 in data:
v1 = Version(v1, loose=True)
v2 = Version(v2, loose=True)
self.assertTrue(v1.has_same_precedence(v2))
def test_incrementing(self):
data = [
['1.2.3', 'major', '2.0.0'],
['1.2.3', 'minor', '1.3.0'],
['1.2.3', 'patch', '1.2.4'],
['1.2.3tag', 'major', '2.0.0', True],
['1.2.3-tag', 'major', '2.0.0'],
['1.2.3', 'fake', None],
['1.2.0-0', 'patch', '1.2.0'],
['fake', 'major', None],
['1.2.3-4', 'major', '2.0.0'],
['1.2.3-4', 'minor', '1.3.0'],
['1.2.3-4', 'patch', '1.2.3'],
['1.2.3-alpha.0.beta', 'major', '2.0.0'],
['1.2.3-alpha.0.beta', 'minor', '1.3.0'],
['1.2.3-alpha.0.beta', 'patch', '1.2.3'],
['1.2.4', 'prerelease', '1.2.5-0'],
['1.2.3-0', 'prerelease', '1.2.3-1'],
['1.2.3-alpha.0', 'prerelease', '1.2.3-alpha.1'],
['1.2.3-alpha.1', 'prerelease', '1.2.3-alpha.2'],
['1.2.3-alpha.2', 'prerelease', '1.2.3-alpha.3'],
['1.2.3-alpha.0.beta', 'prerelease', '1.2.3-alpha.1.beta'],
['1.2.3-alpha.1.beta', 'prerelease', '1.2.3-alpha.2.beta'],
['1.2.3-alpha.2.beta', 'prerelease', '1.2.3-alpha.3.beta'],
['1.2.3-alpha.10.0.beta', 'prerelease', '1.2.3-alpha.10.1.beta'],
['1.2.3-alpha.10.1.beta', 'prerelease', '1.2.3-alpha.10.2.beta'],
['1.2.3-alpha.10.2.beta', 'prerelease', '1.2.3-alpha.10.3.beta'],
['1.2.3-alpha.10.beta.0', 'prerelease', '1.2.3-alpha.10.beta.1'],
['1.2.3-alpha.10.beta.1', 'prerelease', '1.2.3-alpha.10.beta.2'],
['1.2.3-alpha.10.beta.2', 'prerelease', '1.2.3-alpha.10.beta.3'],
['1.2.3-alpha.9.beta', 'prerelease', '1.2.3-alpha.10.beta'],
['1.2.3-alpha.10.beta', 'prerelease', '1.2.3-alpha.11.beta'],
['1.2.3-alpha.11.beta', 'prerelease', '1.2.3-alpha.12.beta'],
['1.2.0', 'prepatch', '1.2.1-0'],
['1.2.0-1', 'prepatch', '1.2.1-0'],
['1.2.0', 'preminor', '1.3.0-0'],
['1.2.3-1', 'preminor', '1.3.0-0'],
['1.2.0', 'premajor', '2.0.0-0'],
['1.2.3-1', 'premajor', '2.0.0-0'],
['1.2.0-1', 'minor', '1.2.0'],
['1.0.0-1', 'major', '1.0.0'],
]
for row in data:
if len(row) == 4:
version, level, expected, loose = row
else:
version, level, expected = row
loose = False
msg = 'Incrementing %s by %s' % (version, level)
if expected is None:
with self.assertRaises(ValueError):
version = Version(version, loose=loose)
self.assertEqual(version.increment(level), expected, msg=msg)
continue
version = Version(version, loose=loose)
self.assertEqual(version.increment(level), expected, msg=msg)
@unittest.skip('Not implemented')
def test_incrementing_with_pre_release(self):
data = [
['1.2.3', 'major', '2.0.0', False, 'dev'],
['1.2.3', 'minor', '1.3.0', False, 'dev'],
['1.2.3', 'patch', '1.2.4', False, 'dev'],
['1.2.3tag', 'major', '2.0.0', True, 'dev'],
['1.2.3-tag', 'major', '2.0.0', False, 'dev'],
['1.2.3', 'fake', None, False, 'dev'],
['1.2.0-0', 'patch', '1.2.0', False, 'dev'],
['fake', 'major', None, False, 'dev'],
['1.2.3-4', 'major', '2.0.0', False, 'dev'],
['1.2.3-4', 'minor', '1.3.0', False, 'dev'],
['1.2.3-4', 'patch', '1.2.3', False, 'dev'],
['1.2.3-alpha.0.beta', 'major', '2.0.0', False, 'dev'],
['1.2.3-alpha.0.beta', 'minor', '1.3.0', False, 'dev'],
['1.2.3-alpha.0.beta', 'patch', '1.2.3', False, 'dev'],
['1.2.4', 'prerelease', '1.2.5-dev.0', False, 'dev'],
['1.2.3-0', 'prerelease', '1.2.3-dev.0', False, 'dev'],
['1.2.3-alpha.0', 'prerelease', '1.2.3-dev.0', False, 'dev'],
['1.2.3-alpha.0', 'prerelease', '1.2.3-alpha.1', False, 'alpha'],
['1.2.3-alpha.0.beta', 'prerelease', '1.2.3-dev.0', False, 'dev'],
['1.2.3-alpha.0.beta', 'prerelease', '1.2.3-alpha.1.beta', False, 'alpha'],
['1.2.3-alpha.10.0.beta', 'prerelease', '1.2.3-dev.0', False, 'dev'],
['1.2.3-alpha.10.0.beta', 'prerelease', '1.2.3-alpha.10.1.beta', False, 'alpha'],
['1.2.3-alpha.10.1.beta', 'prerelease', '1.2.3-alpha.10.2.beta', False, 'alpha'],
['1.2.3-alpha.10.2.beta', 'prerelease', '1.2.3-alpha.10.3.beta', False, 'alpha'],
['1.2.3-alpha.10.beta.0', 'prerelease', '1.2.3-dev.0', False, 'dev'],
['1.2.3-alpha.10.beta.0', 'prerelease', '1.2.3-alpha.10.beta.1', False, 'alpha'],
['1.2.3-alpha.10.beta.1', 'prerelease', '1.2.3-alpha.10.beta.2', False, 'alpha'],
['1.2.3-alpha.10.beta.2', 'prerelease', '1.2.3-alpha.10.beta.3', False, 'alpha'],
['1.2.3-alpha.9.beta', 'prerelease', '1.2.3-dev.0', False, 'dev'],
['1.2.3-alpha.9.beta', 'prerelease', '1.2.3-alpha.10.beta', False, 'alpha'],
['1.2.3-alpha.10.beta', 'prerelease', '1.2.3-alpha.11.beta', False, 'alpha'],
['1.2.3-alpha.11.beta', 'prerelease', '1.2.3-alpha.12.beta', False, 'alpha'],
['1.2.0', 'prepatch', '1.2.1-dev.0', False, 'dev'],
['1.2.0-1', 'prepatch', '1.2.1-dev.0', False, 'dev'],
['1.2.0', 'preminor', '1.3.0-dev.0', False, 'dev'],
['1.2.3-1', 'preminor', '1.3.0-dev.0', False, 'dev'],
['1.2.0', 'premajor', '2.0.0-dev.0', False, 'dev'],
['1.2.3-1', 'premajor', '2.0.0-dev.0', False, 'dev'],
['1.2.0-1', 'minor', '1.2.0', False, 'dev'],
['1.0.0-1', 'major', '1.0.0', False, 'dev'],
['1.2.3-dev.bar', 'prerelease', '1.2.3-dev.0', False, 'dev'],
]
for version, level, expected, loose, identifier in data:
msg = 'Incrementing %s by %s with identifier %s' % (version, level, identifier)
version = Version(version, loose=loose)
self.assertEqual(version.increment(level, identifier=identifier), expected, msg=msg)
class RangeTestCase(unittest.TestCase):
# largely taken from https://github.com/npm/node-semver/blob/master/test/index.js
def test_ranges(self):
data = [
['1.2.3 - 2.3.4', '>=1.2.3 <=2.3.4'],
['1.2 - 2.3.4', '>=1.2.0 <=2.3.4'],
['1.2.3 - 2.3', '>=1.2.3 <2.4.0'],
['1.2.3 - 2', '>=1.2.3 <3.0.0'],
['~1.2.3', '>=1.2.3 <1.3.0'],
['~1.2', '>=1.2.0 <1.3.0'],
['~1', '>=1.0.0 <2.0.0'],
['~0.2.3', '>=0.2.3 <0.3.0'],
['~0.2', '>=0.2.0 <0.3.0'],
['~0', '>=0.0.0 <1.0.0'],
['~1.2.3-beta.2', '>=1.2.3-beta.2 <1.3.0'],
['^1.2.3', '>=1.2.3 <2.0.0'],
['^0.2.3', '>=0.2.3 <0.3.0'],
['^0.0.3', '>=0.0.3 <0.0.4'],
['^1.2.3-beta.2', '>=1.2.3-beta.2 <2.0.0'],
['^0.0.3-beta', '>=0.0.3-beta <0.0.4'],
['^1.2.x', '>=1.2.0 <2.0.0'],
['^0.0.x', '>=0.0.0 <0.1.0'],
['^0.0', '>=0.0.0 <0.1.0'],
['^1.x', '>=1.0.0 <2.0.0'],
['^0.x', '>=0.0.0 <1.0.0'],
['1.2.3 - *', '>=1.2.3'],
['* - 2', '>=0.0.0 <3.0.0'],
['^*', '>=0.0.0'], # is this right?
['', '>=0.0.0'],
['1.0.0 - 2.0.0', '>=1.0.0 <=2.0.0'],
['1.0.0', '1.0.0'],
['>=*', '>=0.0.0'], # node's semver uses *
['', '>=0.0.0'], # node's semver uses *
['*', '>=0.0.0'], # node's semver uses *
['*', '>=0.0.0'], # node's semver uses *
['>=1.0.0', '>=1.0.0'],
['>1.0.0', '>1.0.0'],
['<=2.0.0', '<=2.0.0'],
['1', '>=1.0.0 <2.0.0'],
['<=2.0.0', '<=2.0.0'],
['<=2.0.0', '<=2.0.0'],
['<2.0.0', '<2.0.0'],
['<2.0.0', '<2.0.0'],
['>= 1.0.0', '>=1.0.0'],
['>= 1.0.0', '>=1.0.0'],
['>= 1.0.0', '>=1.0.0'],
['> 1.0.0', '>1.0.0'],
['> 1.0.0', '>1.0.0'],
['<= 2.0.0', '<=2.0.0'],
['<= 2.0.0', '<=2.0.0'],
['<= 2.0.0', '<=2.0.0'],
['< 2.0.0', '<2.0.0'],
['< 2.0.0', '<2.0.0'],
['>=0.1.97', '>=0.1.97'],
['>=0.1.97', '>=0.1.97'],
['0.1.20 || 1.2.4', '0.1.20||1.2.4'],
['>=0.2.3 || <0.0.1', '>=0.2.3||<0.0.1'],
['>=0.2.3 || <0.0.1', '>=0.2.3||<0.0.1'],
['>=0.2.3 || <0.0.1', '>=0.2.3||<0.0.1'],
['||', '>=0.0.0'], # node's semver uses ||
['2.x.x', '>=2.0.0 <3.0.0'],
['1.2.x', '>=1.2.0 <1.3.0'],
['1.2.x || 2.x', '>=1.2.0 <1.3.0||>=2.0.0 <3.0.0'],
['1.2.x || 2.x', '>=1.2.0 <1.3.0||>=2.0.0 <3.0.0'],
['x', '>=0.0.0'], # node's semver uses *
['2.*.*', '>=2.0.0 <3.0.0'],
['1.2.*', '>=1.2.0 <1.3.0'],
['1.2.* || 2.*', '>=1.2.0 <1.3.0||>=2.0.0 <3.0.0'],
['*', '>=0.0.0'], # node's semver uses *
['2', '>=2.0.0 <3.0.0'],
['2.3', '>=2.3.0 <2.4.0'],
['~2.4', '>=2.4.0 <2.5.0'],
['~2.4', '>=2.4.0 <2.5.0'],
['~>3.2.1', '>=3.2.1 <3.3.0'],
['~1', '>=1.0.0 <2.0.0'],
['~>1', '>=1.0.0 <2.0.0'],
['~> 1', '>=1.0.0 <2.0.0'],
['~1.0', '>=1.0.0 <1.1.0'],
['~ 1.0', '>=1.0.0 <1.1.0'],
['^0', '>=0.0.0 <1.0.0'],
['^ 1', '>=1.0.0 <2.0.0'],
['^0.1', '>=0.1.0 <0.2.0'],
['^1.0', '>=1.0.0 <2.0.0'],
['^1.2', '>=1.2.0 <2.0.0'],
['^0.0.1', '>=0.0.1 <0.0.2'],
['^0.0.1-beta', '>=0.0.1-beta <0.0.2'],
['^0.1.2', '>=0.1.2 <0.2.0'],
['^1.2.3', '>=1.2.3 <2.0.0'],
['^1.2.3-beta.4', '>=1.2.3-beta.4 <2.0.0'],
['<1', '<1.0.0'],
['< 1', '<1.0.0'],
['>=1', '>=1.0.0'],
['>= 1', '>=1.0.0'],
['<1.2', '<1.2.0'],
['< 1.2', '<1.2.0'],
['1', '>=1.0.0 <2.0.0'],
['^ 1.2 ^ 1', '>=1.0.0 >=1.2.0 <2.0.0 <2.0.0'], # node's semver doesn't sort: >=1.2.0 <2.0.0 >=1.0.0 <2.0.0
]
for pattern, expanded in data:
pattern = Range(pattern)
result = '||'.join(comparator.desc for comparator in pattern.ranges)
self.assertEqual(result, expanded, msg='%s should expand to %s' % (pattern, expanded))
def test_loose_ranges(self):
data = [
['>01.02.03', '>1.2.3'],
['~1.2.3beta', '>=1.2.3-beta <1.3.0'],
]
for pattern, expanded in data:
pattern = Range(pattern, loose=True)
result = '||'.join(comparator.desc for comparator in pattern.ranges)
self.assertEqual(result, expanded, msg='%s should expand to %s' % (pattern, expanded))
def test_comparators(self):
data = [
['1.0.0 - 2.0.0', [['>=1.0.0', '<=2.0.0']]],
['1.0.0', [['1.0.0']]],
['>=*', [['>=0.0.0']]], # node's semver uses ''
['', [['>=0.0.0']]], # node's semver uses ''
['*', [['>=0.0.0']]], # node's semver uses ''
['*', [['>=0.0.0']]], # node's semver uses ''
['>=1.0.0', [['>=1.0.0']]],
['>=1.0.0', [['>=1.0.0']]],
['>=1.0.0', [['>=1.0.0']]],
['>1.0.0', [['>1.0.0']]],
['>1.0.0', [['>1.0.0']]],
['<=2.0.0', [['<=2.0.0']]],
['1', [['>=1.0.0', '<2.0.0']]],
['<=2.0.0', [['<=2.0.0']]],
['<=2.0.0', [['<=2.0.0']]],
['<2.0.0', [['<2.0.0']]],
['<2.0.0', [['<2.0.0']]],
['>= 1.0.0', [['>=1.0.0']]],
['>= 1.0.0', [['>=1.0.0']]],
['>= 1.0.0', [['>=1.0.0']]],
['> 1.0.0', [['>1.0.0']]],
['> 1.0.0', [['>1.0.0']]],
['<= 2.0.0', [['<=2.0.0']]],
['<= 2.0.0', [['<=2.0.0']]],
['<= 2.0.0', [['<=2.0.0']]],
['< 2.0.0', [['<2.0.0']]],
['<\t2.0.0', [['<2.0.0']]],
['>=0.1.97', [['>=0.1.97']]],
['>=0.1.97', [['>=0.1.97']]],
['0.1.20 || 1.2.4', [['0.1.20'], ['1.2.4']]],
['>=0.2.3 || <0.0.1', [['>=0.2.3'], ['<0.0.1']]],
['>=0.2.3 || <0.0.1', [['>=0.2.3'], ['<0.0.1']]],
['>=0.2.3 || <0.0.1', [['>=0.2.3'], ['<0.0.1']]],
['||', [['>=0.0.0']]], # node's semver uses '||'
['2.x.x', [['>=2.0.0', '<3.0.0']]],
['1.2.x', [['>=1.2.0', '<1.3.0']]],
['1.2.x || 2.x', [['>=1.2.0', '<1.3.0'], ['>=2.0.0', '<3.0.0']]],
['1.2.x || 2.x', [['>=1.2.0', '<1.3.0'], ['>=2.0.0', '<3.0.0']]],
['x', [['>=0.0.0']]], # node's semver uses ''
['2.*.*', [['>=2.0.0', '<3.0.0']]],
['1.2.*', [['>=1.2.0', '<1.3.0']]],
['1.2.* || 2.*', [['>=1.2.0', '<1.3.0'], ['>=2.0.0', '<3.0.0']]],
['1.2.* || 2.*', [['>=1.2.0', '<1.3.0'], ['>=2.0.0', '<3.0.0']]],
['*', [['>=0.0.0']]], # node's semver uses ''
['2', [['>=2.0.0', '<3.0.0']]],
['2.3', [['>=2.3.0', '<2.4.0']]],
['~2.4', [['>=2.4.0', '<2.5.0']]],
['~2.4', [['>=2.4.0', '<2.5.0']]],
['~>3.2.1', [['>=3.2.1', '<3.3.0']]],
['~1', [['>=1.0.0', '<2.0.0']]],
['~>1', [['>=1.0.0', '<2.0.0']]],
['~> 1', [['>=1.0.0', '<2.0.0']]],
['~1.0', [['>=1.0.0', '<1.1.0']]],
['~ 1.0', [['>=1.0.0', '<1.1.0']]],
['~ 1.0.3', [['>=1.0.3', '<1.1.0']]],
['~> 1.0.3', [['>=1.0.3', '<1.1.0']]],
['<1', [['<1.0.0']]],
['< 1', [['<1.0.0']]],
['>=1', [['>=1.0.0']]],
['>= 1', [['>=1.0.0']]],
['<1.2', [['<1.2.0']]],
['< 1.2', [['<1.2.0']]],
['1', [['>=1.0.0', '<2.0.0']]],
# node's semver uses '>=1.0.0', '<2.0.0', '>=2.0.0', '<3.0.0':
['1 2', [['>=1.0.0', '>=2.0.0', '<2.0.0', '<3.0.0']]],
['1.2 - 3.4.5', [['>=1.2.0', '<=3.4.5']]],
['1.2.3 - 3.4', [['>=1.2.3', '<3.5.0']]],
['1.2.3 - 3', [['>=1.2.3', '<4.0.0']]],
# match-nothing ranges
['>*', [['<0.0.0']]],
['<*', [['<0.0.0']]],
]
for pattern, expected_ranges in data:
pattern = Range(pattern)
for expected_range, comparator in zip(expected_ranges, pattern.ranges):
expected_range = ' '.join(expected_range)
self.assertEqual(expected_range, comparator.desc,
msg='%s should expand to %s' % (pattern, expected_range))
def test_range_matches(self):
data = [
['1.0.0 - 2.0.0', '1.2.3'],
['^1.2.3+build', '1.2.3'],
['^1.2.3+build', '1.3.0'],
['1.2.3-pre+asdf - 2.4.3-pre+asdf', '1.2.3'],
['1.2.3-pre+asdf - 2.4.3-pre+asdf', '1.2.3-pre.2'],
['1.2.3-pre+asdf - 2.4.3-pre+asdf', '2.4.3-alpha'],
['1.2.3+asdf - 2.4.3+asdf', '1.2.3'],
['1.0.0', '1.0.0'],
['>=*', '0.2.4'],
['', '1.0.0'],
['*', '1.2.3'],
['>=1.0.0', '1.0.0'],
['>=1.0.0', '1.0.1'],
['>=1.0.0', '1.1.0'],
['>1.0.0', '1.0.1'],
['>1.0.0', '1.1.0'],
['<=2.0.0', '2.0.0'],
['<=2.0.0', '1.9999.9999'],
['<=2.0.0', '0.2.9'],
['<2.0.0', '1.9999.9999'],
['<2.0.0', '0.2.9'],
['>= 1.0.0', '1.0.0'],
['>= 1.0.0', '1.0.1'],
['>= 1.0.0', '1.1.0'],
['> 1.0.0', '1.0.1'],
['> 1.0.0', '1.1.0'],
['<= 2.0.0', '2.0.0'],
['<= 2.0.0', '1.9999.9999'],
['<= 2.0.0', '0.2.9'],
['< 2.0.0', '1.9999.9999'],
['<\t2.0.0', '0.2.9'],
['>=0.1.97', '0.1.97'],
['0.1.20 || 1.2.4', '1.2.4'],
['>=0.2.3 || <0.0.1', '0.0.0'],
['>=0.2.3 || <0.0.1', '0.2.3'],
['>=0.2.3 || <0.0.1', '0.2.4'],
['||', '1.3.4'],
['2.x.x', '2.1.3'],
['1.2.x', '1.2.3'],
['1.2.x || 2.x', '2.1.3'],
['1.2.x || 2.x', '1.2.3'],
['x', '1.2.3'],
['2.*.*', '2.1.3'],
['1.2.*', '1.2.3'],
['1.2.* || 2.*', '2.1.3'],
['1.2.* || 2.*', '1.2.3'],
['*', '1.2.3'],
['2', '2.1.2'],
['2.3', '2.3.1'],
['~2.4', '2.4.0'], # >=2.4.0 <2.5.0
['~2.4', '2.4.5'],
['~>3.2.1', '3.2.2'], # >=3.2.1 <3.3.0,
['~1', '1.2.3'], # >=1.0.0 <2.0.0
['~>1', '1.2.3'],
['~> 1', '1.2.3'],
['~1.0', '1.0.2'], # >=1.0.0 <1.1.0,
['~ 1.0', '1.0.2'],
['~ 1.0.3', '1.0.12'],
['>=1', '1.0.0'],
['>= 1', '1.0.0'],
['<1.2', '1.1.1'],
['< 1.2', '1.1.1'],
['=0.7.x', '0.7.2'],
['<=0.7.x', '0.7.2'],
['>=0.7.x', '0.7.2'],
['<=0.7.x', '0.6.2'],
['~1.2.1 >=1.2.3', '1.2.3'],
['~1.2.1 =1.2.3', '1.2.3'],
['~1.2.1 1.2.3', '1.2.3'],
['~1.2.1 >=1.2.3 1.2.3', '1.2.3'],
['~1.2.1 1.2.3 >=1.2.3', '1.2.3'],
['~1.2.1 1.2.3', '1.2.3'],
['>=1.2.1 1.2.3', '1.2.3'],
['1.2.3 >=1.2.1', '1.2.3'],
['>=1.2.3 >=1.2.1', '1.2.3'],
['>=1.2.1 >=1.2.3', '1.2.3'],
['>=1.2', '1.2.8'],
['^1.2.3', '1.8.1'],
['^0.1.2', '0.1.2'],
['^0.1', '0.1.2'],
['^1.2', '1.4.2'],
['^1.2 ^1', '1.4.2'],
['^1.2.3-alpha', '1.2.3-pre'],
['^1.2.0-alpha', '1.2.0-pre'],
['^0.0.1-alpha', '0.0.1-beta'],
]
for pattern, version in data:
pattern = Range(pattern)
self.assertIn(
version,
pattern,
msg='%s should be in %s' % (version, pattern)
)
def test_loose_range_matches(self):
data = [
['1.2.3pre+asdf - 2.4.3-pre+asdf', '1.2.3'],
['1.2.3-pre+asdf - 2.4.3pre+asdf', '1.2.3'],
['1.2.3pre+asdf - 2.4.3pre+asdf', '1.2.3'],
['*', 'v1.2.3'],
['>=0.1.97', 'v0.1.97'],
# node's semver doesn't consider these a loose range:
['~v0.5.4-pre', '0.5.5'],
['~v0.5.4-pre', '0.5.4'],
]
for pattern, version in data:
pattern = Range(pattern, loose=True)
self.assertIn(
version,
pattern,
msg='%s should be in %s' % (version, pattern)
)
def test_range_non_matches(self):
data = [
['1.0.0 - 2.0.0', '2.2.3'],
['1.2.3+asdf - 2.4.3+asdf', '1.2.3-pre.2'],
['1.2.3+asdf - 2.4.3+asdf', '2.4.3-alpha'],
['^1.2.3+build', '2.0.0'],
['^1.2.3+build', '1.2.0'],
['^1.2.3', '1.2.3-pre'],
['^1.2', '1.2.0-pre'],
['>1.2', '1.3.0-beta'],
['<=1.2.3', '1.2.3-beta'],
['^1.2.3', '1.2.3-beta'],
['=0.7.x', '0.7.0-asdf'],
['>=0.7.x', '0.7.0-asdf'],
['1.0.0', '1.0.1'],
['>=1.0.0', '0.0.0'],
['>=1.0.0', '0.0.1'],
['>=1.0.0', '0.1.0'],
['>1.0.0', '0.0.1'],
['>1.0.0', '0.1.0'],
['<=2.0.0', '3.0.0'],
['<=2.0.0', '2.9999.9999'],
['<=2.0.0', '2.2.9'],
['<2.0.0', '2.9999.9999'],
['<2.0.0', '2.2.9'],
['>=0.1.97', '0.1.93'],
['0.1.20 || 1.2.4', '1.2.3'],
['>=0.2.3 || <0.0.1', '0.0.3'],
['>=0.2.3 || <0.0.1', '0.2.2'],
['2.x.x', '1.1.3'],
['2.x.x', '3.1.3'],
['1.2.x', '1.3.3'],
['1.2.x || 2.x', '3.1.3'],
['1.2.x || 2.x', '1.1.3'],
['2.*.*', '1.1.3'],
['2.*.*', '3.1.3'],
['1.2.*', '1.3.3'],
['1.2.* || 2.*', '3.1.3'],
['1.2.* || 2.*', '1.1.3'],
['2', '1.1.2'],
['2.3', '2.4.1'],
['~2.4', '2.5.0'], # >= 2.4.0 < 2.5.0
['~2.4', '2.3.9'],
['~>3.2.1', '3.3.2'], # >= 3.2.1 < 3.3.0
['~>3.2.1', '3.2.0'], # >= 3.2.1 < 3.3.0
['~1', '0.2.3'], # >= 1.0.0 < 2.0.0
['~>1', '2.2.3'],
['~1.0', '1.1.0'], # >= 1.0.0 < 1.1.0
['<1', '1.0.0'],
['>=1.2', '1.1.1'],
['=0.7.x', '0.8.2'],
['>=0.7.x', '0.6.2'],
['<0.7.x', '0.7.2'],
['<1.2.3', '1.2.3-beta'],
['=1.2.3', '1.2.3-beta'],
['>1.2', '1.2.8'],
['^1.2.3', '2.0.0-alpha'],
['^1.2.3', '1.2.2'],
['^1.2', '1.1.9'],
['^1.2.3', '2.0.0-pre'],
]
for pattern, version in data:
pattern = Range(pattern)
self.assertNotIn(
version,
pattern,
msg='%s should not be in %s' % (version, pattern)
)
def test_loose_range_non_matches(self):
data = [
['1', '1.0.0beta'],
['<1', '1.0.0beta'],
['< 1', '1.0.0beta'],
['>=0.1.97', 'v0.1.93'],
['1', '2.0.0beta'],
['*', 'v1.2.3-foo'],
# node's semver doesn't consider these a loose range:
['~v0.5.4-beta', '0.5.4-alpha'],
]
for pattern, version in data:
pattern = Range(pattern, loose=True)
self.assertNotIn(
version,
pattern,
msg='%s should not be in %s' % (version, pattern)
)
def test_invalid(self):
data = [
'blerg',
'git+https://user:[email protected]/foo',
'>=1 a',
'? >=1',
]
for pattern in data:
with self.assertRaises(ValueError, msg='Pattern should be invalid %s' % pattern):
Range(pattern, loose=True)
def test_min_satisfying(self):
data = [
[['1.2.3', '1.2.4'], '1.2', '1.2.3'],
[['1.2.4', '1.2.3'], '1.2', '1.2.3'],
[['1.2.3', '1.2.4', '1.2.5', '1.2.6'], '~1.2.3', '1.2.3'],
]
for versions, pattern, expected in data:
pattern = Range(pattern)
self.assertEqual(pattern.lowest_version(versions), expected)
data = [
[
['1.1.0', '1.2.0', '1.2.1', '1.3.0', '2.0.0b1', '2.0.0b2', '2.0.0b3', '2.0.0', '2.1.0'],
'~2.0.0', '2.0.0'
],
]
for versions, pattern, expected in data:
pattern = Range(pattern, loose=True)
self.assertEqual(pattern.lowest_version(versions), expected)
def test_max_satisfying(self):
data = [
[['1.2.3', '1.2.4'], '1.2', '1.2.4'],
[['1.2.4', '1.2.3'], '1.2', '1.2.4'],
[['1.2.3', '1.2.4', '1.2.5', '1.2.6'], '~1.2.3', '1.2.6'],
]
for versions, pattern, expected in data:
pattern = Range(pattern)
self.assertEqual(pattern.highest_version(versions), expected)
data = [
[
['1.1.0', '1.2.0', '1.2.1', '1.3.0', '2.0.0b1', '2.0.0b2', '2.0.0b3', '2.0.0', '2.1.0'],
'~2.0.0', '2.0.0'
],
]
for versions, pattern, expected in data:
pattern = Range(pattern, loose=True)
self.assertEqual(pattern.highest_version(versions), expected)
class CodeStyleTestCase(unittest.TestCase):
def test_code_style(self):
try:
subprocess.check_output(['python', '-m', 'flake8'])
except subprocess.CalledProcessError as e:
self.fail('Code style checks failed\n\n%s' % e.output.decode('utf-8'))
|
py | b40c299b707ffae7590aabc66305a10c1a586a03 | from __future__ import division, absolute_import
import warnings
import torch
from torch import nn
from torch.nn import functional as F
__all__ = ['osnet_ain_x1_0']
pretrained_urls = {
'osnet_ain_x1_0':
'https://drive.google.com/uc?id=1-CaioD9NaqbHK_kzSMW8VE4_3KcsRjEo'
}
##########
# Basic layers
##########
class ConvLayer(nn.Module):
"""Convolution layer (conv + bn + relu)."""
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
groups=1,
IN=False
):
super(ConvLayer, self).__init__()
self.conv = nn.Conv2d(
in_channels,
out_channels,
kernel_size,
stride=stride,
padding=padding,
bias=False,
groups=groups
)
if IN:
self.bn = nn.InstanceNorm2d(out_channels, affine=True)
else:
self.bn = nn.BatchNorm2d(out_channels)
self.relu = nn.ReLU()
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
return self.relu(x)
class Conv1x1(nn.Module):
"""1x1 convolution + bn + relu."""
def __init__(self, in_channels, out_channels, stride=1, groups=1):
super(Conv1x1, self).__init__()
self.conv = nn.Conv2d(
in_channels,
out_channels,
1,
stride=stride,
padding=0,
bias=False,
groups=groups
)
self.bn = nn.BatchNorm2d(out_channels)
self.relu = nn.ReLU()
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
return self.relu(x)
class Conv1x1Linear(nn.Module):
"""1x1 convolution + bn (w/o non-linearity)."""
def __init__(self, in_channels, out_channels, stride=1, bn=True):
super(Conv1x1Linear, self).__init__()
self.conv = nn.Conv2d(
in_channels, out_channels, 1, stride=stride, padding=0, bias=False
)
self.bn = None
if bn:
self.bn = nn.BatchNorm2d(out_channels)
def forward(self, x):
x = self.conv(x)
if self.bn is not None:
x = self.bn(x)
return x
class Conv3x3(nn.Module):
"""3x3 convolution + bn + relu."""
def __init__(self, in_channels, out_channels, stride=1, groups=1):
super(Conv3x3, self).__init__()
self.conv = nn.Conv2d(
in_channels,
out_channels,
3,
stride=stride,
padding=1,
bias=False,
groups=groups
)
self.bn = nn.BatchNorm2d(out_channels)
self.relu = nn.ReLU()
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
return self.relu(x)
class LightConv3x3(nn.Module):
"""Lightweight 3x3 convolution.
1x1 (linear) + dw 3x3 (nonlinear).
"""
def __init__(self, in_channels, out_channels):
super(LightConv3x3, self).__init__()
self.conv1 = nn.Conv2d(
in_channels, out_channels, 1, stride=1, padding=0, bias=False
)
self.conv2 = nn.Conv2d(
out_channels,
out_channels,
3,
stride=1,
padding=1,
bias=False,
groups=out_channels
)
self.bn = nn.BatchNorm2d(out_channels)
self.relu = nn.ReLU()
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.bn(x)
return self.relu(x)
class LightConvStream(nn.Module):
"""Lightweight convolution stream."""
def __init__(self, in_channels, out_channels, depth):
super(LightConvStream, self).__init__()
assert depth >= 1, 'depth must be equal to or larger than 1, but got {}'.format(
depth
)
layers = []
layers += [LightConv3x3(in_channels, out_channels)]
for i in range(depth - 1):
layers += [LightConv3x3(out_channels, out_channels)]
self.layers = nn.Sequential(*layers)
def forward(self, x):
return self.layers(x)
##########
# Building blocks for omni-scale feature learning
##########
class ChannelGate(nn.Module):
"""A mini-network that generates channel-wise gates conditioned on input tensor."""
def __init__(
self,
in_channels,
num_gates=None,
return_gates=False,
gate_activation='sigmoid',
reduction=16,
layer_norm=False
):
super(ChannelGate, self).__init__()
if num_gates is None:
num_gates = in_channels
self.return_gates = return_gates
self.global_avgpool = nn.AdaptiveAvgPool2d(1)
self.fc1 = nn.Conv2d(
in_channels,
in_channels // reduction,
kernel_size=1,
bias=True,
padding=0
)
self.norm1 = None
if layer_norm:
self.norm1 = nn.LayerNorm((in_channels // reduction, 1, 1))
self.relu = nn.ReLU()
self.fc2 = nn.Conv2d(
in_channels // reduction,
num_gates,
kernel_size=1,
bias=True,
padding=0
)
if gate_activation == 'sigmoid':
self.gate_activation = nn.Sigmoid()
elif gate_activation == 'relu':
self.gate_activation = nn.ReLU()
elif gate_activation == 'linear':
self.gate_activation = None
else:
raise RuntimeError(
"Unknown gate activation: {}".format(gate_activation)
)
def forward(self, x):
input = x
x = self.global_avgpool(x)
x = self.fc1(x)
if self.norm1 is not None:
x = self.norm1(x)
x = self.relu(x)
x = self.fc2(x)
if self.gate_activation is not None:
x = self.gate_activation(x)
if self.return_gates:
return x
return input * x
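# Shape sketch: for input x of shape (N, C, H, W), global_avgpool yields
# (N, C, 1, 1); fc1 squeezes to C // reduction channels, fc2 expands to
# num_gates (C by default), and the sigmoid gates are broadcast-multiplied
# back onto x, so the output keeps the input shape.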
class OSBlock(nn.Module):
"""Omni-scale feature learning block."""
def __init__(self, in_channels, out_channels, reduction=4, T=4, **kwargs):
super(OSBlock, self).__init__()
assert T >= 1
assert out_channels >= reduction and out_channels % reduction == 0
mid_channels = out_channels // reduction
self.conv1 = Conv1x1(in_channels, mid_channels)
self.conv2 = nn.ModuleList()
for t in range(1, T + 1):
self.conv2 += [LightConvStream(mid_channels, mid_channels, t)]
self.gate = ChannelGate(mid_channels)
self.conv3 = Conv1x1Linear(mid_channels, out_channels)
self.downsample = None
if in_channels != out_channels:
self.downsample = Conv1x1Linear(in_channels, out_channels)
def forward(self, x):
identity = x
x1 = self.conv1(x)
x2 = 0
for conv2_t in self.conv2:
x2_t = conv2_t(x1)
x2 = x2 + self.gate(x2_t)
x3 = self.conv3(x2)
if self.downsample is not None:
identity = self.downsample(identity)
out = x3 + identity
return F.relu(out)
class OSBlockINin(nn.Module):
"""Omni-scale feature learning block with instance normalization."""
def __init__(self, in_channels, out_channels, reduction=4, T=4, **kwargs):
super(OSBlockINin, self).__init__()
assert T >= 1
assert out_channels >= reduction and out_channels % reduction == 0
mid_channels = out_channels // reduction
self.conv1 = Conv1x1(in_channels, mid_channels)
self.conv2 = nn.ModuleList()
for t in range(1, T + 1):
self.conv2 += [LightConvStream(mid_channels, mid_channels, t)]
self.gate = ChannelGate(mid_channels)
self.conv3 = Conv1x1Linear(mid_channels, out_channels, bn=False)
self.downsample = None
if in_channels != out_channels:
self.downsample = Conv1x1Linear(in_channels, out_channels)
self.IN = nn.InstanceNorm2d(out_channels, affine=True)
def forward(self, x):
identity = x
x1 = self.conv1(x)
x2 = 0
for conv2_t in self.conv2:
x2_t = conv2_t(x1)
x2 = x2 + self.gate(x2_t)
x3 = self.conv3(x2)
x3 = self.IN(x3) # IN inside residual
if self.downsample is not None:
identity = self.downsample(identity)
out = x3 + identity
return F.relu(out)
##########
# Network architecture
##########
class OSNet(nn.Module):
"""Omni-Scale Network.
Reference:
- Zhou et al. Omni-Scale Feature Learning for Person Re-Identification. ICCV, 2019.
- Zhou et al. Learning Generalisable Omni-Scale Representations
for Person Re-Identification. arXiv preprint, 2019.
"""
def __init__(
self,
num_classes,
blocks,
layers,
channels,
feature_dim=512,
loss='softmax',
conv1_IN=False,
**kwargs
):
super(OSNet, self).__init__()
num_blocks = len(blocks)
assert num_blocks == len(layers)
assert num_blocks == len(channels) - 1
self.loss = loss
self.feature_dim = feature_dim
# convolutional backbone
self.conv1 = ConvLayer(
3, channels[0], 7, stride=2, padding=3, IN=conv1_IN
)
self.maxpool = nn.MaxPool2d(3, stride=2, padding=1)
self.conv2 = self._make_layer(
blocks[0], layers[0], channels[0], channels[1]
)
self.pool2 = nn.Sequential(
Conv1x1(channels[1], channels[1]), nn.AvgPool2d(2, stride=2)
)
self.conv3 = self._make_layer(
blocks[1], layers[1], channels[1], channels[2]
)
self.pool3 = nn.Sequential(
Conv1x1(channels[2], channels[2]), nn.AvgPool2d(2, stride=2)
)
self.conv4 = self._make_layer(
blocks[2], layers[2], channels[2], channels[3]
)
self.conv5 = Conv1x1(channels[3], channels[3])
self.global_avgpool = nn.AdaptiveAvgPool2d(1)
# fully connected layer
self.fc = self._construct_fc_layer(
self.feature_dim, channels[3], dropout_p=None
)
# identity classification layer
self.classifier = nn.Linear(self.feature_dim, num_classes)
self._init_params()
def _make_layer(self, blocks, layer, in_channels, out_channels):
layers = []
layers += [blocks[0](in_channels, out_channels)]
for i in range(1, len(blocks)):
layers += [blocks[i](out_channels, out_channels)]
return nn.Sequential(*layers)
def _construct_fc_layer(self, fc_dims, input_dim, dropout_p=None):
if fc_dims is None or fc_dims < 0:
self.feature_dim = input_dim
return None
if isinstance(fc_dims, int):
fc_dims = [fc_dims]
layers = []
for dim in fc_dims:
layers.append(nn.Linear(input_dim, dim))
layers.append(nn.BatchNorm1d(dim))
layers.append(nn.ReLU())
if dropout_p is not None:
layers.append(nn.Dropout(p=dropout_p))
input_dim = dim
self.feature_dim = fc_dims[-1]
return nn.Sequential(*layers)
def _init_params(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(
m.weight, mode='fan_out', nonlinearity='relu'
)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm1d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.InstanceNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def featuremaps(self, x):
x = self.conv1(x)
x = self.maxpool(x)
x = self.conv2(x)
x = self.pool2(x)
x = self.conv3(x)
x = self.pool3(x)
x = self.conv4(x)
x = self.conv5(x)
return x
def forward(self, x, return_featuremaps=False):
x = self.featuremaps(x)
if return_featuremaps:
return x
v = self.global_avgpool(x)
v = v.view(v.size(0), -1)
if self.fc is not None:
v = self.fc(v)
if not self.training:
return v
y = self.classifier(v)
if self.loss == 'softmax':
return y
elif self.loss == 'triplet':
return y, v
else:
raise KeyError("Unsupported loss: {}".format(self.loss))
def init_pretrained_weights(model, key=''):
"""Initializes model with pretrained weights.
Layers that don't match with pretrained layers in name or size are kept unchanged.
"""
import os
import errno
import gdown
from collections import OrderedDict
def _get_torch_home():
ENV_TORCH_HOME = 'TORCH_HOME'
ENV_XDG_CACHE_HOME = 'XDG_CACHE_HOME'
DEFAULT_CACHE_DIR = '~/.cache'
torch_home = os.path.expanduser(
os.getenv(
ENV_TORCH_HOME,
os.path.join(
os.getenv(ENV_XDG_CACHE_HOME, DEFAULT_CACHE_DIR), 'torch'
)
)
)
return torch_home
torch_home = _get_torch_home()
model_dir = os.path.join(torch_home, 'checkpoints')
try:
os.makedirs(model_dir)
except OSError as e:
if e.errno == errno.EEXIST:
# Directory already exists, ignore.
pass
else:
# Unexpected OSError, re-raise.
raise
filename = key + '_imagenet.pth'
cached_file = os.path.join(model_dir, filename)
if not os.path.exists(cached_file):
gdown.download(pretrained_urls[key], cached_file, quiet=False)
state_dict = torch.load(cached_file)
model_dict = model.state_dict()
new_state_dict = OrderedDict()
matched_layers, discarded_layers = [], []
for k, v in state_dict.items():
if k.startswith('module.'):
k = k[7:] # discard module.
if k in model_dict and model_dict[k].size() == v.size():
new_state_dict[k] = v
matched_layers.append(k)
else:
discarded_layers.append(k)
model_dict.update(new_state_dict)
model.load_state_dict(model_dict)
if len(matched_layers) == 0:
warnings.warn(
'The pretrained weights from "{}" cannot be loaded, '
'please check the key names manually '
'(** ignored and continue **)'.format(cached_file)
)
else:
print(
'Successfully loaded imagenet pretrained weights from "{}"'.
format(cached_file)
)
if len(discarded_layers) > 0:
print(
'** The following layers are discarded '
'due to unmatched keys or layer size: {}'.
format(discarded_layers)
)
##########
# Instantiation
##########
def osnet_ain_x1_0(
num_classes=1000, pretrained=True, loss='softmax', **kwargs
):
model = OSNet(
num_classes,
blocks=[
[OSBlockINin, OSBlockINin], [OSBlock, OSBlockINin],
[OSBlockINin, OSBlock]
],
layers=[2, 2, 2],
channels=[64, 256, 384, 512],
loss=loss,
conv1_IN=True,
**kwargs
)
if pretrained:
init_pretrained_weights(model, key='osnet_ain_x1_0')
return model
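# Hedged usage sketch (illustrative only; assumes the torchreid-style definitions and
# the module-level torch import used elsewhere in this file): build the AIN variant
# without downloading pretrained weights and run a dummy forward pass. In eval mode
# the network returns 512-dim feature vectors.
if __name__ == '__main__':
    _model = osnet_ain_x1_0(num_classes=751, pretrained=False)
    _model.eval()
    with torch.no_grad():
        _feats = _model(torch.randn(2, 3, 256, 128))
    print(_feats.shape)  # expected: torch.Size([2, 512])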
|
py | b40c2a27bdaa967612fa9f23fb06094fe2782859 |
#from model.group import Group
#import pytest
#import random
#import string
#def random_string(prefix, maxlen):
# symbols = string.ascii_letters + string.digits + string.punctuation + " "*10
# return prefix + "".join([random.choice(symbols) for i in range (random.randrange(maxlen))])
|
py | b40c2a305586f3d9c93a9cd1ac4af49bd5ceb586 | #Method 1: passing list in the class.
class zeroTriplets:
def __init__(self,arr):
self.arr = arr
def threeSum(self):
n = len(self.arr)
for i in range(0, n-2):
for j in range(i+1, n-1):
for k in range(j+1, n):
if (self.arr[i] + self.arr[j] + self.arr[k] == 0):
print(self.arr[i], self.arr[j], self.arr[k])
# else:
# print(" not exist ")
class display_arr(zeroTriplets):
def print_list(self):
print(self.arr)
list1 = [-25, -10, -7, -3, 2, 4, 8, 10]
a = zeroTriplets(list1)
a.threeSum()
b = display_arr(list1)
b.print_list()
#------------------------------------------------------------------------------------------------
#method 2:passing list to the function in the class.
class py_solution:
def threeSum(self, arr):
n = len(arr)
for i in range(0, n-2):
for j in range(i+1, n-1):
for k in range(j+1, n):
if (arr[i] + arr[j] + arr[k] == 0):
print(arr[i], arr[j], arr[k])
# else:
# print(" not exist ")
# method 1:-
a = py_solution()
a.threeSum([-25, -10, -7, -3, 2, 4, 8, 10])
# method 2:-
py_solution().threeSum([-25, -10, -7, -3, 2, 4, 8, 10]) |
py | b40c2a5394ce3b0b1d52a9b976a525ad8fa3112a | import json
import datetime
from datetime import timedelta
from dateutil import parser
def diferenca_entre_datas(data1, data2):
d1 = datetime.datetime.strptime(data1, "%d-%m-%Y")
d2 = datetime.datetime.strptime(data2, "%d-%m-%Y")
return abs((d1 - d2).days)
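# Hedged example (illustrative): the helper above expects dates as "dd-mm-YYYY"
# strings and returns the absolute difference in days, e.g.
# diferenca_entre_datas("01-01-2019", "31-05-2019") -> 150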
def ler_arquivo_json_tipo_1(nome_arquivo):
with open(nome_arquivo, 'r', encoding='utf8') as f:
return json.load(f)
def gerar_historico_releases(arquivo_json):
arquivo_saida = []
repo_id_ant = 0
qtd_releases_ant = 0
repo_ant = {}
for i in range(len(arquivo_json)):
print(arquivo_json[i]['name'])
if arquivo_json[i]['data'] == "":
arquivo_json[i]['data'] = arquivo_json[i]['data_criacao']
if arquivo_json[i]['id'] != repo_id_ant:
if repo_id_ant != 0 and repo_ant['data'] != "":
qtd_dias = diferenca_entre_datas(repo_ant['data'],'31-05-2019')
data = datetime.datetime.strptime(repo_ant['data'], "%d-%m-%Y")
for x in range(qtd_dias):
data = data + timedelta(days=1)
data_string = datetime.datetime.strftime(data,"%d-%m-%Y")
registro = {}
registro['id'] = repo_ant['id']
registro['data'] = data_string
registro['releases'] = qtd_releases_ant
arquivo_saida.append(registro)
repo_id_ant = arquivo_json[i]['id']
qtd_releases_ant = 0
if arquivo_json[i]['data_criacao'] != arquivo_json[i]['data']:
qtd_dias = diferenca_entre_datas(arquivo_json[i]['data_criacao'],arquivo_json[i]['data'])
data_releases = datetime.datetime.strptime(arquivo_json[i]['data'],"%d-%m-%Y")
data_criacao = datetime.datetime.strptime(arquivo_json[i]['data_criacao'],"%d-%m-%Y")
if data_criacao < data_releases:
data = data_criacao
for x in range(qtd_dias+1):
data_string = datetime.datetime.strftime(data,"%d-%m-%Y")
registro = {}
registro['id'] = arquivo_json[i]['id']
registro['data'] = data_string
if arquivo_json[i]['data'] == data_string:
registro['releases'] = int(arquivo_json[i]['quantidade_releases'])
qtd_releases_ant = int(arquivo_json[i]['quantidade_releases'])
else:
registro['releases'] = 0
arquivo_saida.append(registro)
data = data + timedelta(days=1)
else:
data = data_releases
data_string = datetime.datetime.strftime(data,"%d-%m-%Y")
registro = {}
registro['id'] = arquivo_json[i]['id']
registro['data'] = data_string
registro['releases'] = int(arquivo_json[i]['quantidade_releases'])
qtd_releases_ant = int(arquivo_json[i]['quantidade_releases'])
arquivo_saida.append(registro)
else:
registro = {}
registro['id'] = arquivo_json[i]['id']
registro['data'] = arquivo_json[i]['data_criacao']
registro['releases'] = int(arquivo_json[i]['quantidade_releases'])
qtd_releases_ant = int(arquivo_json[i]['quantidade_releases'])
arquivo_saida.append(registro)
else:
qtd_dias = diferenca_entre_datas(repo_ant['data'],arquivo_json[i]['data'])
data = datetime.datetime.strptime(repo_ant['data'], "%d-%m-%Y")
for y in range(qtd_dias):
data = data + timedelta(days=1)
data_string = datetime.datetime.strftime(data,"%d-%m-%Y")
registro = {}
registro['id'] = arquivo_json[i]['id']
registro['data'] = data_string
if arquivo_json[i]['data'] == data_string:
qtd_releases_ant = qtd_releases_ant + int(arquivo_json[i]['quantidade_releases'])
registro['releases'] = qtd_releases_ant
arquivo_saida.append(registro)
repo_ant = arquivo_json[i]
return arquivo_saida
def gravar_arquivo_json(nome_arquivo, dados):
with open(nome_arquivo, 'w', encoding='utf-8') as f:
json.dump(dados, f, ensure_ascii=False, indent=2, sort_keys=False, separators=(',' , ':'))
#================================================================================#
# MAIN #
#================================================================================#
print("Informe o nome do arquivo.json dos releases: ")
nome_arquivo_releases = input()
arquivo_json = ler_arquivo_json_tipo_1(nome_arquivo_releases)
arquivos_json_saida = gerar_historico_releases(arquivo_json)
nome_arquivo_releases_saida = f'saida-{str(nome_arquivo_releases)}'
gravar_arquivo_json(nome_arquivo_releases_saida,arquivos_json_saida)
|
py | b40c2a98ab3cdfc96a671067f7f7c3f541b7a8bf | ## The Drug Interaction Knowledge Base (DIKB) is (C) Copyright 2005 by
## Richard Boyce
## Original Authors:
## Richard Boyce
## This library is free software; you can redistribute it and/or
## modify it under the terms of the GNU Library General Public
## License as published by the Free Software Foundation; either
## version 2 of the License, or (at your option) any later version.
## This library is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## Library General Public License for more details.
## You should have received a copy of the GNU Library General Public
## License along with this library; if not, write to the
## Free Software Foundation, Inc., 59 Temple Place - Suite 330,
## Boston, MA 02111-1307, USA.
## -----------------------------------------------------------------
## File: DrugKB_webpages.py
## functions for generating re-used html snippets
import string, re, os, time, glob
from HTMLcolors import *
from HTMLgen import *
from DIKB.ModelUtils import *
from DIKB.EvidenceModel import *
### define functions for the various html pages
def addAssertionsForm(object_lst, slot_lst, object_type):
form = Form('editAssertions.rpy')
form.append("Please select the object that you want to make an assertion about:")
form.append(BR())
if len(object_lst) == 0:
object_lst = ["No Objects to choose from!"]
objects = Select(object_lst, name='object', size=1, multiple=0)
form.append(objects)
form.append(P())
form.append("Please select the slot you have information on:")
form.append(BR())
slots = Select(slot_lst, name='slot', size=1, multiple=0)
form.append(slots)
form.append(P())
"""gg - Added a hidden field for object type so we don't have to try to figure it out by name in editAssertions.rpy"""
hidden = Input(type='hidden', name='type', value=object_type)
form.append(hidden)
form.submit = Input(type='submit', value='Add a value for this assertion')
return form
def selectObjectSlotForm(submit_url, obj, slot, value_lst):
form = Form(submit_url)
form.append(Input(type='hidden', name='object', value=obj))
form.append(Input(type='hidden', name='slot', value=slot))
form.append(Input(type='hidden', name='assumption_picks', value=""))
form.append(Input(type='hidden', name='new_assumption', value="ignore"))
form.append(" ".join(["Edit an assertion for <b>object: ", obj,"</b>",
" and <b>slot:", slot, "</b>"]))
form.append(BR())
form.append("Please select a value for the slot that this evidence suggests:")
form.append(BR())
form.append(P())
values = Select(value_lst, name='value', size=1, multiple=0)
form.append(values)
form.append(P())
form.append(Input(type='checkbox', name='assert-by-default', llabel='Assert by default with no evidence support? '))
form.submit = Input(type='submit', name="add-assumptions", value='Add assumptions')
form.append(P(), Input(type='submit', name="add-assumptions", value='No assumptions needed'))
return form
def addAssumptionsForm(submit_url, obj, slot, value, assumption_keys, assumption_picks):
form = Form(submit_url)
form.append(Input(type='hidden', name='object', value=obj))
form.append(Input(type='hidden', name='slot', value=slot))
form.append(Input(type='hidden', name='value', value=value))
form.append(Input(type='hidden', name='assumption_picks', value=",".join(assumption_picks)))
""" get assumptions that this use of evidence depends on """
form.append('''<a name="add_assumptions"></a>''')
assumption_picks = filter(lambda x: x != "", assumption_picks)
form.append(" ".join(["If necessary, add an assumption that this use of evidence depends on; currently - <br><tt>",
"<br>".join(assumption_picks),"</tt><br>"]))
assumption_keys.sort()
assumption_keys.insert(0,"")
form.append(Select(assumption_keys, name='new_assumption', size=1, multiple=0))
form.append(Input(type='submit', name="add-assumptions", value='Add this assumption'))
form.append("<br>")
form.submit = Input(type='submit', name="add-assumptions", value='Done')
return form
def addEvidenceForm(submit_url, obj, slot, value, assumption_picks, default):
form = Form(submit_url)
form.append(Input(type='hidden', name='object', value=obj))
form.append(Input(type='hidden', name='slot', value=slot))
form.append(Input(type='hidden', name='value', value=value))
form.append(Input(type='hidden', name='assumption_picks', value=",".join(assumption_picks)))
if default:
form.append(" ".join(["<h3>Assertion '<b>", obj,"_", slot, "_", value, '</b> will be enabled by default however you can still enter evidence in case the validity of the assertion will later be evaluated by evidence.</h3>']))
form.append(Input(type='hidden', name='assert_by_default', value="True"))
form.append(Input(type='submit', name="evidence-entry", value='Do Not Add Evidence at This Time'), P())
else:
form.append(" ".join(["Add evidence for <b>object:", obj,"</b>",
", <b>slot:", slot, "</b>, with <b>value:", value, '</b>']))
form.append(BR())
r = reviewers
r.sort()
form.append(Select(r, name='reviewer', size=1, multiple=0),P())
form.append("".join(["Is this evidence for or against slot value <b>", value,
"</b>?"]),P())
radio_for = Input(type='radio', name='position', value='for', checked=True, llabel='Evidence for')
radio_against = Input(type='radio', name='position', value='against', llabel='Evidence against')
form.append(radio_for,BR(),radio_against,P())
if slot == 'bioavailability':
form.append("The proportion (%/100) of an active ingredient's dose that reaches systemic circulation: ")
form.append(Input(type='text', name='cont_value', size=10), P())
elif slot == 'first_pass_effect':
form.append("The proportion (%/100) of an active ingredient's absorbed dose that is cleared by first-pass metabolism: ")
form.append(Input(type='text', name='cont_value', size=10), P())
elif slot == 'fraction_absorbed':
form.append("The proportion (%/100) of an active ingredient's dose that is absorbed in the gastro-intestinal tract: ")
form.append(Input(type='text', name='cont_value', size=10), P())
elif slot == 'increases_auc':
form.append("The number of study participants: ")
form.append(Input(type='text', name='numb_subj', size=10), P())
form.append("The object drug's's dose in grams: ")
form.append(Input(type='text', name='object_dose', size=10), P())
form.append("The precipitant drug's's dose in grams: ")
form.append(Input(type='text', name='precip_dose', size=10), P())
form.append("AUC_i/AUC (AUC_i: the AUC of the object drug in the presence of inhibitor): ")
form.append(Input(type='text', name='cont_value', size=10), P())
elif slot == 'inhibition_constant':
form.append("The inhibition constant, k_i, in grams/L: ")
form.append(Input(type='text', name='cont_value', size=10), P())
e_it = In_vitro_inhibition_study()
e_s = e_it.enzyme_system.range
form.append("The enzyme system used in this study: ")
form.append(Select(e_s, name='enzyme_system', size=1, multiple=0),P())
elif slot == 'maximum_concentration':
form.append("The number of subjects in the study (if available): ")
form.append(Input(type='text', name='numb_subjects', size=10), P())
form.append("The dose of the drug from which the C_max was derived in grams: ")
form.append(Input(type='text', name='dose', size=10), P())
form.append("The maximum concentration, C_max, in grams/L: ")
form.append(Input(type='text', name='cont_value', size=10), P())
elif slot == 'minimum_therapeutic_dose':
form.append("The usual (or commonly accepted) minimum therapeutic dose in <I>grams</I> per day: ")
form.append(Input(type='text', name='cont_value', size=10), P())
elif slot == 'minimum_therapeutic_dose_is_at_least':
form.append("A dose (in <I>grams</I> per day) assumed to be larger than the usual (or commonly accepted) minimum therapeutic dose (the system will confirm the validity of this assertion during inference): ")
form.append(Input(type='text', name='cont_value', size=10), P())
form.append("Please input a pointer to this evidence, For example a PubMed ID, a url, or the article identifier from the Drug KB bibliography:")
form.append(P())
form.append(Input(type='text', name='pointer', size=55), P())
form.append("Please paste or type in relevant information about the evidence including data required by inclusion criteria:",BR())
form.append(Textarea(name='quote', rows=20, cols=55), P())
"""evidence type specific input"""
"""get evidence types"""
try:
f = open("data/evidence-types", 'r')
except IOError, err:
warning(" ".join(["Could not open file containing evidence types at:",getcwd(),"data/evidence-types",
"Please make sure this file exists. Returning None"]) , 1)
return None
types = f.read()
f.close()
reg = re.compile("^[_A-Za-z0-9]+",re.MULTILINE)
all_types = reg.findall(types)
all_types.sort()
lst = types.split('\n')
lst.sort()
form.append("<br><b>Please select one evidence type from the set of evidence types listed below:</b>",BR())
cnt = 0
for item in lst:
radio = Input(type='radio', name='type', value=all_types[cnt], rlabel=item)
form.append(radio, BR(), BR())
cnt = cnt + 1
form.submit = Input(type='submit', name="evidence-entry", value='Add Evidence')
return form
def readyToClassifyForm(object_slot_value, state):
form = Form("".join(['viewData.rpy#',object_slot_value]))
form.append(Input(type='hidden', name='obj_slot_val', value = object_slot_value))
form.append("Ready for classification: ")
radio_True = Input(type='radio', name='state', value='True', checked = state, llabel='True')
radio_False = Input(type='radio', name='state', value='False', checked=(not state), llabel='False')
form.append(BR(),radio_True,BR(),radio_False,BR())
form.submit = Input(type='submit', value='Change Classification Status')
return form
def assertionTableView(assertion):
"""Create a table view of an assertion
@param assertion:EvidenceBase::Assertion instance
returns: an HTMLgen Table instance
"""
title = 'Evidence'
t = Table(title)
t_content = []
if len(assertion.evidence_for) == 0:
tmp = ['No evidence for!']
t_content.append(tmp)
else:
for i in assertion.evidence_for:
e = []
e = [Bold('Evidence For (item %s)').__str__() % assertion.evidence_for.index(i),
Bold('Evidence Type: ').__str__() + i.evidence_type.value,
Bold('Pointer: ').__str__() + make_link(i.doc_pointer),
Bold('Reviewer: ').__str__() + i.reviewer.value]
t_content.append(e)
e = ['', Bold('Quote: ').__str__() + i.quote]
t_content.append(e)
e = ['', Bold('Assumptions: ').__str__() + "<br>".join(i.assumptions.getEntries())]
t_content.append(e)
if assertion.slot in ['bioavailability', 'first_pass_effect', 'fraction_absorbed', 'inhibition_constant', 'increases_auc', 'maximum_concentration']:
e = ['', Bold(assertion.slot + ": ").__str__() + str(i.value)]
t_content.append(e)
if assertion.slot == 'inhibition_constant':
e = ['', Bold('enzyme_system: ').__str__() + str(i.enzyme_system.getEntry())]
t_content.append(e)
elif assertion.slot == 'increases_auc':
e = ['', Bold('object_dose: ').__str__() + str(i.object_dose), Bold('precip_dose: ').__str__() + str(i.precip_dose)]
t_content.append(e)
e = ['', Bold('numb_subj: ').__str__() + str(i.numb_subj)]
t_content.append(e)
elif assertion.slot == 'maximum_concentration':
e = ['', Bold('dose: ').__str__() + str(i.dose), Bold('numb_subjects: ').__str__() + str(i.numb_subjects)]
t_content.append(e)
if len(assertion.evidence_against) == 0:
if assertion.slot in ['bioavailability', 'first_pass_effect', 'fraction_absorbed', 'inhibition_constant', 'increases_auc', 'maximum_concentration']:
msg = [Bold('Evidence Against N/A').__str__()]
else:
msg = [Bold('No evidence against!').__str__()]
t_content.append(msg)
else:
for i in assertion.evidence_against:
e = []
e = [Bold('Evidence Against (item %s)').__str__() % assertion.evidence_against.index(i),
Bold('Evidence Type: ').__str__() + i.evidence_type.value,
Bold('Pointer: ').__str__() + make_link(i.doc_pointer),
Bold('Reviewer: ').__str__() + i.reviewer.value]
t_content.append(e)
e = ['', Bold('Quote: ').__str__() + i.quote]
t_content.append(e)
e = ['', Bold('Assumptions: ').__str__() + "<br>".join(i.assumptions.getEntries())]
t_content.append(e)
t.body = t_content
return t
def assertionShortTableView(assertion):
"""Create a simplified table view of an assertion
@param assertion:EvidenceBase::Assertion instance
returns: an HTMLgen Table instance
"""
title = 'Evidence'
t = Table(title)
t_content = []
if len(assertion.evidence_for) == 0:
tmp = ['No evidence for!']
t_content.append(tmp)
else:
for i in assertion.evidence_for:
e = []
e = [Bold('Evidence For (item %s)').__str__() % assertion.evidence_for.index(i),
Bold('Evidence Type: ').__str__() + i.evidence_type.value]
t_content.append(e)
if len(assertion.evidence_against) == 0:
if assertion.slot in ['bioavailability', 'first_pass_effect', 'fraction_absorbed', 'inhibition_constant', 'increases_auc', 'maximum_concentration']:
msg = [Bold('Evidence Against N/A').__str__()]
else:
msg = [Bold('No evidence against!').__str__()]
t_content.append(msg)
else:
for i in assertion.evidence_against:
e = []
e = [Bold('Evidence Against (item %s)').__str__() % assertion.evidence_against.index(i),
Bold('Evidence Type: ').__str__() + i.evidence_type.value]
t_content.append(e)
t.body = t_content
return t
def make_link(pointer):
"""return a pubmed url query to the pointer if it is a pmid"""
reg = re.compile("[a-zA-Z]+")
link_head ='''<a target="new" href="http://www.ncbi.nlm.nih.gov/entrez/query.fcgi?cmd=Retrieve&db=pubmed&dopt=Abstract&list_uids='''
link_tail = '''&query_hl=1"'''
end_tag = '''>'''
close_tag = '''</a>'''
if reg.match(pointer):
return pointer
else:
return "".join([link_head,pointer,link_tail,end_tag,pointer,close_tag])
|
py | b40c2abb065f8ee1799b5c20a04af323c4378b27 | # copied from torch_scatter
from typing import Optional, Tuple
import torch
def broadcast(src: torch.Tensor, other: torch.Tensor, dim: int):
if dim < 0:
dim = other.dim() + dim
if src.dim() == 1:
for _ in range(0, dim):
src = src.unsqueeze(0)
for _ in range(src.dim(), other.dim()):
src = src.unsqueeze(-1)
src = src.expand(other.size())
return src
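# Hedged example (illustrative): broadcast() reshapes a 1-D index so it can drive
# gather/scatter along `dim`; e.g. an index of shape (6,) paired with a src of shape
# (10, 6, 64) and dim=1 is viewed as (1, 6, 1) and then expanded to (10, 6, 64).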
def scatter_sum(src: torch.Tensor, index: torch.Tensor, dim: int = -1,
out: Optional[torch.Tensor] = None,
dim_size: Optional[int] = None) -> torch.Tensor:
index = broadcast(index, src, dim)
if out is None:
size = list(src.size())
if dim_size is not None:
size[dim] = dim_size
elif index.numel() == 0:
size[dim] = 0
else:
size[dim] = int(index.max()) + 1
out = torch.zeros(size, dtype=src.dtype, device=src.device)
return out.scatter_add_(dim, index, src)
else:
return out.scatter_add_(dim, index, src)
def scatter_add(src: torch.Tensor, index: torch.Tensor, dim: int = -1,
out: Optional[torch.Tensor] = None,
dim_size: Optional[int] = None) -> torch.Tensor:
return scatter_sum(src, index, dim, out, dim_size)
def scatter_mul(src: torch.Tensor, index: torch.Tensor, dim: int = -1,
out: Optional[torch.Tensor] = None,
dim_size: Optional[int] = None) -> torch.Tensor:
return torch.ops.torch_scatter.scatter_mul(src, index, dim, out, dim_size)
def scatter_mean(src: torch.Tensor, index: torch.Tensor, dim: int = -1,
out: Optional[torch.Tensor] = None,
dim_size: Optional[int] = None) -> torch.Tensor:
out = scatter_sum(src, index, dim, out, dim_size)
dim_size = out.size(dim)
index_dim = dim
if index_dim < 0:
index_dim = index_dim + src.dim()
if index.dim() <= index_dim:
index_dim = index.dim() - 1
ones = torch.ones(index.size(), dtype=src.dtype, device=src.device)
count = scatter_sum(ones, index, index_dim, None, dim_size)
count[count < 1] = 1
count = broadcast(count, out, dim)
if out.is_floating_point():
out.true_divide_(count)
else:
out.div_(count, rounding_mode='floor')
return out
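# Hedged example (illustrative): group-wise row averaging with scatter_mean.
# src = torch.tensor([[1., 2.], [3., 4.], [5., 6.]])
# index = torch.tensor([0, 0, 1])
# scatter_mean(src, index, dim=0) -> tensor([[2., 3.], [5., 6.]])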
def scatter_min(
src: torch.Tensor, index: torch.Tensor, dim: int = -1,
out: Optional[torch.Tensor] = None,
dim_size: Optional[int] = None) -> Tuple[torch.Tensor, torch.Tensor]:
return torch.ops.torch_scatter.scatter_min(src, index, dim, out, dim_size)
def scatter_max(
src: torch.Tensor, index: torch.Tensor, dim: int = -1,
out: Optional[torch.Tensor] = None,
dim_size: Optional[int] = None) -> Tuple[torch.Tensor, torch.Tensor]:
return torch.ops.torch_scatter.scatter_max(src, index, dim, out, dim_size)
def scatter(src: torch.Tensor, index: torch.Tensor, dim: int = -1,
out: Optional[torch.Tensor] = None, dim_size: Optional[int] = None,
reduce: str = "sum") -> torch.Tensor:
r"""
|
.. image:: https://raw.githubusercontent.com/rusty1s/pytorch_scatter/
master/docs/source/_figures/add.svg?sanitize=true
:align: center
:width: 400px
|
Reduces all values from the :attr:`src` tensor into :attr:`out` at the
indices specified in the :attr:`index` tensor along a given axis
:attr:`dim`.
For each value in :attr:`src`, its output index is specified by its index
in :attr:`src` for dimensions outside of :attr:`dim` and by the
corresponding value in :attr:`index` for dimension :attr:`dim`.
The applied reduction is defined via the :attr:`reduce` argument.
Formally, if :attr:`src` and :attr:`index` are :math:`n`-dimensional
tensors with size :math:`(x_0, ..., x_{i-1}, x_i, x_{i+1}, ..., x_{n-1})`
and :attr:`dim` = `i`, then :attr:`out` must be an :math:`n`-dimensional
tensor with size :math:`(x_0, ..., x_{i-1}, y, x_{i+1}, ..., x_{n-1})`.
Moreover, the values of :attr:`index` must be between :math:`0` and
:math:`y - 1`, although no specific ordering of indices is required.
The :attr:`index` tensor supports broadcasting in case its dimensions do
not match with :attr:`src`.
For one-dimensional tensors with :obj:`reduce="sum"`, the operation
computes
.. math::
\mathrm{out}_i = \mathrm{out}_i + \sum_j~\mathrm{src}_j
where :math:`\sum_j` is over :math:`j` such that
:math:`\mathrm{index}_j = i`.
.. note::
This operation is implemented via atomic operations on the GPU and is
therefore **non-deterministic** since the order of parallel operations
to the same value is undetermined.
For floating-point variables, this results in a source of variance in
the result.
:param src: The source tensor.
:param index: The indices of elements to scatter.
:param dim: The axis along which to index. (default: :obj:`-1`)
:param out: The destination tensor.
:param dim_size: If :attr:`out` is not given, automatically create output
with size :attr:`dim_size` at dimension :attr:`dim`.
If :attr:`dim_size` is not given, a minimal sized output tensor
according to :obj:`index.max() + 1` is returned.
:param reduce: The reduce operation (:obj:`"sum"`, :obj:`"mul"`,
:obj:`"mean"`, :obj:`"min"` or :obj:`"max"`). (default: :obj:`"sum"`)
:rtype: :class:`Tensor`
.. code-block:: python
from torch_scatter import scatter
src = torch.randn(10, 6, 64)
index = torch.tensor([0, 1, 0, 1, 2, 1])
# Broadcasting in the first and last dim.
out = scatter(src, index, dim=1, reduce="sum")
print(out.size())
.. code-block::
torch.Size([10, 3, 64])
"""
if reduce == 'sum' or reduce == 'add':
return scatter_sum(src, index, dim, out, dim_size)
if reduce == 'mul':
return scatter_mul(src, index, dim, out, dim_size)
elif reduce == 'mean':
return scatter_mean(src, index, dim, out, dim_size)
elif reduce == 'min':
return scatter_min(src, index, dim, out, dim_size)[0]
elif reduce == 'max':
return scatter_max(src, index, dim, out, dim_size)[0]
else:
raise ValueError
def segment_sum_csr(src: torch.Tensor, indptr: torch.Tensor,
out: Optional[torch.Tensor] = None) -> torch.Tensor:
return torch.ops.torch_scatter.segment_sum_csr(src, indptr, out)
def segment_add_csr(src: torch.Tensor, indptr: torch.Tensor,
out: Optional[torch.Tensor] = None) -> torch.Tensor:
return torch.ops.torch_scatter.segment_sum_csr(src, indptr, out)
def segment_mean_csr(src: torch.Tensor, indptr: torch.Tensor,
out: Optional[torch.Tensor] = None) -> torch.Tensor:
return torch.ops.torch_scatter.segment_mean_csr(src, indptr, out)
def segment_min_csr(
src: torch.Tensor, indptr: torch.Tensor,
out: Optional[torch.Tensor] = None
) -> Tuple[torch.Tensor, torch.Tensor]:
return torch.ops.torch_scatter.segment_min_csr(src, indptr, out)
def segment_max_csr(
src: torch.Tensor, indptr: torch.Tensor,
out: Optional[torch.Tensor] = None
) -> Tuple[torch.Tensor, torch.Tensor]:
return torch.ops.torch_scatter.segment_max_csr(src, indptr, out)
def segment_csr(src: torch.Tensor, indptr: torch.Tensor,
out: Optional[torch.Tensor] = None,
reduce: str = "sum") -> torch.Tensor:
r"""
Reduces all values from the :attr:`src` tensor into :attr:`out` within the
ranges specified in the :attr:`indptr` tensor along the last dimension of
:attr:`indptr`.
For each value in :attr:`src`, its output index is specified by its index
in :attr:`src` for dimensions outside of :obj:`indptr.dim() - 1` and by the
corresponding range index in :attr:`indptr` for dimension
:obj:`indptr.dim() - 1`.
The applied reduction is defined via the :attr:`reduce` argument.
Formally, if :attr:`src` and :attr:`indptr` are :math:`n`-dimensional and
:math:`m`-dimensional tensors with
size :math:`(x_0, ..., x_{m-1}, x_m, x_{m+1}, ..., x_{n-1})` and
:math:`(x_0, ..., x_{m-2}, y)`, respectively, then :attr:`out` must be an
:math:`n`-dimensional tensor with size
:math:`(x_0, ..., x_{m-2}, y - 1, x_{m}, ..., x_{n-1})`.
Moreover, the values of :attr:`indptr` must be between :math:`0` and
:math:`x_m` in ascending order.
The :attr:`indptr` tensor supports broadcasting in case its dimensions do
not match with :attr:`src`.
For one-dimensional tensors with :obj:`reduce="sum"`, the operation
computes
.. math::
\mathrm{out}_i =
\sum_{j = \mathrm{indptr}[i]}^{\mathrm{indptr}[i+1]-1}~\mathrm{src}_j.
Due to the use of index pointers, :meth:`segment_csr` is the fastest
method to apply for grouped reductions.
.. note::
In contrast to :meth:`scatter()` and :meth:`segment_coo`, this
operation is **fully-deterministic**.
:param src: The source tensor.
:param indptr: The index pointers between elements to segment.
The number of dimensions of :attr:`index` needs to be less than or
equal to :attr:`src`.
:param out: The destination tensor.
:param reduce: The reduce operation (:obj:`"sum"`, :obj:`"mean"`,
:obj:`"min"` or :obj:`"max"`). (default: :obj:`"sum"`)
:rtype: :class:`Tensor`
.. code-block:: python
from torch_scatter import segment_csr
src = torch.randn(10, 6, 64)
indptr = torch.tensor([0, 2, 5, 6])
indptr = indptr.view(1, -1) # Broadcasting in the first and last dim.
out = segment_csr(src, indptr, reduce="sum")
print(out.size())
.. code-block::
torch.Size([10, 3, 64])
"""
if reduce == 'sum' or reduce == 'add':
return segment_sum_csr(src, indptr, out)
elif reduce == 'mean':
return segment_mean_csr(src, indptr, out)
elif reduce == 'min':
return segment_min_csr(src, indptr, out)[0]
elif reduce == 'max':
return segment_max_csr(src, indptr, out)[0]
else:
raise ValueError
def gather_csr(src: torch.Tensor, indptr: torch.Tensor,
out: Optional[torch.Tensor] = None) -> torch.Tensor:
return torch.ops.torch_scatter.gather_csr(src, indptr, out) |
py | b40c2b57eb333fc72fca03a4e928f34cbdb46113 | # -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Author: Albert King
date: 2020/1/23 9:07
contact: [email protected]
desc:
"""
|
py | b40c2b906a16ee68933630710c3e84f42ad05394 | # -*- coding: utf-8 -*-
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sample data exhibiting histogram summaries.
Run with:
bazel run //tensorboard/plugins/histogram:histograms_demo
"""
from absl import app
import tensorflow as tf
# Directory into which to write tensorboard data.
LOGDIR = "/tmp/histograms_demo"
def run(k, step):
"""
Arguments:
k: a float in the range [0, 1] that affects the histogram values written by the run.
step: an integer value to use for writing summaries.
"""
# Make a normal distribution, with a shifting mean
mean_moving_normal = tf.random.normal(shape=[1000], mean=(5 * k), stddev=1)
# Record that distribution into a histogram summary
tf.summary.histogram(
"normal/moving_mean",
mean_moving_normal,
description="A normal distribution whose mean changes " "over time.",
step=step,
)
# Make a normal distribution with shrinking variance
shrinking_normal = tf.random.normal(shape=[1000], mean=0, stddev=1 - (k))
# Record that distribution too
tf.summary.histogram(
"normal/shrinking_variance",
shrinking_normal,
description="A normal distribution whose variance "
"shrinks over time.",
step=step,
)
# Let's combine both of those distributions into one dataset
normal_combined = tf.concat([mean_moving_normal, shrinking_normal], 0)
# We add another histogram summary to record the combined distribution
tf.summary.histogram(
"normal/bimodal",
normal_combined,
description="A combination of two normal distributions, "
"one with a moving mean and one with "
"shrinking variance. The result is a "
"distribution that starts as unimodal and "
"becomes more and more bimodal over time.",
step=step,
)
# Add a gamma distribution
gamma = tf.random.gamma(shape=[1000], alpha=k)
tf.summary.histogram(
"gamma",
gamma,
description="A gamma distribution whose shape "
"parameter, α, changes over time.",
step=step,
)
# And a poisson distribution
poisson = tf.random.poisson(shape=[1000], lam=k)
tf.summary.histogram(
"poisson",
poisson,
description="A Poisson distribution, which only "
"takes on integer values.",
step=step,
)
# And a uniform distribution
uniform = tf.random.uniform(shape=[1000], maxval=k * 10)
tf.summary.histogram(
"uniform",
uniform,
description="A simple uniform distribution.",
step=step,
)
# And an empty distribution
empty = tf.constant([])
tf.summary.histogram(
"empty",
empty,
description="An empty distribution.",
step=step,
)
# And a distribution consisting of a single unique value
single_value = tf.constant([1.0] * 10)
tf.summary.histogram(
"single_value",
single_value,
description="A distribution containing a single unique value.",
step=step,
)
# Finally, combine everything together!
all_distributions = [
mean_moving_normal,
shrinking_normal,
gamma,
poisson,
uniform,
empty,
single_value,
]
all_combined = tf.concat(all_distributions, 0)
tf.summary.histogram(
"all_combined",
all_combined,
description="An amalgamation of several distributions: a "
"uniform distribution, a gamma distribution, a Poisson "
"distribution, two normal distributions, an empty "
"distribution, and a distribution containing a single "
"unique value.",
step=step,
)
def run_all(logdir, num_summaries=400):
"""Generate a bunch of histogram data, and write it to logdir."""
tf.random.set_seed(0)
writer = tf.summary.create_file_writer(logdir)
with writer.as_default():
for step in range(num_summaries):
k = step / float(num_summaries)
run(k, step)
writer.flush()
print(
"To view results in your browser, run `tensorboard --logdir %s`"
% LOGDIR
)
print(
"Logs can be uploaded publicly to TensorBoard.dev via "
+ "`tensorboard dev upload --logdir %s`" % LOGDIR
)
def main(unused_argv):
print("Running histograms demo. Output saving to %s." % LOGDIR)
run_all(LOGDIR)
print("Done. Output saved to %s." % LOGDIR)
if __name__ == "__main__":
app.run(main)
|
py | b40c2c116b6a131f86af62619b7e98cab005a432 | import os
from tempfile import NamedTemporaryFile
from uuid import uuid4
from urllib import urlencode
from django.utils.deconstruct import deconstructible
from django.conf import settings
from django.core.files.storage import Storage
from django.core.urlresolvers import reverse
from django.core.exceptions import ValidationError
from django_irods import icommands
from icommands import Session, GLOBAL_SESSION, GLOBAL_ENVIRONMENT, SessionException, IRodsEnv
@deconstructible
class IrodsStorage(Storage):
def __init__(self, option=None):
if option == 'federated':
# resource should be saved in federated zone
self.set_fed_zone_session()
else:
self.session = GLOBAL_SESSION
self.environment = GLOBAL_ENVIRONMENT
icommands.ACTIVE_SESSION = self.session
@property
def getUniqueTmpPath(self):
# return a unique temporary path under IRODS_ROOT directory
return os.path.join(getattr(settings, 'IRODS_ROOT', '/tmp'), uuid4().hex)
def set_user_session(self, username=None, password=None, host=settings.IRODS_HOST,
port=settings.IRODS_PORT, def_res=None, zone=settings.IRODS_ZONE,
userid=0, sess_id=None):
homedir = "/" + zone + "/home/" + username
userEnv = IRodsEnv(
pk=userid,
host=host,
port=port,
def_res=def_res,
home_coll=homedir,
cwd=homedir,
username=username,
zone=zone,
auth=password,
irods_default_hash_scheme='MD5'
)
if sess_id is None:
self.session = Session(session_id=uuid4())
self.environment = self.session.create_environment(myEnv=userEnv)
else:
self.session = Session(session_id=sess_id)
if self.session.session_file_exists():
self.environment = userEnv
else:
self.environment = self.session.create_environment(myEnv=userEnv)
self.session.run('iinit', None, self.environment.auth)
icommands.ACTIVE_SESSION = self.session
# Set iRODS session to wwwHydroProxy for irods_storage input object for iRODS federated
# zone direct file operations
def set_fed_zone_session(self):
if settings.REMOTE_USE_IRODS:
self.set_user_session(username=settings.IRODS_USERNAME,
password=settings.IRODS_AUTH,
host=settings.IRODS_HOST,
port=settings.IRODS_PORT,
def_res=settings.HS_IRODS_USER_ZONE_DEF_RES,
zone=settings.IRODS_ZONE,
sess_id='federated_session')
def delete_user_session(self):
if self.session != GLOBAL_SESSION and self.session.session_file_exists():
self.session.delete_environment()
def download(self, name):
return self._open(name, mode='rb')
def getFile(self, src_name, dest_name):
self.session.run("iget", None, '-f', src_name, dest_name)
def runBagitRule(self, rule_name, input_path, input_resource):
"""
        run the iRODS bagit rule, which generates bag-related files without bundling
:param rule_name: the iRODS rule name to run
:param input_path: input parameter to the rule that indicates the collection path to
create bag for
:param input_resource: input parameter to the rule that indicates the default resource
to store generated bag files
:return: None
"""
# SessionException will be raised from run() in icommands.py
self.session.run("irule", None, '-F', rule_name, input_path, input_resource)
def zipup(self, in_name, out_name):
"""
run iRODS ibun command to generate zip file for the bag
:param in_name: input parameter to indicate the collection path to generate zip
:param out_name: the output zipped file name
:return: None
"""
self.session.run("imkdir", None, '-p', out_name.rsplit('/', 1)[0])
# SessionException will be raised from run() in icommands.py
self.session.run("ibun", None, '-cDzip', '-f', out_name, in_name)
def unzip(self, zip_file_path, unzipped_folder=None):
"""
run iRODS ibun command to unzip files into a new folder
:param zip_file_path: path of the zipped file to be unzipped
:param unzipped_folder: Optional defaults to the basename of zip_file_path when not
provided. The folder to unzip to.
:return: the folder files were unzipped to
"""
abs_path = os.path.dirname(zip_file_path)
if not unzipped_folder:
unzipped_folder = os.path.splitext(os.path.basename(zip_file_path))[0].strip()
unzipped_folder = self._get_nonexistant_path(os.path.join(abs_path, unzipped_folder))
# SessionException will be raised from run() in icommands.py
self.session.run("ibun", None, '-xDzip', zip_file_path, unzipped_folder)
return unzipped_folder
def _get_nonexistant_path(self, path):
if not self.exists(path):
return path
i = 1
new_path = "{}-{}".format(path, i)
while self.exists(new_path):
i += 1
new_path = "{}-{}".format(path, i)
return new_path
def setAVU(self, name, attName, attVal, attUnit=None):
"""
set AVU on resource collection - this is used for on-demand bagging by indicating
whether the resource has been modified via AVU pairs
Parameters:
:param
name: the resource collection name to set AVU.
attName: the attribute name to set
attVal: the attribute value to set
attUnit: the attribute Unit to set, default is None, but can be set to
indicate additional info
"""
# SessionException will be raised from run() in icommands.py
if attUnit:
self.session.run("imeta", None, 'set', '-C', name, attName, attVal, attUnit)
else:
self.session.run("imeta", None, 'set', '-C', name, attName, attVal)
def getAVU(self, name, attName):
"""
set AVU on resource collection - this is used for on-demand bagging by indicating
whether the resource has been modified via AVU pairs
Parameters:
:param
name: the resource collection name to set AVU.
attName: the attribute name to set
attVal: the attribute value to set
attUnit: the attribute Unit to set, default is None, but can be set to
indicate additional info
"""
# SessionException will be raised from run() in icommands.py
stdout = self.session.run("imeta", None, 'ls', '-C', name, attName)[0].split("\n")
ret_att = stdout[1].strip()
if ret_att == 'None': # queried attribute does not exist
return None
else:
vals = stdout[2].split(":")
return vals[1].strip()
def copyFiles(self, src_name, dest_name, ires=None):
"""
Parameters:
:param
src_name: the iRODS data-object or collection name to be copied from.
dest_name: the iRODS data-object or collection name to be copied to
        copyFiles() copies an iRODS data-object (file) or collection (directory)
to another data-object or collection
"""
if src_name and dest_name:
if '/' in dest_name:
splitstrs = dest_name.rsplit('/', 1)
if not self.exists(splitstrs[0]):
self.session.run("imkdir", None, '-p', splitstrs[0])
if ires:
self.session.run("icp", None, '-rf', '-R', ires, src_name, dest_name)
else:
self.session.run("icp", None, '-rf', src_name, dest_name)
return
def moveFile(self, src_name, dest_name):
"""
Parameters:
:param
src_name: the iRODS data-object or collection name to be moved from.
dest_name: the iRODS data-object or collection name to be moved to
        moveFile() moves/renames an iRODS data-object (file) or collection
(directory) to another data-object or collection
"""
if src_name and dest_name:
if '/' in dest_name:
splitstrs = dest_name.rsplit('/', 1)
if not self.exists(splitstrs[0]):
self.session.run("imkdir", None, '-p', splitstrs[0])
self.session.run("imv", None, src_name, dest_name)
return
def saveFile(self, from_name, to_name, create_directory=False, data_type_str=''):
"""
Parameters:
:param
from_name: the temporary file name in local disk to be uploaded from.
to_name: the data object path in iRODS to be uploaded to
create_directory: create directory as needed when set to True. Default is False
Note if only directory needs to be created without saving a file, from_name should be empty
and to_name should have "/" as the last character
"""
if create_directory:
splitstrs = to_name.rsplit('/', 1)
self.session.run("imkdir", None, '-p', splitstrs[0])
if len(splitstrs) <= 1:
return
if from_name:
try:
if data_type_str:
self.session.run("iput", None, '-D', data_type_str, '-f', from_name, to_name)
else:
self.session.run("iput", None, '-f', from_name, to_name)
except:
if data_type_str:
self.session.run("iput", None, '-D', data_type_str, '-f', from_name, to_name)
else:
# IRODS 4.0.2, sometimes iput fails on the first try.
# A second try seems to fix it.
self.session.run("iput", None, '-f', from_name, to_name)
return
def _open(self, name, mode='rb'):
tmp = NamedTemporaryFile()
self.session.run("iget", None, '-f', name, tmp.name)
return tmp
def _save(self, name, content):
self.session.run("imkdir", None, '-p', name.rsplit('/', 1)[0])
with NamedTemporaryFile(delete=False) as f:
for chunk in content.chunks():
f.write(chunk)
f.flush()
f.close()
try:
self.session.run("iput", None, '-f', f.name, name)
except:
# IRODS 4.0.2, sometimes iput fails on the first try. A second try seems to fix it.
self.session.run("iput", None, '-f', f.name, name)
os.unlink(f.name)
return name
def delete(self, name):
self.session.run("irm", None, "-rf", name)
def exists(self, name):
try:
stdout = self.session.run("ils", None, name)[0]
return stdout != ""
except SessionException:
return False
def ils_l(self, path):
        # kept in its own method so it can be mocked in tests
return self.session.run("ils", None, "-l", path)[0]
def listdir(self, path):
stdout = self.ils_l(path).split("\n")
listing = ([], [], [])
directory = stdout[0][0:-1]
directory_prefix = " C- " + directory + "/"
for i in range(1, len(stdout)):
if stdout[i][:len(directory_prefix)] == directory_prefix:
dirname = stdout[i][len(directory_prefix):].strip()
if dirname:
listing[0].append(dirname)
listing[2].append("-1")
else:
# don't use split for filename to preserve spaces in filename
line = stdout[i].split(None, 6)
if len(line) < 6:
# the last line is empty
continue
if line[1] != '0':
# filter replicas
continue
                # create a separator based on the id, date, &
sep = " ".join(line[3:6])
filename = stdout[i].split(sep)[1].strip()
size = line[3]
if filename:
listing[1].append(filename)
listing[2].append(size)
return listing
def size(self, name):
stdout = self.session.run("ils", None, "-l", name)[0].split()
return int(stdout[3])
def url(self, name, url_download=False, zipped=False):
reverse_url = reverse('django_irods_download', kwargs={'path': name})
query_params = {'url_download': url_download, "zipped": zipped}
return reverse_url + '?' + urlencode(query_params)
def get_available_name(self, name, max_length=None):
"""
Reject duplicate file names rather than renaming them.
"""
if self.exists(name):
raise ValidationError(str.format("File {} already exists.", name))
return name
|
py | b40c2c8c079cc1e6a39efc95164c51920fb9a692 | import os
import re
import warnings
import ipfshttpclient
import utils.config as config
def get_file_suffix(filename, token_id="\\d+"):
"""
Given a filename and an optional token_id, this function returns the file suffix.
If the file has no extension, an empty string is returned.
:param filename
:type filename: str
:param token_id
:type token_id: str | int | None
:return: file_suffix
:rtype: str
"""
token_id_pattern = rf"^{token_id}"
matches = re.search(token_id_pattern, filename)
if matches:
regex = rf"^{token_id}(\.(?P<extension>\w+))?$"
matches = re.search(regex, filename)
if matches and matches.group("extension"):
return matches.group(1)
return ""
else:
raise ValueError("Provided token_id not found in filename")
def infer_cid_from_uri(uri):
"""
Given a URI, this function returns the CID.
Returns None if the CID is not found.
:param uri
:type uri: str
:return: cid
:rtype: str | None
"""
cid_pattern = r"Qm[a-zA-Z0-9-_]+"
matches = re.search(cid_pattern, uri)
if matches:
return matches.group(0)
return None
def is_valid_ipfs_uri(uri):
"""
    Given a URI, this function checks whether it is a valid IPFS URI.
:param uri
:type uri: str
:rtype: bool
"""
if uri.find("ipfs") != -1 and infer_cid_from_uri(uri):
return True
return False
def fetch_ipfs_folder(collection_name, cid, parent_folder, timeout=60):
# print(os.getcwd())
# print(f"{os.getcwd()}/{parent_folder}/")
"""
Given a collection name, a cid and an optional timeout, this function downloads the entire metadata folder from IPFS.
:param parent_folder: The parent folder where the collection should be saved.
:type parent_folder: str
:param collection_name The collection name to be used as the folder name
:type collection_name: str
:param cid: The IPFS CID of folder to download
:type cid: str
:param timeout: Connection timeout (in seconds) when connecting to the API daemon
:type timeout: int | None
"""
infura = "/dns/infura-ipfs.io/tcp/5001/https"
ipfs_io = "/dns/ipfs.io/tcp/443/https"
ipfs_gateway_io = "/dns/gateway.ipfs.io/tcp/443/https"
dweb_link = "/dns/dweb.link/tcp/443/https"
pinata = "/dns/gateway.pinata.cloud/tcp/443/https"
warnings.filterwarnings(
"ignore", category=ipfshttpclient.exceptions.VersionMismatch
)
gateways = [pinata, ipfs_gateway_io, infura, dweb_link, ipfs_io]
print("Attempting to download metadata folder from IPFS...\nPlease wait...")
for gateway in range(len(gateways)):
try:
client = ipfshttpclient.connect(addr=gateways[gateway], timeout=timeout)
client.get(f"/ipfs/{cid}", target=f"{os.getcwd()}/{parent_folder}/")
print("Successfully downloaded metadata folder from IPFS")
os.rename(
f"./{parent_folder}/{cid}",
f"./{parent_folder}/{collection_name}",
)
client.close()
break
except Exception:
if gateway < len(gateways) - 1:
print(
"Failed to download metadata folder from IPFS. Trying next gateway..."
)
else:
print("Failed to download metadata folder from IPFS.")
if os.path.exists(f"./{parent_folder}/{cid}"):
os.rename(
f"./{parent_folder}/{cid}",
f"./{parent_folder}/{collection_name}",
)
pass
def format_ipfs_uri(uri):
# Reformat IPFS gateway
ipfs_1 = "ipfs://"
ipfs_2 = "https://ipfs.io/ipfs/"
ipfs_3 = "https://gateway.pinata.cloud/ipfs/"
ipfs_hash_identifier = "Qm"
if config.IPFS_GATEWAY == "":
if uri.startswith(ipfs_1):
uri = ipfs_2 + uri[len(ipfs_1) :]
else:
if uri.startswith(ipfs_1):
uri = config.IPFS_GATEWAY + uri[len(ipfs_1) :]
elif uri.startswith(ipfs_2):
uri = config.IPFS_GATEWAY + uri[len(ipfs_2) :]
elif uri.startswith(ipfs_3):
uri = config.IPFS_GATEWAY + uri[len(ipfs_3) :]
elif "pinata" in uri:
starting_index_of_hash = uri.find(ipfs_hash_identifier)
uri = config.IPFS_GATEWAY + uri[starting_index_of_hash:]
return uri
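# Hedged example (illustrative, assuming config.IPFS_GATEWAY is left as ""):
# format_ipfs_uri("ipfs://QmFoo") -> "https://ipfs.io/ipfs/QmFoo"   # QmFoo is a placeholder CID
# With a gateway configured, the recognised prefixes are swapped for config.IPFS_GATEWAY instead.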
|
py | b40c2d071e62182cf2f29d62b29c55c44880247f | from django.urls import path
from . import views
urlpatterns = [
path('', views.Mailsearcher, name="mailsearcher"),
path('listen', views.listenMailQuery, name='listen_mail'),
path('search', views.searchMailQuery, name='search_mail')
] |
py | b40c2d775b66ddc74aacdccc753915905d8130c0 |
from setuptools import setup, find_packages, Command
# setuptools' setup/Command are used directly; re-importing them from distutils.core
# would shadow the setuptools versions and break setuptools-only arguments such as
# tests_require and test_suite.
class PyTest(Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
import subprocess
import sys
errno = subprocess.call([sys.executable, 'runtests.py'])
raise SystemExit(errno)
setup(
name = 'bype',
packages = find_packages(),
version = '0.1a',
description = 'Python Fluent DSL',
author = 'Maxim Yaskevich',
author_email = '[email protected]',
license='MIT',
tests_require=['pytest'],
test_suite='test',
cmdclass = {'test': PyTest},
url = 'https://github.com/myaskevich/python-bype',
keywords = ['dsl', 'fluent', 'test'],
classifiers=[
'Intended Audience :: Developers',
'Natural Language :: English',
'Development Status :: 1 - Planning',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Topic :: Software Development :: Testing',
'Topic :: Software Development :: Libraries',
],
)
|
py | b40c2db7220ac7247254c7ba7e141053df230ecc |
import kivy
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.button import Button
from kivy.uix.widget import Widget
from kivy.uix.switch import Switch
from kivy.uix.label import Label
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.screenmanager import ScreenManager, Screen
from kivy.clock import Clock
from kivy.properties import StringProperty, NumericProperty, ObjectProperty
#from lympha import *
#import lympha as lympha
sm = ScreenManager()
#from browser import document, bind, html, alert, window
#from javascript import this
#???
addresses = list()
import subprocess
import sys
#for the graph function:
import os
#regex
import re
def recursive_parse(node,substitutions):
if hasattr(node.left,"id"):
if node.left.id in substitutions.keys():
node.left = substitutions[node.left.id]
else:
recursive_parse(node.left,substitutions)
if hasattr(node.right,"id"):
if node.right.id in substitutions.keys():
node.right = substitutions[node.right.id]
else:
recursive_parse(node.right,substitutions)
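# Hedged sketch (illustrative): recursive_parse() swaps out any child whose ``id``
# appears in ``substitutions`` (the node shape mirrors ast.BinOp / ast.Name).
# With hypothetical stand-in classes (not part of this script):
#     class _Name:
#         def __init__(self, id): self.id = id
#     class _BinOp:
#         def __init__(self, left, right): self.left, self.right = left, right
#     tree = _BinOp(_Name("a"), _Name("b"))
#     recursive_parse(tree, {"a": 1, "b": 2})   # afterwards tree.left == 1, tree.right == 2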
title = str()
prefilenames = list()
prestarts = list()
# Variables for " 1. Program call through bash-CLI.".
CLI_filename = ""
argv_len = len(sys.argv)
filenames = list()
filename = ""
presteps = int()
# List of statements that should be executed during step 0:
starts = list()
# Steps given from the CLI-command:
steps = 0
local_files = True
# Depending on what the interpreter is supposed to do different modes are used:
mode_graph = False
mode_state = False
mode_exe = False
mode_show = False
mode_map = False
#Graphviz
#d3 = window.d3
# Check if all script files are loaded:
filecheck = False
# Lists of statements:
exe_list = list()
show_list = list()
map_list = list()
series = list()
substates = list()
nextstates = list()
specs = list()
# LYMPHA-language grammar:
global_relative_variable1 = None
global_relative_variable2 = None
operator1 = None
statement_value = str()
statement_flow = int()
# Objects to be executed:
exe_objects = list()
CLIcom_segment = 0
#Construction of the section model of the linked list.
class Statement(dict):
MARKER = object()
def __init__(self, value=None):
if value is None:
pass
elif isinstance(value, dict):
for key in value:
self.__setitem__(key, value[key])
else:
raise TypeError('expected dict')
def __setitem__(self, key, value):
if isinstance(value, dict) and not isinstance(value, Statement):
value = Statement(value)
super(Statement, self).__setitem__(key, value)
def __getitem__(self, key):
found = self.get(key, Statement.MARKER)
if found is Statement.MARKER:
found = Statement()
super(Statement, self).__setitem__(key, found)
return found
__setattr__, __getattr__ = __setitem__, __getitem__
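# Hedged example (illustrative): Statement behaves as an auto-vivifying dict with
# attribute access -- reading a missing key creates a nested Statement, and item
# access and attribute access are interchangeable:
#     s = Statement()
#     s[1].name = "node_a"        # s[1] is created on first access
#     s[1]["name"] == s[1].name   # True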
# A list of all objects:
object_list = dict()
object_list = Statement(object_list)
#class Statement:
# def __init__(self, flow, name, global_relative_variable1, global_relative_variable2, statement_flow, statement_value, operator1, next_list, binary_list, operation, spec_list):
#
# self.flow = int(flow)
#
# #list of next nodes:
# #next_list = next_list
# self.next_list = list(next_list)
#
# #list of specifications:
# #spec_list = spec_list
# self.spec_list = list()#
#
# #list of contents:
# #binary_list = binary_list
# self.binary_list = list(binary_list)#
#
# #should the binary_list be counted as a sum or an equation?
# #self.summerize = summerize
# self.operation = operation #This holds the operation that are found in the string (left to right)
#
# #name
# self.name = name
#
# #tipping point
# self.global_relative_variable1 = global_relative_variable1
#
# #tipping point
# self.global_relative_variable2 = global_relative_variable2
#
# #relational operator
# self.operator1 = operator1#
#
# #statement_flow
# self.statement_flow = statement_flow
# #if statement_flow == 0 :self.statement_flow = 0
# #else: self.statement_flow = 1#
#
# #statement_value
# self.statement_value = statement_value
def stripComments(code):
code = str(code)
return re.sub(r'(?m)^ *#.*\n?', '', code)
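# A minimal sketch (assumption: illustrative only) of what stripComments does:
# full-line '#' comments are dropped before the script text is split into
# ';'-terminated statements by lexer().
def _strip_comments_demo():
    script = "a -> b;\n# a comment line\nb = 1B;\n"
    return stripComments(script)  # -> "a -> b;\nb = 1B;\n"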
def lexer():
global CLIcom_segment
global series
global filenames
global local_files
#Load file content
global prefilenames
global prestarts
filenames = prefilenames
starts = prestarts
if local_files == True:
for filename in filenames:
textfile = open(filename, 'r')
filetext = textfile.read()
filetext = stripComments(filetext)
filetext = filetext.replace('\n', ' ')
filetext = filetext.replace(' ', ' ')
series.extend(filetext.split(';'))
######
######
global object_list
nexts = list()
conts = list()
#Make new nodes in database
for serie in series:
# document <= serie
# Strategy for splitting:
# words a-z A-Z 0-9
# space = - + \s ->
#
#
prearrowobjs = serie.split('->')
#arrowobjs = re.split('->|=|\+|\-',serie)
arrowobjs = list()
for anobj in prearrowobjs:
almostdone = anobj.split('=')
arrowobjs.append(almostdone[0])
count = 0
oops = str() #
nexts = list()
conts = list()
specs = list()
flow = int()
global_relative_variable1 = float()
operator1 = str()
statement_flow = int()
statement_value = str()
scale = list()
        # Divide the script's line strings into objects:
#print(object_list)
pre_count = int(0)
count_objs = int()
#alert("60")
for anobj in arrowobjs:
anobj = re.sub("\s*", "", anobj)
#alert(anobj)
            eqobjs = re.compile(r"((<=)|(>=)|(!=)|(==)|(<)|(>))").split(anobj)
taken = 0
#Check if the object already exists
for takenkey in range(0,len(object_list)) :
if object_list[takenkey].name == anobj :
taken = 1
#Avoid number gaps
else:
#Spin the list to the end
pre_count += 1
count_objs = pre_count
#Add node at the end of the dicts
#count_objs -= 1
if (anobj) != "" and taken == 0 :
object_list[((count_objs))].name = str(anobj)
object_list[((count_objs))].next_list = list()
object_list[((count_objs))].binary_list = list()
object_list[((count_objs))].operation = str("")
object_list[((count_objs))].flow = 1
object_list[((count_objs))].statement_flow = 1
object_list[((count_objs))].statement_value = str()
object_list[((count_objs))].global_relative_variable1 = float()
object_list[((count_objs))].datatype = ""
#Connect the database nodes
for serie in series:
arrowobj = serie.split('->')
count = 0
nexts = list()
conts = list()
# Connect to the next object
for i in range(0,len(arrowobj)):
for key in range(0,len(object_list)):
thename = str(object_list[key].name)
thename = re.sub("\s*", "", thename)
if i != 0 :
if thename == arrowobj[(0)].replace(" ",""):
nexting = ""
nexting = arrowobj[i].replace(" ","")
if not nexting == "" :
object_list[key].next_list.append(nexting)
#print(object_list[key].next_list)
#print(object_list[key].name)
#Connect to depending objects:
#Types of continuations of the side2 string:
# 1. Operator + Sum of binaries
# 2. Operator + Equation and value
# 3. Constant value
count = 0
for serie in series:
if " = " in serie :
count = 0
sides = serie.split(' = ')
side1 = str(sides[0])
side2 = str(sides[1])
side1 = side1.replace(" ","")
side2 = side2.replace(" ","")
for key in range(0,len(object_list)) :
#Web version TRY############
try:
thename = object_list[key].name
if ("%s" % thename) == sides[0].replace(" ", "") :
try:
#For evaluations
# Check if operator:
if ("==" in sides[1] or "!=" in sides[1] or "<=" in sides[1] or ">=" in sides[1] or "<" in sides[1] or ">" in sides[1] ) and not "->" in sides[1]:
if "==" in (sides[1]) :
object_list[key].operator1="equiv"
# if re.compile(".*>=.*").match(sides[1]) :
if ">=" in (sides[1]) :
object_list[key].operator1="geq"
# if re.compile(".*<=.*").match(sides[1]) :
if "<=" in (sides[1]) :
object_list[key].operator1="leq"
# if re.compile(".*(!=).*").match(sides[1]) :
if "!=" in (sides[1]) :
object_list[key].operator1="no"
if re.compile(".*>.*").match(sides[1]) and not re.compile(r"(>=)").match(sides[1]) :
object_list[key].operator1="g"
if re.compile(".*<.*").match(sides[1]) and not re.compile(r"(<=)").match(sides[1]) :
object_list[key].operator1="l"
#print(object_list[key].operator1)
preop = sides[1].replace(" ","")
bin_chopped = 0
if "\|{" in preop or "|{" in preop :
#if "\|{" in preop and "}|" in preop :
bin_chopped = 1
preop = re.sub(r'(\|{)', ' ', preop)
preop = re.sub(r'(}\|)', ' ', preop)
#preop = preop.replace("}|","")
#preop = re.sub("\|{","", preop)
chopcompile = re.compile(r"(<=|>=|!=|==|<|>)")
operator_chop = re.split(chopcompile, preop)
#alert("operator_chop:[%s]" % operator_chop)
#Tipping point
zerochop = operator_chop[0].replace(" ","")
object_list[key].global_relative_variable1 = zerochop
# chopped into binary list
if bin_chopped == 1 :
#alert("check1")
binary_sums = list(operator_chop[2].split(','))
for binary in binary_sums:
binary = binary.replace(" ","")
if binary != "" :
binary = str(binary.replace(" ",""))
object_list[key].binary_list.append(str(binary))
object_list[key].datatype = "bineval"
#object_list[key].binary_list = list(the_bin_list)<
elif bin_chopped == 0 :
object_list[key].statement_value = operator_chop[2]
object_list[key].datatype = "nonbineval"
#For non-binary values and equations
elif sides[0] != "" and sides[1] != "" :
# # # #print("YYY %s" % object_list[key].name)
#if 0 == len(operator_chop) :
object_list[key].statement_value = sides[1]
object_list[key].datatype = "valu"
except:
pass
except:
pass
count += 1
def turn2func(ev) :
    # The goal is to implement the first step factors.
    # NOTE: this is the web (Brython) variant of the handler; it relies on the
    # commented-out `browser` imports above (`document`) and is not bound to any
    # widget in this Kivy build.
global object_list
for an_obj in range(0,len(object_list)):
for a_name in document.select(".factorItems"):
try:
if a_name.id == object_list[an_obj].name and a_name.id != "" :
object_list[an_obj].statement_value = a_name.value
if a_name.value == "1B" or a_name.value == "0B" :
object_list[an_obj].datatype = "bina"
object_list[an_obj].value = a_name.value
                    elif a_name.value.isdigit():
object_list[an_obj].datatype = "valu"
object_list[an_obj].value = a_name.value
#else:
#object_list[an_obj].datatype = "nonbineval"
except:
pass
# End results: name, statement_value, datatype:(bina, valu)
try:
del starts[:]
except:
pass
try:
for start_item in document.select(".theStarts"):
starts.append(start_item.value)
temporary_starts.append(start_item.value)
except:
pass
mapfunc()
#document["turn2"].bind("click", turn2func)
#document["addbttn"].bind("click", add_input)
#document["addstart"].bind("click", add_start)
#document["zcriptbttn"].bind("click", zcripts)
#document["menubttn"].bind("click", changeMenu)
#document.getElementById( "index").style.backgroundColor='#EFAB00'
#document.getElementById( "indexsmall").style.backgroundColor='#EFAB00'
#document.getElementById( "index").style.color='#ffffff'
#document.getElementById( "index").className="index2active"
##################
start_turn=0
step_turn=0
#class ScreenOne(FloatLayout):
class ScreenOne(Screen):
inputlabel1 = NumericProperty(0)
#def __init__(self, **kwargs):
def update(self,dt):
#super().__init__(**kwargs)
#mainfunc()
global sm
global sc1
global title
self.switch = Switch()
#self.clear_widgets()
h_box = BoxLayout(orientation='horizontal')
v_box = BoxLayout(orientation='vertical')
#my_show_list = ["My Way", "Wine Drinker", "Boots"]
h_box.my_buttons = [] # if you want to keep an "easy" reference to your buttons to do something with them later
        # Kivy doesn't crash because it creates the property automatically
#for message in my_show_list:
switch_box = BoxLayout(orientation='vertical')
label = Label(text=title)
#switch = Switch()
#switch.bind(active=callback)
#try:
# for numbr in range(1, 5):
#for start in starts:
# mapfunc()
# label = Label(text=title)
# sm.add_widget(sc1)
# Clock.unschedule(sc1.__init__())
# return sm
#except:
# pass
switch_box.add_widget(label)
switch_box.add_widget(self.switch)
#h_box.my_buttons.append(switch_box)
h_box.add_widget(switch_box)
v_box.add_widget(h_box)
#self.add_widget(h_box)
okbtn = Button(text="OK")
#okbtn.bind(on_press=self.oking)
okbtn.bind(on_press=self.mapfunc)
v_box.add_widget(okbtn)
#self.remove_widget(self.('main'))
self.add_widget(v_box)
#self.manager.current = 'screen1'
#Function for running the linked list.
def mapfunc(self,*args):
#def mapfunc(self,dt):
#global d3
#global UI
#global CLI_filename
global start_turn
global step_turn
global sm
global sc1
global title
global argv_len
global filename
global filenames
global mode_graph
global mode_state
global filecheck
global mode_exe
global mode_show
global mode_map
global exe_list
#global show_list
global map_list
global series
global substates
#global nextstates
nextstates = list()
global specs
#global global_relative_variable1
global global_relative_variable2
#global operator1
global statement_flow
global statement_value
global object_list
global exe_objects
global starts
global show_list
global steps
#if mode_graph == True:
graphstr = 'digraph lympha {\nnode [shape=record];'
#ADDED INT IN INT(STEPS)
global prefilenames
global prestarts
breaking = False
filenames = prefilenames
starts = prestarts
step_count = 0
start_count = 0
turned = False
for step in range(0, int(steps)):
if step_count > step_turn :
#step_count += 1
turned = True
##breaking = False
if step_count < step_turn :
step_count += 1
turned = False
#breaking = False
if step_count == step_turn :
step_turn += 1
step_count += 1
#nextstates = list()
#print("Steps: %s" % steps)
checked = 0
for start in starts:
if start_count > start_turn :
#start_count += 1
#breaking = True
turned = False
if start_count < start_turn :
start_count += 1
#breaking = True
turned = False
if start_count == start_turn and turned == False:
#breaking = False
turned = True
self.clear_widgets()
start_turn += 1
start_count += 1
for key in range(0,len(object_list)):
endstring = str()
strr=str("%s" % object_list[key].name)
strr = re.sub("\s+", "", strr.strip())
#sm.add_widget(sc1)
#Clock.unschedule(sc1.__init__())
#return sm
if str(start) == strr :
# # # #print("mapfunvc")
#title = object_list[key].name
if object_list[key].flow == 0 or object_list[key].statement_flow == 0:
pre_statement_flow = 0
else:
if object_list[key].name[-1] != "?":
######28
pre_statement_flow = 0
title = object_list[key].name
if step == 0 :
object_list[key].flow = 1
object_list[key].statement_flow = 1
#if mode_exe == True :
#ScreenOne.procedure(object_list[key].name)
#title = object_list[key].name
else:
#ScreenOne.procedure(object_list[key].name)
title = object_list[key].name
if object_list[key].flow == 1 or object_list[key].statement_flow == 1:
pre_statement_flow = 1
else:
pre_statement_flow = 0
#document <= ("NAME: %s" % object_list[key].name)
#For binaries
if object_list[key].datatype == "bina":
if object_list[key].statement_value == "1B" :
pre_statement_flow = 1
object_list[key].statement_flow = 1
if object_list[key].statement_value == "0B" :
pre_statement_flow = 0
object_list[key].statement_flow = 0
checked = 1
#For binary evaluation
#if object_list[key].datatype == "bineval" :# and len(object_list[key].binary_list) >= 1:
if len(object_list[key].binary_list) >= 1:
pre_statement_flow = 0
subfactors = list()
                                #Converting variables into values
for binobj in object_list[key].binary_list :
for item in range(0,len(object_list)) :
thename = object_list[item].name
thename = str(thename)
#thename = thename[1:]
#thename = thename[:1]
#thename = re.sub("\s+", "", thename.strip())
#if object_list[item].name == binobj.replace(" ","") :
if thename == ("%s" % binobj.replace(" ","")) :
pass
#subfactors.append(int(int(object_list[item].value[:-1])))
sum1 = subfactors.count(1)
sum0 = subfactors.count(0)
if object_list[key].operator1 != None: # and object_list[key].statement_flow == None :
if object_list[key].operator1 == "equiv" and int(object_list[key].global_relative_variable1) == int(sum1):
pre_statement_flow = 1
object_list[key].statement_value = ("score: %s\nthreshold: %s" % (sum1, object_list[key].global_relative_variable1))
elif object_list[key].operator1 == "geq" and int(object_list[key].global_relative_variable1) >= int(sum1):
pre_statement_flow = 1
object_list[key].statement_value = ("score: %s\nthreshold: %s" % (sum1, object_list[key].global_relative_variable1))
elif object_list[key].operator1 == "leq" and int(object_list[key].global_relative_variable1) <= int(sum1):
pre_statement_flow = 1
object_list[key].statement_value = ("score: %s\nthreshold: %s" % (sum1, object_list[key].global_relative_variable1))
elif object_list[key].operator1 == "no" and int(object_list[key].global_relative_variable1) != int(sum1):
pre_statement_flow = 1
object_list[key].statement_value = ("score: %s\nthreshold: %s" % (sum1, object_list[key].global_relative_variable1))
elif object_list[key].operator1 == "g" and int(object_list[key].global_relative_variable1) > int(sum1):
pre_statement_flow = 1
object_list[key].statement_value = ("score: %s\nthreshold: %s" % (sum1, object_list[key].global_relative_variable1))
elif object_list[key].operator1 == "l" and int(object_list[key].global_relative_variable1) < int(sum1):
pre_statement_flow = 1
object_list[key].statement_value = ("score: %s\nthreshold: %s" % (sum1, object_list[key].global_relative_variable1))
else:
pre_statement_flow = 0
#print(object_list[key].statement_value)
object_list[key].statement_flow = int(pre_statement_flow)
checked = 1
#For binary equations:
#if object_list[key].datatype == "bineval" and len(object_list[key].binary_list) >= 1:
# #alert("begin B2")
#pre_statement_flow = 0
#subfactors = list()
                            #Converting variables into values
#for binobj in object_list[key].binary_list :
# for item in range(0,len(object_list)) :
# thename = object_list[item].name
# thename = str(thename)
# #thename = thename[1:]
# #thename = thename[:1]
# #thename = re.sub("\s+", "", thename.strip())
# #if object_list[item].name == binobj.replace(" ","") :
# if thename == ("%s" % binobj.replace(" ","")) :
# subfactors.append(int(object_list[item].statement_flow))
#sum1 = subfactors.count(1)
#sum0 = subfactors.count(0)
#pre_statement_flow = 1
#alert("begin B3")
if object_list[key].operator1 != None: # and object_list[key].statement_flow == None :
if object_list[key].operator1 == "equiv" and int(object_list[key].global_relative_variable1) == int(sum1):
pre_statement_flow = 1
object_list[key].statement_value = ("score: %s\nthreshold: %s" % (sum1, object_list[key].global_relative_variable1))
elif object_list[key].operator1 == "geq" and int(object_list[key].global_relative_variable1) >= int(sum1):
pre_statement_flow = 1
object_list[key].statement_value = ("score: %s\nthreshold: %s" % (sum1, object_list[key].global_relative_variable1))
elif object_list[key].operator1 == "leq" and int(object_list[key].global_relative_variable1) <= int(sum1):
pre_statement_flow = 1
object_list[key].statement_value = ("score: %s\nthreshold: %s" % (sum1, object_list[key].global_relative_variable1))
elif object_list[key].operator1 == "no" and int(object_list[key].global_relative_variable1) != int(sum1):
pre_statement_flow = 1
object_list[key].statement_value = ("score: %s\nthreshold: %s" % (sum1, object_list[key].global_relative_variable1))
elif object_list[key].operator1 == "g" and int(object_list[key].global_relative_variable1) > int(sum1):
pre_statement_flow = 1
object_list[key].statement_value = ("score: %s\nthreshold: %s" % (sum1, object_list[key].global_relative_variable1))
elif object_list[key].operator1 == "l" and int(object_list[key].global_relative_variable1) < int(sum1):
pre_statement_flow = 1
object_list[key].statement_value = ("score: %s\nthreshold: %s" % (sum1, object_list[key].global_relative_variable1))
else:
pre_statement_flow = 0
#alert("begin B4")
object_list[key].statement_flow = int(pre_statement_flow)
object_list[key].flow = int(pre_statement_flow)
checked = 1
#alert("begin C1")
#For equations
if object_list[key].datatype == "valu":
#if object_list[key].statement_value != "" and object_list[key].operator1 == "" :
#comp = re.compile(r'(\d*)', re.IGNORECASE)
endstring = str()
string = (object_list[key].statement_value.replace(" ",""))
pattern = re.compile(r'([\=\+\-\/\*\(\)])')
iteratorUntouched = re.split(pattern, string)
eqlist = list()
for varWord in iteratorUntouched:
#print(varWord)
checked = 0
for objWord in range(len(object_list)):
thename = object_list[objWord].name
if thename == varWord:
eqlist.append(object_list[objWord].statement_value)
checked = 1
if checked == 0:
eqlist.append(varWord)
endstring = (("").join(eqlist))
endstring = str(endstring)
object_list[key].statement_value = endstring
#For float equations
if object_list[key].datatype == "valu" :
#else:
#comp = re.compile(r'(\d*)', re.IGNORECASE)
endstring = str()
string = (object_list[key].statement_value.replace(" ",""))
#pattern = re.compile(r'([\=\+\-\/\*\(\)])')
#iteratorFresh = re.split(pattern, string)
iteratorFresh = re.split("(?:(?:[^a-zA-Z])|(?:[^a-zA-Z]+))|(?:[^a-zA-Z]+)", string)
eqlist = list()
for varWord in iteratorFresh:
checked = 0
for objWord in range(len(object_list)):
thename = object_list[objWord].name
if thename == varWord:
eqlist.append(object_list[objWord].statement_value)
checked = 1
if checked == 0:
eqlist.append(varWord)
endstring = (("").join(eqlist))
                                #Bug-prone equation line:
#endstring = str(eval(str(endstring)))
object_list[key].statement_value = endstring
#endnum = float()
#endnum = float(eval(str(endstring)))
endnum = endstring
pre_statement_flow = 0
try:
op = "failed"
if object_list[key].operator1 == "equiv" and int(object_list[key].global_relative_variable1) == int(str(endnum)):
op = "=="
pre_statement_flow = 1
elif object_list[key].operator1 == "leq" and int(object_list[key].global_relative_variable1) >= int((endnum)):
op = ">="
pre_statement_flow = 1
elif object_list[key].operator1 == "geq" and (int(object_list[key].global_relative_variable1) <= int(str(endnum))):
op = "<="
pre_statement_flow = 1
elif object_list[key].operator1 == "no" and int(object_list[key].global_relative_variable1) != int(str(endnum)):
op = "!="
pre_statement_flow = 1
elif object_list[key].operator1 == "g" and int(object_list[key].global_relative_variable1) < int(str(endnum)):
op = "<"
pre_statement_flow = 1
elif object_list[key].operator1 == "l" and int(object_list[key].global_relative_variable1) > int(str(endnum)):
op = ">"
pre_statement_flow = 1
else:
pre_statement_flow = 0
                                        #document <= html.BR()   # web-only (Brython) output, disabled in the Kivy build
                                        #document <= str("%s = "%object_list[key].name)
                                        #document <= html.BR()
except:
#endnum = float(eval(str(endstring)))
endnum = endstring
op = "failed"
if object_list[key].operator1 == "equiv" and float(object_list[key].global_relative_variable1) == float(str(endnum)):
op = "=="
pre_statement_flow = 1
elif object_list[key].operator1 == "leq" and float(object_list[key].global_relative_variable1) >= float((endnum)):
op = ">="
pre_statement_flow = 1
elif object_list[key].operator1 == "geq" and (float(object_list[key].global_relative_variable1) <= float(str(endnum))):
op = "<="
pre_statement_flow = 1
elif object_list[key].operator1 == "no" and float(object_list[key].global_relative_variable1) != float(str(endnum)):
op = "!="
pre_statement_flow = 1
elif object_list[key].operator1 == "g" and float(object_list[key].global_relative_variable1) < float(str(endnum)):
op = "<"
pre_statement_flow = 1
elif object_list[key].operator1 == "l" and float(object_list[key].global_relative_variable1) > float(str(endnum)):
op = ">"
pre_statement_flow = 1
else:
pre_statement_flow = 0
#document <= html.BR()
#document <= str("%s = "%object_list[key].name)
#document <= html.BR()
object_list[key].statement_flow = int(pre_statement_flow)
#alert("begin D1")
                            #For non-binary evaluations
if object_list[key].datatype == "nonbineval" :
#comp = re.compile(r'(\d*)', re.IGNORECASE)
endstring = str()
string = (object_list[key].statement_value.replace(" ",""))
pattern = re.compile(r'([\=\+\-\/\*\(\)])')
iteratorUntouched = re.split(pattern, string)
eqlist = list()
for varWord in iteratorUntouched:
#print(varWord)
checked = 0
for objWord in range(len(object_list)):
thename = object_list[objWord].name
if thename == varWord:
eqlist.append(object_list[objWord].statement_value)
checked = 1
if checked == 0:
eqlist.append(varWord)
endstring = (("").join(eqlist))
endstring = str(eval(str(endstring)))
object_list[key].statement_value = endstring
endnum = float()
endnum = float(eval(str(endstring)))
pre_statement_flow = 0
try:
if object_list[key].operator1 == "equiv" and int(object_list[key].global_relative_variable1) == int(str(endnum)):
#print ("%s == %s ; exe" % (int(object_list[key].global_relative_variable1), int(str(endnum))))
pre_statement_flow = 1
elif object_list[key].operator1 == "leq" and int(object_list[key].global_relative_variable1) >= int((endnum)):
#print ("%s >= %s ; exe" % (int(object_list[key].global_relative_variable1), int(str(endnum))))
pre_statement_flow = 1
elif object_list[key].operator1 == "geq" and (int(object_list[key].global_relative_variable1) <= int(str(endnum))):
#print ("%s <= %s ; exe" % (int(object_list[key].global_relative_variable1), int(str(endnum))))
pre_statement_flow = 1
elif object_list[key].operator1 == "no" and int(object_list[key].global_relative_variable1) != int(str(endnum)):
#print ("%s != %s ; exe" % (int(object_list[key].global_relative_variable1), int(str(endnum))))
pre_statement_flow = 1
elif object_list[key].operator1 == "g" and int(object_list[key].global_relative_variable1) < int(str(endnum)):
#print ("%s < %s ; exe" % (int(object_list[key].global_relative_variable1), int(str(endnum))))
pre_statement_flow = 1
elif object_list[key].operator1 == "l" and int(object_list[key].global_relative_variable1) > int(str(endnum)):
#print ("%s > %s ; exe" % (int(object_list[key].global_relative_variable1), int(str(endnum))))
pre_statement_flow = 1
else:
pre_statement_flow = 0
except:
endnum = float(eval(str(endstring)))
if object_list[key].operator1 == "equiv" and float(object_list[key].global_relative_variable1) == float(str(endnum)):
#print ("%s == %s ; exe" % (float(object_list[key].global_relative_variable1), float(str(endnum))))
pre_statement_flow = 1
elif object_list[key].operator1 == "leq" and float(object_list[key].global_relative_variable1) <= float((endnum)):
#print ("%s <= %s ; exe" % (float(object_list[key].global_relative_variable1), float(str(endnum))))
pre_statement_flow = 1
elif object_list[key].operator1 == "geq" and (float(object_list[key].global_relative_variable1) >= float(str(endnum))):
#print ("%s >= %s ; exe" % (float(object_list[key].global_relative_variable1), float(str(endnum))))
pre_statement_flow = 1
elif object_list[key].operator1 == "no" and float(object_list[key].global_relative_variable1) != float(str(endnum)):
#print ("%s != %s ; exe" % (float(object_list[key].global_relative_variable1), float(str(endnum))))
pre_statement_flow = 1
elif object_list[key].operator1 == "g" and float(object_list[key].global_relative_variable1) < float(str(endnum)):
#print ("%s < %s ; exe" % (float(object_list[key].global_relative_variable1), float(str(endnum))))
pre_statement_flow = 1
elif object_list[key].operator1 == "l" and float(object_list[key].global_relative_variable1) > float(str(endnum)):
#print ("%s > %s ; exe" % (float(object_list[key].global_relative_variable1), float(str(endnum))))
pre_statement_flow = 1
else:
pre_statement_flow = 0
#object_list[key].statement_flow = int(pre_statement_flow)
#alert("begin E1")
#if object_list[key].statement_flow == 0 or object_list[key].flow == 0 :
if object_list[key].flow != 1 :
#alert("A8 IF 0 name:%s ; datatype:%s ; flow:%s ; #statement_flow:%s" % (object_list[key].name, object_list[key].datatype, object_list[key].flow, object_list[key].statement_flow ))
#object_list[key].flow = 0
#object_list[key].statement_flow = 0
#pre_statement_flow = 0
object_list[key].statement_flow = int(pre_statement_flow)
object_list[key].flow = int(pre_statement_flow)
#if object_list[key].flow == 0 :
# object_list[key].statement_flow = 0
if step == 0 :
object_list[key].flow = 1
object_list[key].statement_flow = 1
                            #DELETED GRAPH-MODE IF
if object_list[key].statement_flow == 0:
graph_string=""
if object_list[key].datatype == "bina" :
graph_string="0B"
if object_list[key].datatype == "bineval" :
graph_string=object_list[key].statement_value
if object_list[key].datatype == "nonbineval" :
graph_string=("score: %s" % (object_list[key].statement_value))
if object_list[key].datatype == "valu" :
graph_string=object_list[key].statement_value
#graphstr += ('"%s" [label="step %s: %s\\n%s", fillcolor=white, style=filled] ; \n' % (start,step+1,start,str(graph_string)))
title += ('"%s" [label="step %s: %s\\n%s", fillcolor=white, style=filled] ; \n' % (start,step+1,start,str(graph_string)))
print("1")
#graphstr += ('"%s" [label="step %s: %s\\n%s"] \n' % (start,step+1,start,graph_string))
#alert("before draw")
if object_list[key].statement_flow == 1:
graph_string=""
if object_list[key].datatype == "bina" :
graph_string="1B"
if object_list[key].datatype == "bineval" :
graph_string=object_list[key].statement_value
if object_list[key].datatype == "nonbineval" :
graph_string=("score: %s" % (object_list[key].statement_value))
if object_list[key].datatype == "valu" :
graph_string=object_list[key].statement_value
#graphstr += ('"%s" [label="step %s: %s\\n%s", fillcolor=yellow, style=filled] ; \n' % (start,step+1,start,str(graph_string)))
title = ('"%s" [label="step %s: %s\\n%s", fillcolor=yellow, style=filled] ; \n' % (start,step+1,start,str(graph_string)))
print("2")
#self.clear_widgets()
#self.reload()
#Clock.unschedule(self.__init__())
#return
#sm.add_widget(sc1)
#sm.current
#return sm
try:
for next_object in object_list[key].next_list :
if object_list[key].name != next_object :
graphstr += ('"%s" -> "%s" ; \n' % (start,next_object))
nextstates.append(next_object)
except:
pass
for start in starts :
for k in range(0,len(object_list)):
if object_list[k].name==start:
for nexting in object_list[k].next_list :
for l in range(0,len(object_list)):
if object_list[l].name == nexting :
nextstates.append(nexting)
if checked == 0:
for k in range(0,len(object_list)):
if object_list[k].name==start:
for nexting in object_list[k].next_list :
for l in range(0,len(object_list)):
if object_list[l].name == nexting :
if object_list[k].flow == 0 or object_list[k].statement_flow == 0 :
object_list[l].flow = 0
object_list[l].statement_flow = 0
for k in range(0,len(object_list)):
if object_list[k].name==start:
for nexting in object_list[k].next_list :
for l in range(0,len(object_list)):
if object_list[l].name == nexting :
if object_list[k].flow == 1 or object_list[k].statement_flow == 1 and object_list[l].flow != 0:
object_list[l].flow = 1
object_list[l].statement_flow = 1
checked = 0
del starts[:]
#starts = list()
for nexting in nextstates:
if nexting not in starts:
starts.append(nexting)
print(starts)
del nextstates[:]
# if mode_graph == True:
# graphstr += '}'
#try:
# graphstr += "}"
#except:
# pass
# open('lympha.dot', 'w').close()
# outputfile = open("lympha.dot", "w")
# outputfile.write(graphstr)
# outputfile.close()
# cmd = 'dot lympha.dot -Tps -o lympha.pdf'
# os.system(cmd)
CLI_filename = None
argv_len = None
filename = None
filenames = None
#filenames = list()
#starts = None
#steps = None
mode_graph = None
mode_state = None
filecheck = None
mode_exe = None
mode_show = None
mode_map = None
exe_list = None
show_list = None
map_list = None
#series = None
substates = None
nextstates = None
specs = None
global_relative_variable1 = None
global_relative_variable2 = None
operator1 = None
statement_flow = None
statement_value = None
#object_list = None
exe_objects = None
del CLI_filename, argv_len, filename, filenames, mode_graph, mode_state, filecheck, mode_exe, mode_show, mode_map, exe_list, show_list, map_list, substates, nextstates, specs, global_relative_variable1, global_relative_variable2, operator1, statement_flow, statement_value, exe_objects,# object_list, steps, starts,
sc1 = ScreenOne(name='screen1')
class TestClass(App):
def build(self):
#mainfunc()
global sc1
global sm
global filenames
global filecheck
global mode_exe
global starts
global steps
global prefilenames
#global prestarts
sys.argv = list()
sys.argv = ["-f", "CRB65.lympha","-steps", "2", "-exe", "-start", "crepitation."]
argv_len=len(sys.argv)
for x in range(0, argv_len):
if sys.argv[x] == "-f":
filename = sys.argv[x+1]
prefilenames.append(filename)
filecheck = True
if sys.argv[x] == "-steps":
steps = int(sys.argv[x+1])
if sys.argv[x] == "-start":
prestarts.append(sys.argv[x+1])
starts.append(sys.argv[x+1])
global sc1
#steps = 1
lexer()
#Clock.schedule_interval(sc1.oking, 0.2)
nextstates = list()
#mapfunc()
###???
#sm.add_widget(sc1)
Clock.schedule_interval(sc1.update , 0.2)
return sc1
if __name__ == "__main__":
TestClass().run()
|
py | b40c2e5a25c25f2b3f5f01e2b6ea1a56cc297543 | """A home for mathematical operations which are used multiple times in this package."""
from numpy import ndarray, newaxis
def jacobian_of_f_squared_times_g(*,
f: ndarray, f_jacobian: ndarray,
g: ndarray, g_jacobian: ndarray) -> ndarray:
"""Given two functions f and g, along with their Jacobians, returns the Jacobian of the function f^2 * g.
Parameters
----------
f
A 1D array whose :math:`i`-th element is the value of the function :math:`f` at point :math:`x_i`.
f_jacobian
A 2D array whose :math:`(i,j)`-th element is the :math:`j`-th component of the Jacobian of :math:`f` at point
:math:`x_i`.
g
A 1D array whose :math:`i`-th element is the value of the function :math:`g` at point :math:`x_i`.
g_jacobian
A 2D array whose :math:`(i,j)`-th element is the :math:`j`-th component of the Jacobian of :math:`g` at point
:math:`x_i`.
Returns
-------
jacobian : ndarray
A 2D array of shape (num_points, num_dimensions). The :math:`(i, j)`-th element is the :math:`j`-th component of
the Jacobian of :math:`f^2 g` at point :math:`x_i`.
Notes
-----
The required derivative is as follows:
.. math::
\\frac{\\partial f^2 g}{\\partial x_j} = 2 f g \\frac{\\partial f}{\\partial x_j}
+ f^2 \\frac{\\partial g}{\\partial x_j}
"""
assert f.ndim == g.ndim == 1, "Function data must be a 1-dimensional array"
assert f_jacobian.ndim == g_jacobian.ndim == 2, "Function Jacobian data must be a 2-dimensional array"
# The Jacobian has dimensions (num_points, num_dimensions). For NumPy to broadcast the calculations
# appropriately, we need to augment our 1D variables with a new axis.
f, g = f[:, newaxis], g[:, newaxis]
jacobian = 2 * f * g * f_jacobian + g_jacobian * f ** 2
return jacobian
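def _jacobian_demo():
    """A minimal numerical sketch (assumption: illustrative only, not part of the
    public API): with f(x) = g(x) = x in one dimension, f^2 g = x^3, whose
    derivative is 3 x^2."""
    import numpy as np
    x = np.array([1.0, 2.0])
    ones_jacobian = np.ones((2, 1))  # df/dx = dg/dx = 1 at every point
    jac = jacobian_of_f_squared_times_g(f=x, f_jacobian=ones_jacobian,
                                        g=x, g_jacobian=ones_jacobian)
    assert np.allclose(jac, (3 * x ** 2)[:, np.newaxis])
    return jac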
def hessian_of_f_squared_times_g(*,
f: ndarray, f_jacobian: ndarray, f_hessian: ndarray,
g: ndarray, g_jacobian: ndarray, g_hessian: ndarray) -> ndarray:
"""Given two functions f and g, along with their Jacobian and Hessian, returns the Hessian of the function f^2 * g.
Parameters
----------
f
A 1D array whose :math:`i`-th element is the value of the function :math:`f` at point :math:`x_i`.
f_jacobian
A 2D array whose :math:`(i,j)`-th element is the :math:`j`-th component of the Jacobian of :math:`f` at point
:math:`x_i`.
f_hessian
A 3D array whose :math:`(i,j,k)`-th element is the :math:`(j,k)`-th mixed partial derivative of :math:`f` at
point :math:`x_i`.
g
A 1D array whose :math:`i`-th element is the value of the function :math:`g` at point :math:`x_i`.
g_jacobian
A 2D array whose :math:`(i,j)`-th element is the :math:`j`-th component of the Jacobian of :math:`g` at point
:math:`x_i`.
g_hessian
A 3D array whose :math:`(i,j,k)`-th element is the :math:`(j,k)`-th mixed partial derivative of :math:`g` at
point :math:`x_i`.
Returns
-------
hessian : ndarray
A 3D array of shape (num_points, num_dimensions, num_dimensions). The :math:`(i, j, k)`-th element is the
:math:`(j, k)`-th mixed partial derivative of :math:`f^2 g` at point :math:`x_i`.
Notes
-----
The required derivatives are as follows:
.. math::
\\frac{\\partial f^2 g}{\\partial x_j} & = & 2 f g \\frac{\\partial f}{\\partial x_j}
+ f^2 \\frac{\\partial g}{\\partial x_j} \\\\
\\frac{\\partial^2 f^2 g}{\\partial x_j \\partial x_k} & = &
2 f \\left( g \\frac{\\partial^2 f}{\\partial x_j \\partial x_k}
+ \\frac{\\partial g}{\\partial x_j} \\frac{\\partial f}{\\partial x_k}
+ \\frac{\\partial f}{\\partial x_j} \\frac{\\partial g}{\\partial x_k} \\right) \\\\
& & + 2 g \\frac{\\partial f}{\\partial x_j} \\frac{\\partial f}{\\partial x_k}
        + f^2 \\frac{\\partial^2 g}{\\partial x_j \\partial x_k}
"""
assert f.ndim == g.ndim == 1, "Function data must be a 1-dimensional array"
assert f_jacobian.ndim == g_jacobian.ndim == 2, "Function Jacobian data must be a 2-dimensional array"
assert f_hessian.ndim == g_hessian.ndim == 3, "Function Hessian data must be a 3-dimensional array"
# The Hessian has dimensions (num_points, num_dimensions, num_dimensions). For NumPy to broadcast the calculations
# appropriately, we need to augment our 1D variables with new axes.
f, g = f[:, newaxis, newaxis], g[:, newaxis, newaxis]
# The (i,j,k)-th element of these arrays is the j-th component of the Jacobian at x_i (the k axis has size 1).
f_jacobian_dxj, g_jacobian_dxj = f_jacobian[:, :, newaxis], g_jacobian[:, :, newaxis]
# The (i,j,k)-th element of these arrays is the k-th component of the Jacobian at x_i (the j axis has size 1).
f_jacobian_dxk, g_jacobian_dxk = f_jacobian[:, newaxis, :], g_jacobian[:, newaxis, :]
hessian = \
2 * f * (
f_hessian * g +
g_jacobian_dxj * f_jacobian_dxk +
f_jacobian_dxj * g_jacobian_dxk
) + 2 * g * f_jacobian_dxj * f_jacobian_dxk \
+ g_hessian * f ** 2
return hessian
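def _hessian_demo():
    """A minimal numerical sketch (assumption: illustrative only, not part of the
    public API): with f(x) = g(x) = x in one dimension, f^2 g = x^3, whose second
    derivative is 6 x."""
    import numpy as np
    x = np.array([1.0, 2.0])
    ones_jacobian = np.ones((2, 1))     # df/dx = dg/dx = 1
    zero_hessian = np.zeros((2, 1, 1))  # d2f/dx2 = d2g/dx2 = 0
    hess = hessian_of_f_squared_times_g(f=x, f_jacobian=ones_jacobian, f_hessian=zero_hessian,
                                        g=x, g_jacobian=ones_jacobian, g_hessian=zero_hessian)
    assert np.allclose(hess, (6 * x)[:, np.newaxis, np.newaxis])
    return hess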
|
py | b40c2ed80b766fd8747457101d15b23efccccc53 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Creating test files for the unit tests of PROPOSAL
This internal package is thought to be used by developers and
maintainers of PROPOSAL only.
You can execute every module separate to create test files
for the specific tests or run __init__.py to execute all modules.
Therfore you must be in the tests/gen_testfiles directory, because
propagation.py uses the config_ice.json located in the
resources directory which is hard coded in that module.
"""
import multiprocessing
import sys
import os
import warnings
import subprocess
import bremsstrahlung
import continous_randomization
import epairproduction
import ionization
import photonuclear
import propagation
import scattering
import sector
def main():
print("There are %d CPUs on this machine" % multiprocessing.cpu_count())
number_processes = 2
dir_name = "TestFiles/"
tar_name = "TestFiles.tar.gz"
try:
number_processes = int(sys.argv[1])
except IndexError:
pass
except ValueError:
        warnings.warn("first argument must be an integer (number of processes to use)")
pass
try:
os.makedirs(dir_name)
print("Directory {} created".format(dir_name))
except OSError:
print("Directory {} already exists".format(dir_name))
pool = multiprocessing.Pool(number_processes)
results = []
pool.apply_async(bremsstrahlung.main, (dir_name, ))
pool.apply_async(continous_randomization.main, (dir_name, ))
pool.apply_async(epairproduction.main, (dir_name, ))
pool.apply_async(ionization.main, (dir_name, ))
pool.apply_async(photonuclear.main, (dir_name, ))
pool.apply_async(propagation.main, (dir_name, ))
pool.apply_async(scattering.main, (dir_name, ))
pool.apply_async(sector.main, (dir_name, ))
pool.close()
pool.join()
print("all threads are joined")
p = subprocess.Popen(['tar', '-czf', tar_name, dir_name])
p.communicate()
print("compressed test files {}".format(tar_name))
p = subprocess.Popen(['rm', '-r', dir_name])
p.communicate()
print("Directory {} removed".format(dir_name))
if __name__ == "__main__":
main()
|
py | b40c303e87892deb2c44eb343e8a851135a30036 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from ._enums import *
__all__ = [
'ResourceSkuResponse',
'SignalRCorsSettingsResponse',
'SignalRFeatureResponse',
]
@pulumi.output_type
class ResourceSkuResponse(dict):
"""
The billing information of the SignalR resource.
"""
def __init__(__self__, *,
name: str,
capacity: Optional[int] = None,
family: Optional[str] = None,
size: Optional[str] = None,
tier: Optional[str] = None):
"""
The billing information of the SignalR resource.
:param str name: The name of the SKU. Required.
Allowed values: Standard_S1, Free_F1
:param int capacity: Optional, integer. The unit count of SignalR resource. 1 by default.
If present, following values are allowed:
Free: 1
Standard: 1,2,5,10,20,50,100
:param str family: Optional string. For future use.
:param str size: Optional string. For future use.
:param str tier: Optional tier of this particular SKU. 'Standard' or 'Free'.
`Basic` is deprecated, use `Standard` instead.
"""
pulumi.set(__self__, "name", name)
if capacity is not None:
pulumi.set(__self__, "capacity", capacity)
if family is not None:
pulumi.set(__self__, "family", family)
if size is not None:
pulumi.set(__self__, "size", size)
if tier is not None:
pulumi.set(__self__, "tier", tier)
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the SKU. Required.
Allowed values: Standard_S1, Free_F1
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def capacity(self) -> Optional[int]:
"""
Optional, integer. The unit count of SignalR resource. 1 by default.
If present, following values are allowed:
Free: 1
Standard: 1,2,5,10,20,50,100
"""
return pulumi.get(self, "capacity")
@property
@pulumi.getter
def family(self) -> Optional[str]:
"""
Optional string. For future use.
"""
return pulumi.get(self, "family")
@property
@pulumi.getter
def size(self) -> Optional[str]:
"""
Optional string. For future use.
"""
return pulumi.get(self, "size")
@property
@pulumi.getter
def tier(self) -> Optional[str]:
"""
Optional tier of this particular SKU. 'Standard' or 'Free'.
`Basic` is deprecated, use `Standard` instead.
"""
return pulumi.get(self, "tier")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class SignalRCorsSettingsResponse(dict):
"""
Cross-Origin Resource Sharing (CORS) settings.
"""
def __init__(__self__, *,
allowed_origins: Optional[Sequence[str]] = None):
"""
Cross-Origin Resource Sharing (CORS) settings.
:param Sequence[str] allowed_origins: Gets or sets the list of origins that should be allowed to make cross-origin calls (for example: http://example.com:12345). Use "*" to allow all. If omitted, allow all by default.
"""
if allowed_origins is not None:
pulumi.set(__self__, "allowed_origins", allowed_origins)
@property
@pulumi.getter(name="allowedOrigins")
def allowed_origins(self) -> Optional[Sequence[str]]:
"""
Gets or sets the list of origins that should be allowed to make cross-origin calls (for example: http://example.com:12345). Use "*" to allow all. If omitted, allow all by default.
"""
return pulumi.get(self, "allowed_origins")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class SignalRFeatureResponse(dict):
"""
Feature of a SignalR resource, which controls the SignalR runtime behavior.
"""
def __init__(__self__, *,
flag: str,
value: str,
properties: Optional[Mapping[str, str]] = None):
"""
Feature of a SignalR resource, which controls the SignalR runtime behavior.
:param str flag: FeatureFlags is the supported features of Azure SignalR service.
- ServiceMode: Flag for backend server for SignalR service. Values allowed: "Default": have your own backend server; "Serverless": your application doesn't have a backend server; "Classic": for backward compatibility. Support both Default and Serverless mode but not recommended; "PredefinedOnly": for future use.
- EnableConnectivityLogs: "true"/"false", to enable/disable the connectivity log category respectively.
:param str value: Value of the feature flag. See Azure SignalR service document https://docs.microsoft.com/azure/azure-signalr/ for allowed values.
:param Mapping[str, str] properties: Optional properties related to this feature.
"""
pulumi.set(__self__, "flag", flag)
pulumi.set(__self__, "value", value)
if properties is not None:
pulumi.set(__self__, "properties", properties)
@property
@pulumi.getter
def flag(self) -> str:
"""
FeatureFlags is the supported features of Azure SignalR service.
- ServiceMode: Flag for backend server for SignalR service. Values allowed: "Default": have your own backend server; "Serverless": your application doesn't have a backend server; "Classic": for backward compatibility. Support both Default and Serverless mode but not recommended; "PredefinedOnly": for future use.
- EnableConnectivityLogs: "true"/"false", to enable/disable the connectivity log category respectively.
"""
return pulumi.get(self, "flag")
@property
@pulumi.getter
def value(self) -> str:
"""
Value of the feature flag. See Azure SignalR service document https://docs.microsoft.com/azure/azure-signalr/ for allowed values.
"""
return pulumi.get(self, "value")
@property
@pulumi.getter
def properties(self) -> Optional[Mapping[str, str]]:
"""
Optional properties related to this feature.
"""
return pulumi.get(self, "properties")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
|
py | b40c3169b3256d3b2120a9449cbc3fbd200ffe7a | """
sklearn-compatible implementation of spatially structured learners (
TV-L1, Graph-Net, etc.)
"""
# Author: DOHMATOB Elvis Dopgima,
# PIZARRO Gaspar,
# VAROQUAUX Gael,
# GRAMFORT Alexandre,
# EICKENBERG Michael,
# THIRION Bertrand
# License: simplified BSD
from distutils.version import LooseVersion
import sklearn
import warnings
import numbers
import time
import sys
from functools import partial
import numpy as np
from scipy import stats, ndimage
from sklearn.base import RegressorMixin
from sklearn.utils.extmath import safe_sparse_dot
try:
from sklearn.utils import atleast2d_or_csr
except ImportError: # sklearn 0.15
from sklearn.utils import check_array as atleast2d_or_csr
from sklearn.linear_model.base import LinearModel, center_data
from sklearn.feature_selection import (SelectPercentile, f_regression,
f_classif)
from sklearn.externals.joblib import Memory, Parallel, delayed
from sklearn.preprocessing import LabelBinarizer
from sklearn.metrics import accuracy_score
from ..input_data.masker_validation import check_embedded_nifti_masker
from .._utils.param_validation import _adjust_screening_percentile
from .._utils.fixes import check_X_y
from .._utils.fixes import check_cv
from .._utils.compat import _basestring
from .._utils.cache_mixin import CacheMixin
from .objective_functions import _unmask
from .space_net_solvers import (tvl1_solver, _graph_net_logistic,
_graph_net_squared_loss)
def _crop_mask(mask):
"""Crops input mask to produce tighter (i.e smaller) bounding box with
the same support (active voxels)"""
idx = np.where(mask)
if idx[0].size == 0:
raise ValueError("Empty mask: if you have given a mask, it is "
"empty, and if you have not given a mask, the "
"mask-extraction routines have failed. Please "
"provide an appropriate mask.")
i_min = max(idx[0].min() - 1, 0)
i_max = idx[0].max()
j_min = max(idx[1].min() - 1, 0)
j_max = idx[1].max()
k_min = max(idx[2].min() - 1, 0)
k_max = idx[2].max()
return mask[i_min:i_max + 1, j_min:j_max + 1, k_min:k_max + 1]
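def _crop_mask_demo():
    """A minimal sketch (assumption: illustrative only, not part of the public API):
    a single active voxel inside a 5x5x5 mask is cropped down to a 2x2x2 box (one
    voxel of margin is kept on the lower side of each axis)."""
    mask = np.zeros((5, 5, 5), dtype=bool)
    mask[2, 2, 2] = True
    return _crop_mask(mask).shape  # -> (2, 2, 2)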
def _univariate_feature_screening(
X, y, mask, is_classif, screening_percentile, smoothing_fwhm=2.):
"""
    Selects the most important features via a univariate test.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Design matrix.
y : ndarray, shape (n_samples,)
Response Vector.
    mask : ndarray of booleans, shape (nx, ny, nz)
        Mask defining brain ROIs.
is_classif: bool
Flag telling whether the learning task is classification or regression.
screening_percentile : float in the closed interval [0., 100.]
        Only the `screening_percentile` percent most important voxels will
        be retained.
smoothing_fwhm : float, optional (default 2.)
FWHM for isotropically smoothing the data X before F-testing. A value
of zero means "don't smooth".
Returns
-------
X_: ndarray, shape (n_samples, n_features_)
Reduced design matrix with only columns corresponding to the voxels
retained after screening.
mask_ : ndarray of booleans, shape (nx, ny, nz)
Mask with support reduced to only contain voxels retained after
screening.
support : ndarray of ints, shape (n_features_,)
Support of the screened mask, as a subset of the support of the
original mask.
"""
# smooth the data (with isotropic Gaussian kernel) before screening
if smoothing_fwhm > 0.:
sX = np.empty(X.shape)
for sample in range(sX.shape[0]):
sX[sample] = ndimage.gaussian_filter(
_unmask(X[sample].copy(), # avoid modifying X
mask), (smoothing_fwhm, smoothing_fwhm,
smoothing_fwhm))[mask]
else:
sX = X
# do feature screening proper
selector = SelectPercentile(f_classif if is_classif else f_regression,
percentile=screening_percentile).fit(sX, y)
support = selector.get_support()
# erode and then dilate mask, thus obtaining a "cleaner" version of
# the mask on which a spatial prior actually makes sense
mask_ = mask.copy()
mask_[mask] = (support > 0)
mask_ = ndimage.binary_dilation(ndimage.binary_erosion(
mask_)).astype(np.bool)
mask_[np.logical_not(mask)] = 0
support = mask_[mask]
X = X[:, support]
return X, mask_, support
def _space_net_alpha_grid(X, y, eps=1e-3, n_alphas=10, l1_ratio=1.,
logistic=False):
"""Compute the grid of alpha values for TV-L1 and Graph-Net.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Training data (design matrix).
y : ndarray, shape (n_samples,)
Target / response vector.
l1_ratio : float
The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``.
For ``l1_ratio = 0`` the penalty is purely a spatial prior
(Graph-Net, TV, etc.). ``For l1_ratio = 1`` it is an L1 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1
and a spatial prior.
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, optional
Number of alphas along the regularization path.
logistic : bool, optional (default False)
Indicates where the underlying loss function is logistic.
"""
if logistic:
# Computes the theoretical upper bound for the overall
# regularization, as derived in "An Interior-Point Method for
# Large-Scale l1-Regularized Logistic Regression", by Koh, Kim,
# Boyd, in Journal of Machine Learning Research, 8:1519-1555,
# July 2007.
# url: http://www.stanford.edu/~boyd/papers/pdf/l1_logistic_reg.pdf
m = float(y.size)
m_plus = float(y[y == 1].size)
m_minus = float(y[y == -1].size)
b = np.zeros_like(y)
b[y == 1] = m_minus / m
b[y == -1] = - m_plus / m
alpha_max = np.max(np.abs(X.T.dot(b)))
        # It may happen that b is in the kernel of X.T!
if alpha_max == 0.:
alpha_max = np.abs(np.dot(X.T, y)).max()
else:
alpha_max = np.abs(np.dot(X.T, y)).max()
# prevent alpha_max from exploding when l1_ratio = 0
if l1_ratio == 0.:
l1_ratio = 1e-3
alpha_max /= l1_ratio
if n_alphas == 1:
return np.array([alpha_max])
alpha_min = alpha_max * eps
return np.logspace(np.log10(alpha_min), np.log10(alpha_max),
num=n_alphas)[::-1]
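def _space_net_alpha_grid_demo():
    """A minimal sketch (assumption: illustrative only, not part of the public API):
    for the squared loss, the grid starts at max|X^T y| / l1_ratio and decreases
    over `eps` orders of magnitude on a log scale."""
    rng = np.random.RandomState(0)
    X, y = rng.randn(10, 4), rng.randn(10)
    alphas = _space_net_alpha_grid(X, y, eps=1e-2, n_alphas=5, l1_ratio=0.5)
    assert np.isclose(alphas[0], np.abs(X.T.dot(y)).max() / 0.5)
    assert np.all(np.diff(alphas) < 0)
    return alphas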
class _EarlyStoppingCallback(object):
"""Out-of-bag early stopping
A callable that returns True when the test error starts
rising. We use a Spearman correlation (between X_test.w and y_test)
for scoring.
"""
def __init__(self, X_test, y_test, is_classif, debias=False, verbose=0):
self.X_test = X_test
self.y_test = y_test
self.is_classif = is_classif
self.debias = debias
self.verbose = verbose
self.tol = -1e-4 if self.is_classif else -1e-2
self.test_scores = []
self.counter = 0.
def __call__(self, variables):
"""The callback proper """
# misc
if not isinstance(variables, dict):
variables = dict(w=variables)
self.counter += 1
w = variables['w']
# use Spearman score as stopping criterion
score = self.test_score(w)[0]
self.test_scores.append(score)
if not (self.counter > 20 and (self.counter % 10) == 2):
return
# check whether score increased on average over last 5 iterations
if len(self.test_scores) > 4:
if np.mean(np.diff(self.test_scores[-5:][::-1])) >= self.tol:
if self.verbose:
if self.verbose > 1:
print('Early stopping. Test score: %.8f %s' % (
score, 40 * '-'))
else:
sys.stderr.write('.')
return True
if self.verbose > 1:
print('Test score: %.8f' % score)
return False
def _debias(self, w):
""""Debias w by rescaling the coefficients by a fixed factor.
Precisely, the scaling factor is: <y_pred, y_test> / ||y_test||^2.
"""
y_pred = np.dot(self.X_test, w)
scaling = np.dot(y_pred, y_pred)
if scaling > 0.:
scaling = np.dot(y_pred, self.y_test) / scaling
w *= scaling
return w
def test_score(self, w):
"""Compute test score for model, given weights map `w`.
We use correlations between linear prediction and
ground truth (y_test).
We return 2 scores for model selection: one is the Spearman
correlation, which captures ordering between input and
output, but tends to have 'flat' regions. The other
is the Pearson correlation, that we can use to disambiguate
between regions with equivalent Spearman correlation.
"""
if self.is_classif:
w = w[:-1]
if w.ptp() == 0:
# constant map, there is nothing
return (-np.inf, -np.inf)
y_pred = np.dot(self.X_test, w)
spearman_score = stats.spearmanr(y_pred, self.y_test)[0]
pearson_score = np.corrcoef(y_pred, self.y_test)[1, 0]
if self.is_classif:
return spearman_score, pearson_score
else:
return pearson_score, spearman_score
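def _early_stopping_score_demo():
    """A minimal sketch (assumption: illustrative only, not part of the public API):
    on a noiseless linear regression problem, the true weights score a perfect
    (Pearson, Spearman) correlation of (1.0, 1.0) on the held-out data."""
    rng = np.random.RandomState(0)
    X = rng.randn(20, 3)
    w = np.array([1., -2., .5])
    y = X.dot(w)
    callback = _EarlyStoppingCallback(X, y, is_classif=False)
    return callback.test_score(w)  # regression -> (pearson_score, spearman_score)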
def path_scores(solver, X, y, mask, alphas, l1_ratios, train, test,
solver_params, is_classif=False, n_alphas=10, eps=1E-3,
key=None, debias=False, Xmean=None,
screening_percentile=20., verbose=1):
"""Function to compute scores of different alphas in regression and
classification used by CV objects
Parameters
----------
X : 2D array of shape (n_samples, n_features)
Design matrix, one row per sample point.
y : 1D array of length n_samples
Response vector; one value per sample.
mask : 3D arrays of boolean
Mask defining brain regions that we work on.
alphas : list of floats
List of regularization parameters being considered.
train : array or list of integers
List of indices for the train samples.
test : array or list of integers
List of indices for the test samples.
    l1_ratios : float or list of floats in the interval [0, 1]
        Constant(s) that mix L1 and TV (resp. Graph-Net) penalization.
        l1_ratio == 0: just smooth. l1_ratio == 1: just lasso.
eps : float, optional (default 1e-3)
Length of the path. For example, ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, optional (default 10).
Generate this number of alphas per regularization path.
This parameter is mutually exclusive with the `alphas` parameter.
solver : function handle
See for example tv.TVl1Classifier documentation.
solver_params: dict
Dictionary of param-value pairs to be passed to solver.
"""
if l1_ratios is None:
raise ValueError("l1_ratios must be specified!")
# misc
_, n_features = X.shape
verbose = int(verbose if verbose is not None else 0)
# Univariate feature screening. Note that if we have only as few as 100
# features in the mask's support, then we should use all of them to
# learn the model i.e disable this screening)
do_screening = (n_features > 100) and screening_percentile < 100.
if do_screening:
X, mask, support = _univariate_feature_screening(
X, y, mask, is_classif, screening_percentile)
# crop the mask to have a tighter bounding box
mask = _crop_mask(mask)
# get train and test data
X_train, y_train = X[train].copy(), y[train].copy()
X_test, y_test = X[test].copy(), y[test].copy()
# it is essential to center the data in regression
X_train, y_train, _, y_train_mean, _ = center_data(
X_train, y_train, fit_intercept=True, normalize=False,
copy=False)
# misc
if isinstance(l1_ratios, numbers.Number):
l1_ratios = [l1_ratios]
l1_ratios = sorted(l1_ratios)[::-1] # from large to small l1_ratios
best_score = -np.inf
best_secondary_score = -np.inf
best_l1_ratio = l1_ratios[0]
best_alpha = None
best_init = None
all_test_scores = []
if len(test) > 0.:
# do l1_ratio path
for l1_ratio in l1_ratios:
this_test_scores = []
# make alpha grid
if alphas is None:
alphas_ = _space_net_alpha_grid(
X_train, y_train, l1_ratio=l1_ratio, eps=eps,
n_alphas=n_alphas, logistic=is_classif)
else:
alphas_ = alphas
alphas_ = sorted(alphas_)[::-1] # from large to small l1_ratios
# do alpha path
if best_alpha is None:
best_alpha = alphas_[0]
init = None
for alpha in alphas_:
# setup callback mechanism for early stopping
early_stopper = _EarlyStoppingCallback(
X_test, y_test, is_classif=is_classif, debias=debias,
verbose=verbose)
w, _, init = solver(
X_train, y_train, alpha, l1_ratio, mask=mask, init=init,
callback=early_stopper, verbose=max(verbose - 1, 0.),
**solver_params)
# We use 2 scores for model selection: the second one is to
# disambiguate between regions of equivalent Spearman
# correlations
score, secondary_score = early_stopper.test_score(w)
this_test_scores.append(score)
if (np.isfinite(score) and
(score > best_score
or (score == best_score and
secondary_score > best_secondary_score))):
best_secondary_score = secondary_score
best_score = score
best_l1_ratio = l1_ratio
best_alpha = alpha
best_init = init.copy()
all_test_scores.append(this_test_scores)
else:
if alphas is None:
alphas_ = _space_net_alpha_grid(
X_train, y_train, l1_ratio=best_l1_ratio, eps=eps,
n_alphas=n_alphas, logistic=is_classif)
else:
alphas_ = alphas
best_alpha = alphas_[0]
# re-fit best model to high precision (i.e without early stopping, etc.)
best_w, _, init = solver(X_train, y_train, best_alpha, best_l1_ratio,
mask=mask, init=best_init,
verbose=max(verbose - 1, 0), **solver_params)
if debias:
best_w = _EarlyStoppingCallback(
X_test, y_test, is_classif=is_classif, debias=debias,
verbose=verbose)._debias(best_w)
if len(test) == 0.:
all_test_scores.append(np.nan)
# unmask univariate screening
if do_screening:
w_ = np.zeros(len(support))
if is_classif:
w_ = np.append(w_, best_w[-1])
w_[:-1][support] = best_w[:-1]
else:
w_[support] = best_w
best_w = w_
if len(best_w) == n_features:
if Xmean is None:
Xmean = np.zeros(n_features)
best_w = np.append(best_w, 0.)
all_test_scores = np.array(all_test_scores)
return (all_test_scores, best_w, best_alpha, best_l1_ratio, alphas_,
y_train_mean, key)
class BaseSpaceNet(LinearModel, RegressorMixin, CacheMixin):
"""
Regression and classification learners with sparsity and spatial priors
`SpaceNet` implements Graph-Net and TV-L1 priors /
penalties. Thus, the penalty is a sum an L1 term and a spatial term. The
aim of such a hybrid prior is to obtain weights maps which are structured
(due to the spatial prior) and sparse (enforced by L1 norm).
Parameters
----------
penalty : string, optional (default 'graph-net')
Penalty to used in the model. Can be 'graph-net' or 'tv-l1'.
loss : string, optional (default "mse")
Loss to be used in the model. Must be an one of "mse", or "logistic".
is_classif : bool, optional (default False)
Flag telling whether the learning task is classification or regression.
l1_ratios : float or list of floats in the interval [0, 1];
optional (default .5)
Constant that mixes L1 and spatial prior terms in penalization.
l1_ratio == 1 corresponds to pure LASSO. The larger the value of this
parameter, the sparser the estimated weights map. If list is provided,
then the best value will be selected by cross-validation.
alphas : float or list of floats, optional (default None)
Choices for the constant that scales the overall regularization term.
This parameter is mutually exclusive with the `n_alphas` parameter.
If None or list of floats is provided, then the best value will be
selected by cross-validation.
n_alphas : int, optional (default 10).
Generate this number of alphas per regularization path.
This parameter is mutually exclusive with the `alphas` parameter.
eps : float, optional (default 1e-3)
Length of the path. For example, ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
    mask : filename, niimg, or NiftiMasker instance, optional (default None)
        Mask to be used on data. If an instance of masker is passed,
        then its mask will be used. If no mask is given, it will be computed
        automatically by a NiftiMasker.
target_affine : 3x3 or 4x4 matrix, optional (default None)
This parameter is passed to image.resample_img. An important use-case
of this parameter is for downsampling the input data to a coarser
        resolution (to speed up the model fit). Please see the related
documentation for details.
target_shape : 3-tuple of integers, optional (default None)
This parameter is passed to image.resample_img. Please see the
related documentation for details.
low_pass: None or float, optional
This parameter is passed to signal.clean. Please see the related
documentation for details
high_pass: None or float, optional
This parameter is passed to signal.clean. Please see the related
documentation for details
t_r : float, optional (default None)
This parameter is passed to signal.clean. Please see the related
documentation for details.
    screening_percentile : float in the interval [0, 100], optional (default 20)
        Percentile value for ANOVA univariate feature selection. A value of
        100 means 'keep all features'. This percentile is expressed
        w.r.t. the volume of a standard (MNI152) brain, and so is corrected
        at runtime to correspond to the volume of the user-supplied mask
        (which is typically smaller). If 100 is given, all the features
        are used, regardless of the number of voxels.
standardize : bool, optional (default True):
If set, then the data (X, y) are centered to have mean zero along
axis 0. This is here because nearly all linear models will want
their data to be centered.
fit_intercept : bool, optional (default True)
Fit or not an intercept.
max_iter : int (default 1000)
Defines the maximum number of iterations for the solver.
tol : float, optional (default 5e-4)
Defines the tolerance for convergence for the backend FISTA solver.
verbose : int, optional (default 1)
Verbosity level.
n_jobs : int, optional (default 1)
Number of jobs in solving the sub-problems.
memory: instance of joblib.Memory or string
Used to cache the masking process.
By default, no caching is done. If a string is given, it is the
path to the caching directory.
memory_level: integer, optional (default 1)
Rough estimator of the amount of memory used by caching. Higher value
means more memory for caching.
cv : int, a cv generator instance, or None (default 8)
The input specifying which cross-validation generator to use.
It can be an integer, in which case it is the number of folds in a
KFold, None, in which case 3-fold cross-validation is used, or another object, that
will then be used as a cv generator.
debias : bool, optional (default False)
If set, then the estimated weights maps will be debiased.
Attributes
----------
`alpha_` : float
Best alpha found by cross-validation.
`coef_` : ndarray, shape (n_classes-1, n_features)
Coefficient of the features in the decision function.
`masker_` : instance of NiftiMasker
The nifti masker used to mask the data.
`mask_img_` : Nifti like image
The mask of the data. If no mask was supplied by the user,
this attribute is the mask image computed automatically from the
data `X`.
`intercept_` : ndarray, shape (n_classes - 1,)
Intercept (a.k.a. bias) added to the decision function.
It is available only when parameter intercept is set to True.
`cv_` : list of pairs of lists
List of the (n_folds,) folds. For the corresponding fold,
each pair is composed of two lists of indices,
one for the train samples and one for the test samples.
`cv_scores_` : ndarray, shape (n_alphas, n_folds) or
(n_l1_ratios, n_alphas, n_folds)
Scores (misclassification) for each alpha, and on each fold
`screening_percentile_` : float
Screening percentile corrected according to volume of mask,
relative to the volume of standard brain.
"""
SUPPORTED_PENALTIES = ["graph-net", "tv-l1"]
SUPPORTED_LOSSES = ["mse", "logistic"]
def __init__(self, penalty="graph-net", is_classif=False, loss=None,
l1_ratios=.5, alphas=None, n_alphas=10, mask=None,
target_affine=None, target_shape=None, low_pass=None,
high_pass=None, t_r=None, max_iter=1000, tol=5e-4,
memory=None, memory_level=1, standardize=True, verbose=1,
mask_args=None,
n_jobs=1, eps=1e-3, cv=8, fit_intercept=True,
screening_percentile=20., debias=False):
self.penalty = penalty
self.is_classif = is_classif
self.loss = loss
self.n_alphas = n_alphas
self.eps = eps
self.l1_ratios = l1_ratios
self.alphas = alphas
self.mask = mask
self.fit_intercept = fit_intercept
self.memory = memory
self.memory_level = memory_level
self.max_iter = max_iter
self.tol = tol
self.verbose = verbose
self.standardize = standardize
self.n_jobs = n_jobs
self.cv = cv
self.screening_percentile = screening_percentile
self.debias = debias
self.low_pass = low_pass
self.high_pass = high_pass
self.t_r = t_r
self.target_affine = target_affine
self.target_shape = target_shape
self.mask_args = mask_args
# sanity check on params
self.check_params()
def check_params(self):
"""Makes sure parameters are sane"""
if self.l1_ratios is not None:
l1_ratios = self.l1_ratios
if isinstance(l1_ratios, numbers.Number):
l1_ratios = [l1_ratios]
for l1_ratio in l1_ratios:
if not 0 <= l1_ratio <= 1.:
raise ValueError(
"l1_ratio must be in the interval [0, 1]; got %g" % (
l1_ratio))
elif l1_ratio == 0. or l1_ratio == 1.:
warnings.warn(
("Specified l1_ratio = %g. It's advised to only "
"specify values of l1_ratio strictly between 0 "
"and 1." % l1_ratio))
if not (0. <= self.screening_percentile <= 100.):
raise ValueError(
("screening_percentile should be in the interval"
" [0, 100], got %g" % self.screening_percentile))
if self.penalty not in self.SUPPORTED_PENALTIES:
raise ValueError(
"'penalty' parameter must be one of %s%s or %s; got %s" % (
",".join(self.SUPPORTED_PENALTIES[:-1]), "," if len(
self.SUPPORTED_PENALTIES) > 2 else "",
self.SUPPORTED_PENALTIES[-1], self.penalty))
if not (self.loss is None or self.loss in self.SUPPORTED_LOSSES):
raise ValueError(
"'loss' parameter must be one of %s%s or %s; got %s" % (
",".join(self.SUPPORTED_LOSSES[:-1]), "," if len(
self.SUPPORTED_LOSSES) > 2 else "",
self.SUPPORTED_LOSSES[-1], self.loss))
if self.loss is not None and not self.is_classif and (
self.loss == "logistic"):
raise ValueError(
("'logistic' loss is only available for classification "
"problems."))
def _set_coef_and_intercept(self, w):
"""Sets the loadings vector (coef) and the intercept of the fitted
model."""
self.w_ = np.array(w)
if self.w_.ndim == 1:
self.w_ = self.w_[np.newaxis, :]
self.coef_ = self.w_[:, :-1]
if self.is_classif:
self.intercept_ = self.w_[:, -1]
else:
self._set_intercept(self.Xmean_, self.ymean_, self.Xstd_)
def fit(self, X, y):
"""Fit the learner
Parameters
----------
X : list of Niimg-like objects
See http://nilearn.github.io/manipulating_images/input_output.html
Data on which model is to be fitted. If this is a list,
the affine is considered the same for all.
y : array or list of length n_samples
The dependent variable (age, sex, IQ, etc.).
Returns
-------
self : `SpaceNet` object
Model selection is via cross-validation with bagging.
"""
# misc
self.check_params()
if self.memory is None or isinstance(self.memory, _basestring):
self.memory_ = Memory(self.memory,
verbose=max(0, self.verbose - 1))
else:
self.memory_ = self.memory
if self.verbose:
tic = time.time()
# nifti masking
self.masker_ = check_embedded_nifti_masker(self, multi_subject=False)
X = self.masker_.fit_transform(X)
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float,
multi_output=True, y_numeric=True)
# misc
self.Xmean_ = X.mean(axis=0)
self.Xstd_ = X.std(axis=0)
self.Xstd_[self.Xstd_ < 1e-8] = 1
self.mask_img_ = self.masker_.mask_img_
self.mask_ = self.mask_img_.get_data().astype(np.bool)
n_samples, _ = X.shape
y = np.array(y).copy()
l1_ratios = self.l1_ratios
if isinstance(l1_ratios, numbers.Number):
l1_ratios = [l1_ratios]
alphas = self.alphas
if isinstance(alphas, numbers.Number):
alphas = [alphas]
if self.loss is not None:
loss = self.loss
elif self.is_classif:
loss = "logistic"
else:
loss = "mse"
# set backend solver
if self.penalty.lower() == "graph-net":
if not self.is_classif or loss == "mse":
solver = _graph_net_squared_loss
else:
solver = _graph_net_logistic
else:
if not self.is_classif or loss == "mse":
solver = partial(tvl1_solver, loss="mse")
else:
solver = partial(tvl1_solver, loss="logistic")
# generate fold indices
case1 = (None in [alphas, l1_ratios]) and self.n_alphas > 1
case2 = (alphas is not None) and min(len(l1_ratios), len(alphas)) > 1
if case1 or case2:
if LooseVersion(sklearn.__version__) >= LooseVersion('0.18'):
# scikit-learn >= 0.18
self.cv_ = list(check_cv(
self.cv, y=y, classifier=self.is_classif).split(X, y))
else:
# scikit-learn < 0.18
self.cv_ = list(check_cv(self.cv, X=X, y=y,
classifier=self.is_classif))
else:
# no cross-validation needed, user supplied all params
self.cv_ = [(np.arange(n_samples), [])]
n_folds = len(self.cv_)
# number of problems to solve
if self.is_classif:
y = self._binarize_y(y)
else:
y = y[:, np.newaxis]
if self.is_classif and self.n_classes_ > 2:
n_problems = self.n_classes_
else:
n_problems = 1
# standardize y
self.ymean_ = np.zeros(y.shape[0])
if n_problems == 1:
y = y[:, 0]
# scores & mean weights map over all folds
self.cv_scores_ = [[] for i in range(n_problems)]
w = np.zeros((n_problems, X.shape[1] + 1))
self.all_coef_ = np.ndarray((n_problems, n_folds, X.shape[1]))
self.screening_percentile_ = _adjust_screening_percentile(
self.screening_percentile, self.mask_img_, verbose=self.verbose)
# main loop: loop on classes and folds
solver_params = dict(tol=self.tol, max_iter=self.max_iter)
self.best_model_params_ = []
self.alpha_grids_ = []
for (test_scores, best_w, best_alpha, best_l1_ratio, alphas,
y_train_mean, (cls, fold)) in Parallel(
n_jobs=self.n_jobs, verbose=2 * self.verbose)(
delayed(self._cache(path_scores, func_memory_level=2))(
solver, X, y[:, cls] if n_problems > 1 else y, self.mask_,
alphas, l1_ratios, self.cv_[fold][0], self.cv_[fold][1],
solver_params, n_alphas=self.n_alphas, eps=self.eps,
is_classif=self.loss == "logistic", key=(cls, fold),
debias=self.debias, verbose=self.verbose,
screening_percentile=self.screening_percentile_,
) for cls in range(n_problems) for fold in range(n_folds)):
self.best_model_params_.append((best_alpha, best_l1_ratio))
self.alpha_grids_.append(alphas)
self.ymean_[cls] += y_train_mean
self.all_coef_[cls, fold] = best_w[:-1]
if len(np.atleast_1d(l1_ratios)) == 1:
test_scores = test_scores[0]
self.cv_scores_[cls].append(test_scores)
w[cls] += best_w
# misc
self.cv_scores_ = np.array(self.cv_scores_)
self.alpha_grids_ = np.array(self.alpha_grids_)
self.ymean_ /= n_folds
if not self.is_classif:
self.all_coef_ = np.array(self.all_coef_)
w = w[0]
self.ymean_ = self.ymean_[0]
# bagging: average best weights maps over folds
w /= n_folds
# set coefs and intercepts
self._set_coef_and_intercept(w)
# unmask weights map as a niimg
self.coef_img_ = self.masker_.inverse_transform(self.coef_)
# report time elapsed
if self.verbose:
duration = time.time() - tic
print("Time Elapsed: %g seconds, %i minutes." % (
duration, duration / 60.))
return self
def decision_function(self, X):
"""Predict confidence scores for samples
The confidence score for a sample is the signed distance of that
sample to the hyperplane.
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features)
Samples.
Returns
-------
array, shape=(n_samples,) if n_classes == 2 else (n_samples, n_classes)
Confidence scores per (sample, class) combination. In the binary
case, confidence score for self.classes_[1] where >0 means this
class would be predicted.
"""
# handle regression (least-squares loss)
if not self.is_classif:
return LinearModel.decision_function(self, X)
X = atleast2d_or_csr(X)
n_features = self.coef_.shape[1]
if X.shape[1] != n_features:
raise ValueError("X has %d features per sample; expecting %d"
% (X.shape[1], n_features))
scores = safe_sparse_dot(X, self.coef_.T,
dense_output=True) + self.intercept_
return scores.ravel() if scores.shape[1] == 1 else scores
def predict(self, X):
"""Predict class labels for samples in X.
Parameters
----------
X : list of Niimg-like objects
See http://nilearn.github.io/manipulating_images/input_output.html
Data on which prediction is to be made. If this is a list,
the affine is considered the same for all.
Returns
-------
y_pred : ndarray, shape (n_samples,)
Predicted class label per sample.
"""
# cast X into usual 2D array
if not hasattr(self, "masker_"):
raise RuntimeError("This %s instance is not fitted yet!" % (
self.__class__.__name__))
X = self.masker_.transform(X)
# handle regression (least-squares loss)
if not self.is_classif:
return LinearModel.predict(self, X)
# prediction proper
scores = self.decision_function(X)
if len(scores.shape) == 1:
indices = (scores > 0).astype(np.int)
else:
indices = scores.argmax(axis=1)
return self.classes_[indices]
class SpaceNetClassifier(BaseSpaceNet):
"""Classification learners with sparsity and spatial priors.
`SpaceNetClassifier` implements Graph-Net and TV-L1
priors / penalties for classification problems. Thus, the penalty
is a sum of an L1 term and a spatial term. The aim of such a hybrid prior
is to obtain weights maps which are structured (due to the spatial
prior) and sparse (enforced by L1 norm).
Parameters
----------
penalty : string, optional (default 'graph-net')
Penalty to be used in the model. Can be 'graph-net' or 'tv-l1'.
loss : string, optional (default "logistic")
Loss to be used in the classifier. Must be one of "mse", or "logistic".
l1_ratios : float or list of floats in the interval [0, 1]; optional (default .5)
Constant that mixes L1 and spatial prior terms in penalization.
l1_ratio == 1 corresponds to pure LASSO. The larger the value of this
parameter, the sparser the estimated weights map. If list is provided,
then the best value will be selected by cross-validation.
alphas : float or list of floats, optional (default None)
Choices for the constant that scales the overall regularization term.
This parameter is mutually exclusive with the `n_alphas` parameter.
If None or list of floats is provided, then the best value will be
selected by cross-validation.
n_alphas : int, optional (default 10).
Generate this number of alphas per regularization path.
This parameter is mutually exclusive with the `alphas` parameter.
eps : float, optional (default 1e-3)
Length of the path. For example, ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
mask : filename, niimg, NiftiMasker instance, optional (default None)
Mask to be used on data. If an instance of masker is passed,
then its mask will be used. If no mask is given, it will be computed
automatically by a MultiNiftiMasker with default parameters.
target_affine : 3x3 or 4x4 matrix, optional (default None)
This parameter is passed to image.resample_img. Please see the
related documentation for details.
target_shape : 3-tuple of integers, optional (default None)
This parameter is passed to image.resample_img. Please see the
related documentation for details.
low_pass: None or float, optional
This parameter is passed to signal.clean. Please see the related
documentation for details
high_pass: None or float, optional
This parameter is passed to signal.clean. Please see the related
documentation for details
t_r : float, optional (default None)
This parameter is passed to signal.clean. Please see the related
documentation for details.
screening_percentile : float in the interval [0, 100]; optional (default 20)
Percentile value for ANOVA univariate feature selection. A value of
100 means 'keep all features'. This percentile is expressed
w.r.t the volume of a standard (MNI152) brain, and so is corrected
at runtime by premultiplying it with the ratio of the volume of the
mask of the data and volume of a standard brain. If '100' is given,
all the features are used, regardless of the number of voxels.
standardize : bool, optional (default True):
If set, then the data (X, y) are centered to have mean zero along axis 0.
This is here because nearly all linear models will want their data
to be centered.
fit_intercept : bool, optional (default True)
Fit or not an intercept.
max_iter : int (default 1000)
Defines the maximum number of iterations for the solver.
tol : float
Defines the tolerance for convergence. Defaults to 1e-4.
verbose : int, optional (default 1)
Verbosity level.
n_jobs : int, optional (default 1)
Number of jobs in solving the sub-problems.
memory: instance of joblib.Memory or string
Used to cache the masking process.
By default, no caching is done. If a string is given, it is the
path to the caching directory.
memory_level: integer, optional (default 1)
Rough estimator of the amount of memory used by caching. Higher value
means more memory for caching.
cv : int, a cv generator instance, or None (default 8)
The input specifying which cross-validation generator to use.
It can be an integer, in which case it is the number of folds in a
KFold, None, in which case 3-fold cross-validation is used, or another object, that
will then be used as a cv generator.
debias : bool, optional (default False)
If set, then the estimated weights maps will be debiased.
Attributes
----------
`alpha_` : float
Best alpha found by cross-validation.
`coef_` : array, shape = [n_classes-1, n_features]
Coefficient of the features in the decision function.
`masker_` : instance of NiftiMasker
The nifti masker used to mask the data.
`mask_img_` : Nifti like image
The mask of the data. If no mask was given at masker creation, contains
the automatically computed mask.
`intercept_` : array, shape = [n_classes-1]
Intercept (a.k.a. bias) added to the decision function.
It is available only when parameter intercept is set to True.
`cv_` : list of pairs of lists
Each pair is composed of two lists of indices, one for the train
samples and one for the test samples of the corresponding fold.
`cv_scores_` : 2d array of shape (n_alphas, n_folds)
Scores (misclassification) for each alpha, and on each fold.
`screening_percentile_` : float
Screening percentile corrected according to volume of mask,
relative to the volume of standard brain.
"""
def __init__(self, penalty="graph-net", loss="logistic",
l1_ratios=.5, alphas=None, n_alphas=10, mask=None,
target_affine=None, target_shape=None, low_pass=None,
high_pass=None, t_r=None, max_iter=1000, tol=1e-4,
memory=Memory(None), memory_level=1, standardize=True,
verbose=1, n_jobs=1, eps=1e-3,
cv=8, fit_intercept=True, screening_percentile=20.,
debias=False):
super(SpaceNetClassifier, self).__init__(
penalty=penalty, is_classif=True, l1_ratios=l1_ratios,
alphas=alphas, n_alphas=n_alphas, target_shape=target_shape,
low_pass=low_pass, high_pass=high_pass, mask=mask, t_r=t_r,
max_iter=max_iter, tol=tol, memory=memory,
memory_level=memory_level,
n_jobs=n_jobs, eps=eps, cv=cv, debias=debias,
fit_intercept=fit_intercept, standardize=standardize,
screening_percentile=screening_percentile, loss=loss,
target_affine=target_affine, verbose=verbose)
def _binarize_y(self, y):
"""Helper function invoked just before fitting a classifier."""
y = np.array(y)
# encode target classes as -1 and 1
self._enc = LabelBinarizer(pos_label=1, neg_label=-1)
y = self._enc.fit_transform(y)
self.classes_ = self._enc.classes_
self.n_classes_ = len(self.classes_)
return y
def score(self, X, y):
"""Returns the mean accuracy on the given test data and labels.
Parameters
----------
X : list of Niimg-like objects
See http://nilearn.github.io/manipulating_images/input_output.html
Data on which model is to be fitted. If this is a list,
the affine is considered the same for all.
y : array or list of length n_samples.
Labels.
Returns
-------
score : float
Mean accuracy of self.predict(X) w.r.t y.
"""
return accuracy_score(y, self.predict(X))
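# Hedged example for SpaceNetClassifier (editor's addition; data variables are
# placeholders, the estimator API is the one defined above):
#
#     clf = SpaceNetClassifier(penalty="tv-l1", l1_ratios=0.5)
#     clf.fit(niimgs, labels)                    # labels are binarized internally
#     y_pred = clf.predict(new_niimgs)
#     acc = clf.score(new_niimgs, true_labels)   # mean accuracy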
class SpaceNetRegressor(BaseSpaceNet):
"""Regression learners with sparsity and spatial priors.
`SpaceNetRegressor` implements Graph-Net and TV-L1 priors / penalties
for regression problems. Thus, the penalty is a sum of an L1 term and a
spatial term. The aim of such a hybrid prior is to obtain weights maps
which are structured (due to the spatial prior) and sparse (enforced
by L1 norm).
Parameters
----------
penalty : string, optional (default 'graph-net')
Penalty to be used in the model. Can be 'graph-net' or 'tv-l1'.
l1_ratios : float or list of floats in the interval [0, 1]; optional (default .5)
Constant that mixes L1 and spatial prior terms in penalization.
l1_ratio == 1 corresponds to pure LASSO. The larger the value of this
parameter, the sparser the estimated weights map. If list is provided,
then the best value will be selected by cross-validation.
alphas : float or list of floats, optional (default None)
Choices for the constant that scales the overall regularization term.
This parameter is mutually exclusive with the `n_alphas` parameter.
If None or list of floats is provided, then the best value will be
selected by cross-validation.
n_alphas : int, optional (default 10).
Generate this number of alphas per regularization path.
This parameter is mutually exclusive with the `alphas` parameter.
eps : float, optional (default 1e-3)
Length of the path. For example, ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
mask : filename, niimg, NiftiMasker instance, optional (default None)
Mask to be used on data. If an instance of masker is passed,
then its mask will be used. If no mask is given, it will be computed
automatically by a MultiNiftiMasker with default parameters.
target_affine : 3x3 or 4x4 matrix, optional (default None)
This parameter is passed to image.resample_img. Please see the
related documentation for details.
target_shape : 3-tuple of integers, optional (default None)
This parameter is passed to image.resample_img. Please see the
related documentation for details.
low_pass: None or float, optional
This parameter is passed to signal.clean. Please see the related
documentation for details
high_pass: None or float, optional
This parameter is passed to signal.clean. Please see the related
documentation for details
t_r : float, optional (default None)
This parameter is passed to signal.clean. Please see the related
documentation for details
screening_percentile : float in the interval [0, 100]; optional (default 20)
Percentile value for ANOVA univariate feature selection. A value of
100 means 'keep all features'. This percentile is expressed
w.r.t the volume of a standard (MNI152) brain, and so is corrected
at runtime to correspond to the volume of the user-supplied mask
(which is typically smaller).
standardize : bool, optional (default True):
If set, then the data (X, y) are centered to have mean zero along axis 0.
This is here because nearly all linear models will want their data
to be centered.
fit_intercept : bool, optional (default True)
Fit or not an intercept.
max_iter : int (default 1000)
Defines the maximum number of iterations for the solver.
tol : float
Defines the tolerance for convergence. Defaults to 1e-4.
verbose : int, optional (default 1)
Verbosity level.
n_jobs : int, optional (default 1)
Number of jobs in solving the sub-problems.
memory: instance of joblib.Memory or string
Used to cache the masking process.
By default, no caching is done. If a string is given, it is the
path to the caching directory.
memory_level: integer, optional (default 1)
Rough estimator of the amount of memory used by caching. Higher value
means more memory for caching.
cv : int, a cv generator instance, or None (default 8)
The input specifying which cross-validation generator to use.
It can be an integer, in which case it is the number of folds in a
KFold, None, in which case 3-fold cross-validation is used, or another object, that
will then be used as a cv generator.
debias: bool, optional (default False)
If set, then the estimated weights maps will be debiased.
Attributes
----------
`alpha_` : float
Best alpha found by cross-validation
`coef_` : array, shape = [n_classes-1, n_features]
Coefficient of the features in the decision function.
`masker_` : instance of NiftiMasker
The nifti masker used to mask the data.
`mask_img_` : Nifti like image
The mask of the data. If no mask was given at masker creation, contains
the automatically computed mask.
`intercept_` : array, shape = [n_classes-1]
Intercept (a.k.a. bias) added to the decision function.
It is available only when parameter intercept is set to True.
`cv_scores_` : 2d array of shape (n_alphas, n_folds)
Scores (misclassification) for each alpha, and on each fold
`screening_percentile_` : float
Screening percentile corrected according to volume of mask,
relative to the volume of standard brain.
"""
def __init__(self, penalty="graph-net", l1_ratios=.5, alphas=None,
n_alphas=10, mask=None, target_affine=None,
target_shape=None, low_pass=None, high_pass=None, t_r=None,
max_iter=1000, tol=1e-4, memory=Memory(None), memory_level=1,
standardize=True, verbose=1, n_jobs=1, eps=1e-3, cv=8,
fit_intercept=True, screening_percentile=20., debias=False):
super(SpaceNetRegressor, self).__init__(
penalty=penalty, is_classif=False, l1_ratios=l1_ratios,
alphas=alphas, n_alphas=n_alphas, target_shape=target_shape,
low_pass=low_pass, high_pass=high_pass, mask=mask, t_r=t_r,
max_iter=max_iter, tol=tol, memory=memory,
memory_level=memory_level,
n_jobs=n_jobs, eps=eps, cv=cv, debias=debias,
fit_intercept=fit_intercept, standardize=standardize,
screening_percentile=screening_percentile,
target_affine=target_affine, verbose=verbose)
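# Hedged example for SpaceNetRegressor (editor's addition; data variables are
# placeholders, the estimator API is the one defined above):
#
#     reg = SpaceNetRegressor(penalty="graph-net", l1_ratios=0.5, eps=1e-3)
#     reg.fit(niimgs, continuous_target)   # e.g. age
#     predictions = reg.predict(new_niimgs)
#     weights_niimg = reg.coef_img_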
|
py | b40c31d4f1295087b60205cc8b3fb38e9c21dfb7 | # -*- coding: utf-8 -*-
from tccli.services.gme.gme_client import action_caller
|
py | b40c3230c3d5892660f35bbd54e2396f0734c6a1 | # -*- coding: utf-8 -*-
"""Authentication configuration."""
import logging
import pyramid_authsanity
from pyramid.authentication import RemoteUserAuthenticationPolicy
from h.auth.policy import (
APIAuthenticationPolicy,
AuthClientPolicy,
AuthenticationPolicy,
TokenAuthenticationPolicy,
)
from h.auth.util import default_authority, groupfinder
from h.security import derive_key
__all__ = ("DEFAULT_POLICY", "WEBSOCKET_POLICY")
log = logging.getLogger(__name__)
PROXY_POLICY = RemoteUserAuthenticationPolicy(
environ_key="HTTP_X_FORWARDED_USER", callback=groupfinder
)
TICKET_POLICY = pyramid_authsanity.AuthServicePolicy()
TOKEN_POLICY = TokenAuthenticationPolicy(callback=groupfinder)
AUTH_CLIENT_POLICY = AuthClientPolicy()
API_POLICY = APIAuthenticationPolicy(
user_policy=TOKEN_POLICY, client_policy=AUTH_CLIENT_POLICY
)
DEFAULT_POLICY = AuthenticationPolicy(
api_policy=API_POLICY, fallback_policy=TICKET_POLICY
)
WEBSOCKET_POLICY = TOKEN_POLICY
def includeme(config):
global DEFAULT_POLICY
global WEBSOCKET_POLICY
# Set up authsanity
settings = config.registry.settings
settings["authsanity.source"] = "cookie"
settings["authsanity.cookie.max_age"] = 2592000
settings["authsanity.cookie.httponly"] = True
settings["authsanity.secret"] = derive_key(
settings["secret_key"], settings["secret_salt"], b"h.auth.cookie_secret"
)
config.include("pyramid_authsanity")
if config.registry.settings.get("h.proxy_auth"):
log.warning(
"Enabling proxy authentication mode: you MUST ensure that "
"the X-Forwarded-User request header can ONLY be set by "
"trusted downstream reverse proxies! Failure to heed this "
"warning will result in ALL DATA stored by this service "
"being available to ANYONE!"
)
DEFAULT_POLICY = AuthenticationPolicy(
api_policy=API_POLICY, fallback_policy=PROXY_POLICY
)
WEBSOCKET_POLICY = TOKEN_POLICY
# Set the default authentication policy. This can be overridden by modules
# that include this one.
config.set_authentication_policy(DEFAULT_POLICY)
# Allow retrieval of the authority from the request object.
config.add_request_method(default_authority, name="default_authority", reify=True)
# Allow retrieval of the auth token (if present) from the request object.
config.add_request_method(".tokens.auth_token", reify=True)
|
py | b40c3239e7702f77abab8a47d82466cae2ab5eee | import time
from cannon import Shell, Account
from loguru import logger
@logger.catch
def main():
print("Logging into route-views")
conn = Shell('route-views.routeviews.org', credentials=(Account('rviews', ''),),
auto_priv_mode=False, log_file='', log_screen=True, debug=0)
conn.execute('term len 0')
#conn.sync_prompt(require_detect_prompt=False)
conn.execute('show interface te0/0/0')
conn.execute('show ip vrf')
conn.execute('show ip bgp summ')
conn.execute('show proc cpu sort')
conn.execute('show inventory')
conn.execute('show users')
conn.execute('ping 4.2.2.2')
for ii in range(0, 3):
conn.execute('show version')
version = conn.response
conn.close()
main()
#intfs = conn.execute('show ip int brief', template="""Value INTF (\S+)\nValue IPADDR (\S+)\nValue STATUS (up|down|administratively down)\nValue PROTO (up|down)\n\nStart\n ^${INTF}\s+${IPADDR}\s+\w+\s+\w+\s+${STATUS}\s+${PROTO} -> Record""")
#print(intfs)
#print("-----------------")
#print(version)
|
py | b40c325bd366c0d37c3330b3abe98b7b4e2b1784 | import base64
import hashlib
import hmac
import os
import sys
import time
import requests
import json
import logging
from maj.utils.fileutils import get_all_files
log = logging.getLogger(__name__)
class Identifier:
def __init__(self, key, secret, url):
self.access_key = key
self.access_secret = secret
self.url = url
self.is_identifying = False
self.http_method = "POST"
self.http_uri = "/v1/identify"
self.signature_version = "1"
def identify(self, file_path, data_type='audio'):
self.is_identifying = True
timestamp = time.time()
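# Build the string to sign as expected by the identify endpoint (ACRCloud-style
# signing): HTTP method, URI, access key, data type, signature version and
# timestamp joined by newlines, then signed below with HMAC-SHA1 and base64-encoded.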
string_to_sign = self.http_method + "\n" + self.http_uri + "\n" + self.access_key + "\n" + data_type + "\n" + self.signature_version + "\n" + str(
timestamp)
sign = base64.b64encode(hmac.new(self.access_secret.encode('ascii'), string_to_sign.encode('ascii'),
digestmod=hashlib.sha1).digest()).decode('ascii')
f = open(file_path, "rb")
sample_bytes = os.path.getsize(file_path)
files = [
('sample', ('sample.mp4', f, 'audio/mpeg'))
]
data = {'access_key': self.access_key,
'sample_bytes': sample_bytes,
'timestamp': str(timestamp),
'signature': sign,
'data_type': data_type,
"signature_version": self.signature_version}
r = requests.post(self.url, files=files, data=data)
r.encoding = "utf-8"
self.is_identifying = False
if r.status_code == 200:
response = r.json()
return response
else:
log.warning(str(r.status_code) + ' - ' + r.reason)
return None
def get_song_info_from_response(self, response):
if response is None or response['status']['msg'] != "Success":
log.warning(response)
return None
if len(response['metadata']['music']) < 1:
log.info(response)
return None
else:
song = response['metadata']['music'][0]
title = song['title']
artists = [v['name'] for v in song['artists']]
album = song['album']['name']
return {'title': title,
'artists': artists,
'album': album,
'duration_s': song['duration_ms'] / 1000,
'multipleResults': len(response['metadata']['music']) > 1}
def sample_get():
"""
Sample function to show usage of Identifier
"""
config = {}
with open('config.json') as f:
config = json.load(f)
# GET FROM ACR
access_key = config['acrKey']
access_secret = config['acrSecret']
requrl = config['acrHostUrl']
identifier = Identifier(access_key, access_secret, requrl)
response = identifier.identify(
'F:\\twitch\\recorded\\myanalogjournal_\\myanalogjournal_ - 2021-06-30 15h47m20s.mp4')
info = identifier.get_song_info_from_response(response)
if info is None:
print("Could not identify the current song ...")
else:
print(info)
print("Currently playing: " + info['title'] + '\nBy artist(s): ' + ';'.join(
info['artists']) + '\nAlbum: ' + info['album'])
def demo_create_from_files():
from maj.songlist import Song,SongList
import datetime
config = {}
with open('config.json') as f:
config = json.load(f)
# GET FROM ACR
access_key = config['acrKey']
access_secret = config['acrSecret']
requrl = config['acrHostUrl']
identifier = Identifier(access_key, access_secret, requrl)
playlist = SongList(config['recordedSavePath'], config['channel'], datetime.datetime.today())
files = get_all_files('F:\\twitch\\recorded\\myanalogjournal_', recursive=False)
files.sort()
for f in files:
print(f)
if '2021-06-30' not in f:
continue
response = identifier.identify(f)
info = identifier.get_song_info_from_response(response)
if info is not None:
playlist.add(Song(info))
# if __name__ == "__main__":
# demo_create_from_files()
|
py | b40c338860eb07de0fef2847a3ee9fe3bf850c81 | import cProfile
import logging
import time
import traceback
from typing import Any, AnyStr, Dict, \
Iterable, List, MutableMapping, Optional, \
Union
from django.conf import settings
from django.contrib.sessions.backends.base import UpdateError
from django.contrib.sessions.middleware import SessionMiddleware
from django.core.exceptions import DisallowedHost, SuspiciousOperation
from django.db import connection
from django.http import HttpRequest, HttpResponse, StreamingHttpResponse
from django.shortcuts import render
from django.utils.cache import patch_vary_headers
from django.utils.deprecation import MiddlewareMixin
from django.utils.http import cookie_date
from django.utils.translation import ugettext as _
from django.views.csrf import csrf_failure as html_csrf_failure
from zerver.lib.bugdown import get_bugdown_requests, get_bugdown_time
from zerver.lib.cache import get_remote_cache_requests, get_remote_cache_time
from zerver.lib.debug import maybe_tracemalloc_listen
from zerver.lib.db import reset_queries
from zerver.lib.exceptions import ErrorCode, JsonableError, RateLimited
from zerver.lib.html_to_text import get_content_description
from zerver.lib.queue import queue_json_publish
from zerver.lib.response import json_error, json_response_from_error
from zerver.lib.subdomains import get_subdomain
from zerver.lib.utils import statsd
from zerver.lib.types import ViewFuncT
from zerver.models import Realm, flush_per_request_caches, get_realm
logger = logging.getLogger('zulip.requests')
def record_request_stop_data(log_data: MutableMapping[str, Any]) -> None:
log_data['time_stopped'] = time.time()
log_data['remote_cache_time_stopped'] = get_remote_cache_time()
log_data['remote_cache_requests_stopped'] = get_remote_cache_requests()
log_data['bugdown_time_stopped'] = get_bugdown_time()
log_data['bugdown_requests_stopped'] = get_bugdown_requests()
if settings.PROFILE_ALL_REQUESTS:
log_data["prof"].disable()
def async_request_timer_stop(request: HttpRequest) -> None:
record_request_stop_data(request._log_data)
def record_request_restart_data(log_data: MutableMapping[str, Any]) -> None:
if settings.PROFILE_ALL_REQUESTS:
log_data["prof"].enable()
log_data['time_restarted'] = time.time()
log_data['remote_cache_time_restarted'] = get_remote_cache_time()
log_data['remote_cache_requests_restarted'] = get_remote_cache_requests()
log_data['bugdown_time_restarted'] = get_bugdown_time()
log_data['bugdown_requests_restarted'] = get_bugdown_requests()
def async_request_timer_restart(request: HttpRequest) -> None:
if "time_restarted" in request._log_data:
# Don't destroy data when being called from
# finish_current_handler
return
record_request_restart_data(request._log_data)
def record_request_start_data(log_data: MutableMapping[str, Any]) -> None:
if settings.PROFILE_ALL_REQUESTS:
log_data["prof"] = cProfile.Profile()
log_data["prof"].enable()
reset_queries()
log_data['time_started'] = time.time()
log_data['remote_cache_time_start'] = get_remote_cache_time()
log_data['remote_cache_requests_start'] = get_remote_cache_requests()
log_data['bugdown_time_start'] = get_bugdown_time()
log_data['bugdown_requests_start'] = get_bugdown_requests()
def timedelta_ms(timedelta: float) -> float:
return timedelta * 1000
def format_timedelta(timedelta: float) -> str:
if (timedelta >= 1):
return "%.1fs" % (timedelta,)
return "%.0fms" % (timedelta_ms(timedelta),)
def is_slow_query(time_delta: float, path: str) -> bool:
if time_delta < 1.2:
return False
is_exempt = \
path in ["/activity", "/json/report/error",
"/api/v1/deployments/report_error"] \
or path.startswith("/realm_activity/") \
or path.startswith("/user_activity/")
if is_exempt:
return time_delta >= 5
if 'webathena_kerberos' in path:
return time_delta >= 10
return True
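# Illustrative behaviour (editor's note): with the thresholds above,
# is_slow_query(1.1, '/some/url') is False and is_slow_query(2.0, '/some/url')
# is True, while exempt paths such as '/activity' only count as slow at >= 5s
# and paths containing 'webathena_kerberos' only at >= 10s.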
statsd_blacklisted_requests = [
'do_confirm', 'signup_send_confirm', 'new_realm_send_confirm',
'eventslast_event_id', 'webreq.content', 'avatar', 'user_uploads',
'password.reset', 'static', 'json.bots', 'json.users', 'json.streams',
'accounts.unsubscribe', 'apple-touch-icon', 'emoji', 'json.bots',
'upload_file', 'realm_activity', 'user_activity'
]
def write_log_line(log_data: MutableMapping[str, Any], path: str, method: str, remote_ip: str, email: str,
client_name: str, status_code: int=200, error_content: Optional[AnyStr]=None,
error_content_iter: Optional[Iterable[AnyStr]]=None) -> None:
assert error_content is None or error_content_iter is None
if error_content is not None:
error_content_iter = (error_content,)
if settings.STATSD_HOST != '':
# For statsd timer name
if path == '/':
statsd_path = u'webreq'
else:
statsd_path = u"webreq.%s" % (path[1:].replace('/', '.'),)
# Remove non-ascii chars from path (there should be none, if there are it's
# because someone manually entered a nonexistent path), as UTF-8 chars make
# statsd sad when it sends the key name over the socket
statsd_path = statsd_path.encode('ascii', errors='ignore').decode("ascii")
# TODO: This could probably be optimized to use a regular expression rather than a loop.
suppress_statsd = any((blacklisted in statsd_path for blacklisted in statsd_blacklisted_requests))
else:
suppress_statsd = True
statsd_path = ''
time_delta = -1
# A time duration of -1 means the StartLogRequests middleware
# didn't run for some reason
optional_orig_delta = ""
if 'time_started' in log_data:
time_delta = time.time() - log_data['time_started']
if 'time_stopped' in log_data:
orig_time_delta = time_delta
time_delta = ((log_data['time_stopped'] - log_data['time_started']) +
(time.time() - log_data['time_restarted']))
optional_orig_delta = " (lp: %s)" % (format_timedelta(orig_time_delta),)
remote_cache_output = ""
if 'remote_cache_time_start' in log_data:
remote_cache_time_delta = get_remote_cache_time() - log_data['remote_cache_time_start']
remote_cache_count_delta = get_remote_cache_requests() - log_data['remote_cache_requests_start']
if 'remote_cache_requests_stopped' in log_data:
# (now - restarted) + (stopped - start) = (now - start) + (stopped - restarted)
remote_cache_time_delta += (log_data['remote_cache_time_stopped'] -
log_data['remote_cache_time_restarted'])
remote_cache_count_delta += (log_data['remote_cache_requests_stopped'] -
log_data['remote_cache_requests_restarted'])
if (remote_cache_time_delta > 0.005):
remote_cache_output = " (mem: %s/%s)" % (format_timedelta(remote_cache_time_delta),
remote_cache_count_delta)
if not suppress_statsd:
statsd.timing("%s.remote_cache.time" % (statsd_path,), timedelta_ms(remote_cache_time_delta))
statsd.incr("%s.remote_cache.querycount" % (statsd_path,), remote_cache_count_delta)
startup_output = ""
if 'startup_time_delta' in log_data and log_data["startup_time_delta"] > 0.005:
startup_output = " (+start: %s)" % (format_timedelta(log_data["startup_time_delta"]),)
bugdown_output = ""
if 'bugdown_time_start' in log_data:
bugdown_time_delta = get_bugdown_time() - log_data['bugdown_time_start']
bugdown_count_delta = get_bugdown_requests() - log_data['bugdown_requests_start']
if 'bugdown_requests_stopped' in log_data:
# (now - restarted) + (stopped - start) = (now - start) + (stopped - restarted)
bugdown_time_delta += (log_data['bugdown_time_stopped'] -
log_data['bugdown_time_restarted'])
bugdown_count_delta += (log_data['bugdown_requests_stopped'] -
log_data['bugdown_requests_restarted'])
if (bugdown_time_delta > 0.005):
bugdown_output = " (md: %s/%s)" % (format_timedelta(bugdown_time_delta),
bugdown_count_delta)
if not suppress_statsd:
statsd.timing("%s.markdown.time" % (statsd_path,), timedelta_ms(bugdown_time_delta))
statsd.incr("%s.markdown.count" % (statsd_path,), bugdown_count_delta)
# Get the amount of time spent doing database queries
db_time_output = ""
queries = connection.connection.queries if connection.connection is not None else []
if len(queries) > 0:
query_time = sum(float(query.get('time', 0)) for query in queries)
db_time_output = " (db: %s/%sq)" % (format_timedelta(query_time),
len(queries))
if not suppress_statsd:
# Log ms, db ms, and num queries to statsd
statsd.timing("%s.dbtime" % (statsd_path,), timedelta_ms(query_time))
statsd.incr("%s.dbq" % (statsd_path,), len(queries))
statsd.timing("%s.total" % (statsd_path,), timedelta_ms(time_delta))
if 'extra' in log_data:
extra_request_data = " %s" % (log_data['extra'],)
else:
extra_request_data = ""
logger_client = "(%s via %s)" % (email, client_name)
logger_timing = ('%5s%s%s%s%s%s %s' %
(format_timedelta(time_delta), optional_orig_delta,
remote_cache_output, bugdown_output,
db_time_output, startup_output, path))
logger_line = ('%-15s %-7s %3d %s%s %s' %
(remote_ip, method, status_code,
logger_timing, extra_request_data, logger_client))
if (status_code in [200, 304] and method == "GET" and path.startswith("/static")):
logger.debug(logger_line)
else:
logger.info(logger_line)
if (is_slow_query(time_delta, path)):
queue_json_publish("slow_queries", dict(
query="%s (%s)" % (logger_line, email)))
if settings.PROFILE_ALL_REQUESTS:
log_data["prof"].disable()
profile_path = "/tmp/profile.data.%s.%s" % (path.split("/")[-1], int(time_delta * 1000),)
log_data["prof"].dump_stats(profile_path)
# Log some additional data whenever we return certain 40x errors
if 400 <= status_code < 500 and status_code not in [401, 404, 405]:
assert error_content_iter is not None
error_content_list = list(error_content_iter)
if not error_content_list:
error_data = u''
elif isinstance(error_content_list[0], str):
error_data = u''.join(error_content_list)
elif isinstance(error_content_list[0], bytes):
error_data = repr(b''.join(error_content_list))
if len(error_data) > 200:
error_data = u"[content more than 200 characters]"
logger.info('status=%3d, data=%s, uid=%s' % (status_code, error_data, email))
class LogRequests(MiddlewareMixin):
# We primarily are doing logging using the process_view hook, but
# for some views, process_view isn't run, so we call the start
# method here too
def process_request(self, request: HttpRequest) -> None:
maybe_tracemalloc_listen()
request._log_data = dict()
record_request_start_data(request._log_data)
def process_view(self, request: HttpRequest, view_func: ViewFuncT,
args: List[str], kwargs: Dict[str, Any]) -> None:
# process_request was already run; we save the initialization
# time (i.e. the time between receiving the request and
# figuring out which view function to call, which is primarily
# importing modules on the first start)
request._log_data["startup_time_delta"] = time.time() - request._log_data["time_started"]
# And then completely reset our tracking to only cover work
# done as part of this request
record_request_start_data(request._log_data)
def process_response(self, request: HttpRequest,
response: StreamingHttpResponse) -> StreamingHttpResponse:
# The reverse proxy might have sent us the real external IP
remote_ip = request.META.get('HTTP_X_REAL_IP')
if remote_ip is None:
remote_ip = request.META['REMOTE_ADDR']
# Get the requestor's email address and client, if available.
try:
email = request._email
except Exception:
email = "unauth"
try:
client = request.client.name
except Exception:
client = "?"
if response.streaming:
content_iter = response.streaming_content
content = None
else:
content = response.content
content_iter = None
write_log_line(request._log_data, request.path, request.method,
remote_ip, email, client, status_code=response.status_code,
error_content=content, error_content_iter=content_iter)
return response
class JsonErrorHandler(MiddlewareMixin):
def process_exception(self, request: HttpRequest, exception: Exception) -> Optional[HttpResponse]:
if isinstance(exception, JsonableError):
return json_response_from_error(exception)
if request.error_format == "JSON":
logging.error(traceback.format_exc(), extra=dict(request=request))
return json_error(_("Internal server error"), status=500)
return None
class TagRequests(MiddlewareMixin):
def process_view(self, request: HttpRequest, view_func: ViewFuncT,
args: List[str], kwargs: Dict[str, Any]) -> None:
self.process_request(request)
def process_request(self, request: HttpRequest) -> None:
if request.path.startswith("/api/") or request.path.startswith("/json/"):
request.error_format = "JSON"
else:
request.error_format = "HTML"
class CsrfFailureError(JsonableError):
http_status_code = 403
code = ErrorCode.CSRF_FAILED
data_fields = ['reason']
def __init__(self, reason: str) -> None:
self.reason = reason # type: str
@staticmethod
def msg_format() -> str:
return _("CSRF Error: {reason}")
def csrf_failure(request: HttpRequest, reason: str="") -> HttpResponse:
if request.error_format == "JSON":
return json_response_from_error(CsrfFailureError(reason))
else:
return html_csrf_failure(request, reason)
class RateLimitMiddleware(MiddlewareMixin):
def process_response(self, request: HttpRequest, response: HttpResponse) -> HttpResponse:
if not settings.RATE_LIMITING:
return response
from zerver.lib.rate_limiter import max_api_calls, RateLimitedUser
# Add X-RateLimit-*** headers
if hasattr(request, '_ratelimit'):
# Right now, the only kind of limiting requests is user-based.
ratelimit_user_results = request._ratelimit['RateLimitedUser']
entity = RateLimitedUser(request.user)
response['X-RateLimit-Limit'] = str(max_api_calls(entity))
response['X-RateLimit-Reset'] = str(int(time.time() + ratelimit_user_results['secs_to_freedom']))
if 'remaining' in ratelimit_user_results:
response['X-RateLimit-Remaining'] = str(ratelimit_user_results['remaining'])
return response
# TODO: When we have Django stubs, we should be able to fix the
# type of exception back to just Exception; the problem is without
# stubs, mypy doesn't know that RateLimited's superclass
# PermissionDenied inherits from Exception.
def process_exception(self, request: HttpRequest,
exception: Union[Exception, RateLimited]) -> Optional[HttpResponse]:
if isinstance(exception, RateLimited):
entity_type = str(exception) # entity type is passed to RateLimited when raising
resp = json_error(
_("API usage exceeded rate limit"),
data={'retry-after': request._ratelimit[entity_type]['secs_to_freedom']},
status=429
)
resp['Retry-After'] = request._ratelimit[entity_type]['secs_to_freedom']
return resp
return None
class FlushDisplayRecipientCache(MiddlewareMixin):
def process_response(self, request: HttpRequest, response: HttpResponse) -> HttpResponse:
# We flush the per-request caches after every request, so they
# are not shared at all between requests.
flush_per_request_caches()
return response
class SessionHostDomainMiddleware(SessionMiddleware):
def process_response(self, request: HttpRequest, response: HttpResponse) -> HttpResponse:
try:
request.get_host()
except DisallowedHost:
# If we get a DisallowedHost exception trying to access
# the host, (1) the request is failed anyway and so the
# below code will do nothing, and (2) the below will
# trigger a recursive exception, breaking things, so we
# just return here.
return response
if (not request.path.startswith("/static/") and not request.path.startswith("/api/") and
not request.path.startswith("/json/")):
subdomain = get_subdomain(request)
if subdomain != Realm.SUBDOMAIN_FOR_ROOT_DOMAIN:
try:
get_realm(subdomain)
except Realm.DoesNotExist:
return render(request, "zerver/invalid_realm.html", status=404)
"""
If request.session was modified, or if the configuration is to save the
session every time, save the changes and set a session cookie or delete
the session cookie if the session has been emptied.
"""
try:
accessed = request.session.accessed
modified = request.session.modified
empty = request.session.is_empty()
except AttributeError:
pass
else:
# First check if we need to delete this cookie.
# The session should be deleted only if the session is entirely empty
if settings.SESSION_COOKIE_NAME in request.COOKIES and empty:
response.delete_cookie(
settings.SESSION_COOKIE_NAME,
path=settings.SESSION_COOKIE_PATH,
domain=settings.SESSION_COOKIE_DOMAIN,
)
else:
if accessed:
patch_vary_headers(response, ('Cookie',))
if (modified or settings.SESSION_SAVE_EVERY_REQUEST) and not empty:
if request.session.get_expire_at_browser_close():
max_age = None
expires = None
else:
max_age = request.session.get_expiry_age()
expires_time = time.time() + max_age
expires = cookie_date(expires_time)
# Save the session data and refresh the client cookie.
# Skip session save for 500 responses, refs #3881.
if response.status_code != 500:
try:
request.session.save()
except UpdateError:
raise SuspiciousOperation(
"The request's session was deleted before the "
"request completed. The user may have logged "
"out in a concurrent request, for example."
)
host = request.get_host().split(':')[0]
# The subdomains feature overrides the
# SESSION_COOKIE_DOMAIN setting, since the setting
# is a fixed value and with subdomains enabled,
# the session cookie domain has to vary with the
# subdomain.
session_cookie_domain = host
response.set_cookie(
settings.SESSION_COOKIE_NAME,
request.session.session_key, max_age=max_age,
expires=expires, domain=session_cookie_domain,
path=settings.SESSION_COOKIE_PATH,
secure=settings.SESSION_COOKIE_SECURE or None,
httponly=settings.SESSION_COOKIE_HTTPONLY or None,
)
return response
class SetRemoteAddrFromForwardedFor(MiddlewareMixin):
"""
Middleware that sets REMOTE_ADDR based on the HTTP_X_FORWARDED_FOR.
This middleware replicates Django's former SetRemoteAddrFromForwardedFor middleware.
Because Zulip sits behind a NGINX reverse proxy, if the HTTP_X_FORWARDED_FOR
is set in the request, then it has properly been set by NGINX.
Therefore HTTP_X_FORWARDED_FOR's value is trusted.
"""
def process_request(self, request: HttpRequest) -> None:
try:
real_ip = request.META['HTTP_X_FORWARDED_FOR']
except KeyError:
return None
else:
# HTTP_X_FORWARDED_FOR can be a comma-separated list of IPs.
# For NGINX reverse proxy servers, the client's IP will be the first one.
real_ip = real_ip.split(",")[0].strip()
request.META['REMOTE_ADDR'] = real_ip
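# Example (editor's note): with HTTP_X_FORWARDED_FOR = "203.0.113.4, 10.0.0.1"
# the code above sets REMOTE_ADDR to "203.0.113.4" (the first, client-side IP).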
def alter_content(request: HttpRequest, content: bytes) -> bytes:
first_paragraph_text = get_content_description(content, request)
return content.replace(request.placeholder_open_graph_description.encode("utf-8"),
first_paragraph_text.encode("utf-8"))
class FinalizeOpenGraphDescription(MiddlewareMixin):
def process_response(self, request: HttpRequest,
response: StreamingHttpResponse) -> StreamingHttpResponse:
if getattr(request, "placeholder_open_graph_description", None) is not None:
assert not response.streaming
response.content = alter_content(request, response.content)
return response
|
py | b40c33cf05f46bb9016e534d549d7a700d5551fc | # -*- coding: utf-8 -*-
HGNC_GENE_FAMILY_URL = 'http://www.genenames.org/cgi-bin/genefamilies/download-all/tsv'
|
py | b40c346f8cf23b9a66a7e97e2c4b343ef1cee7b2 | import datetime
import time
import json
import schedule
from libapi import *
checkList = []
def get_people():
with open("people.json","r") as f:
people = json.load(f)
return people
def change_date(date):
date = date.split("-")
new_date = date[0]
for x in date[1:]:
if x.__len__()<2:
new_date = new_date + "-0" + x
else:new_date = new_date + "-" +x
return new_date
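# Example (editor's note): change_date("2021-6-5") returns "2021-06-05",
# zero-padding month and day so the value can be compared with p.dates()[0] below.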
def checkMode(username,password):
p = libapi(username,password)
for x in p.history()["data"]["reservations"]:
date = change_date(x["date"])
if date == p.dates()[0]:
if x["stat"] == "RESERVE":
return [True,x["begin"]]
return [False]
def all_check_time(people):
global checkList
for person in people:
Mode = checkMode(person["username"],person["password"])
if(Mode[0]):
print(person,Mode)
checkList.append({"username":person["username"],"password":person["password"],"time":Mode[1]})
else:
print(person,"No RESERVE")
print("---------------------------------------")
def check_can():
global checkList
time = datetime.datetime.now()
pan_time = time.hour*60+time.minute
for user in list(checkList):  # iterate over a copy since items may be removed below
user_times = [int(x) for x in user["time"].split(":")]
user_time = user_times[0]*60+user_times[1]
if user_time <= pan_time+43:
MycheckIn(user["username"],user["password"])
checkList.remove(user)
def MycheckIn(username,password):
p = libapi(username,password)
c = p.checkIn()
print(datetime.datetime.now(),username,c)
print("-------------------------------------")
def first_check():
all_check_time(get_people())
check_can()
def main():
c1 = schedule.every().day.at("10:36").do(all_check_time,get_people())
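# Note (editor's addition): schedule stores the arguments at registration time,
# so get_people() is evaluated once here and the daily job reuses that snapshot
# of people.json rather than re-reading it on each run.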
print(c1)
print("-------------------------------------")
c2 = schedule.every(10).seconds.do(check_can)
print(c2)
print("-------------------------------------")
first_check()
while True:
schedule.run_pending()
time.sleep(1)
if __name__ == '__main__':
main()
|
py | b40c3511a2306a1ca7187f372397a9174e0bd511 | class Adjacente: #criando a classe
def __init__(self, cidade, distancia): #metodo construtor
self.cidade = cidade #atributo
self.distancia = distancia
# A* evaluation function: f(n) = g(n) + h(n), where g(n) is the edge distance
# and h(n) is the city's estimated distance to the goal
self.distanciaAestrela = self.distancia + self.cidade.distanciaObjetivo |
py | b40c353733b0aa94525eec23d896b95bd604dc29 | """
"""
import base64
import json
import threading
import sys
import requests
import math
import pytz
from Crypto.Cipher import AES
from datetime import datetime
from typing import Dict, Any, List
from urllib import parse
from vnpy.api.rest import RestClient, Request
from vnpy.api.websocket import WebsocketClient
from vnpy.trader.constant import (
Direction,
Offset,
Exchange,
Product,
Status,
OrderType,
)
from vnpy.trader.gateway import BaseGateway, LocalOrderManager
from vnpy.trader.object import (
TickData,
OrderData,
TradeData,
AccountData,
PositionData,
ContractData,
OrderRequest,
CancelRequest,
SubscribeRequest,
)
AUTHENTICA_URL = "https://dev-kgl.jt00000.com/kgl-third-authorization/oauth/token"
QUOTE_URL = "https://kgl.jt00000.com/hq"
REST_HOST = "https://dev-kgl.jt00000.com/kgl-trade-service"
WEBSOCKET_TRADE_HOST = "ws://dev-kgl.jt00000.com/kgl-trader-push-service/ws"
WEBSOCKET_DATA_HOST = "ws://dev-kgl.jt00000.com/dz_app_ws/ws"
START_PUSH = 200
STOP_PUSH = 201
SYNCHRONIZE_PUSH = 250
QUERY_HISTORY = 36
QUERY_CONTRACT = 52
ON_TICK = 251
PING = 2
PONG = 3
LOGIN = 10
HKSE_MARKET = 2002
CREATION = 101
UPDATE = 102
TRADE = 103
CANCELLATION = 104
ACCOUT = 106
POSITION = 105
STATUS_KAISA2VT = {
"NEW": Status.NOTTRADED,
"WA": Status.NOTTRADED,
"PRO": Status.NOTTRADED,
"Q": Status.NOTTRADED,
"REJ": Status.REJECTED,
"PEX": Status.PARTTRADED,
"FEX": Status.ALLTRADED,
"CAN": Status.CANCELLED,
}
ORDERTYPE_VT2KAISA = {
OrderType.MARKET: "A",
OrderType.LIMIT: "L",
}
ORDERTYPE_KAISA2VT = {v: k for k, v in ORDERTYPE_VT2KAISA.items()}
ORDERTYPE_KAISA2VT["E"] = OrderType.LIMIT
ORDERTYPE_KAISA2VT["S"] = OrderType.LIMIT
ORDERTYPE_KAISA2VT["I"] = OrderType.LIMIT
DIRECTION_VT2KAISA = {
Direction.LONG: "B",
Direction.SHORT: "S",
}
DIRECTION_KAISA2VT = {v: k for k, v in DIRECTION_VT2KAISA.items()}
EXCHANGE_KAISA2VT: Dict[str, Exchange] = {
"HKEX": Exchange.HKSE
}
EXCHANGE_VT2KAISA = {v: k for k, v in EXCHANGE_KAISA2VT.items()}
CHINA_TZ = pytz.timezone("Asia/Shanghai")
symbol_name_map = {}
class KaisaGateway(BaseGateway):
"""
VN Trader Gateway for Kaisa connection.
"""
authentica_status: bool = False
order_trade_id = {}
token: str = ""
req_id: int = 0
default_setting: Dict[str, Any] = {
"auth_id": "",
"auth_password": "",
"user_id": "",
"password": "",
"会话数": 3,
}
exchanges: List[Exchange] = [Exchange.HKSE]
def __init__(self, event_engine):
"""Constructor"""
super().__init__(event_engine, "KAISA")
self.order_manager = LocalOrderManager(self)
self.rest_api = KaisaTradeRestApi(self)
self.trade_ws_api = KaisaTradeWebsocketApi(self)
self.market_ws_api = KaisaDataWebsocketApi(self)
def connect(self, setting: dict) -> None:
""""""
auth_id = setting["auth_id"]
auth_password = setting["auth_password"]
user_id = setting["user_id"]
_password = setting["password"]
password = self.encrypt(_password)
session_number = setting["会话数"]
if not self.authentica_status:
self.authentica(auth_id, auth_password)
if not self.token:
return
self.rest_api.connect(
user_id, password, self.token, session_number)
self.query_contract()
self.trade_ws_api.connect(user_id, password, self.token)
self.market_ws_api.connect(user_id, password, self.token)
def subscribe(self, req: SubscribeRequest) -> int:
""""""
self.market_ws_api.subscribe(req)
def send_order(self, req: OrderRequest) -> str:
""""""
return self.rest_api.send_order(req)
def cancel_order(self, req: CancelRequest) -> None:
""""""
self.rest_api.cancel_order(req)
def query_account(self) -> None:
""""""
self.rest_api.query_account()
def query_position(self) -> None:
""""""
self.rest_api.query_position()
def close(self) -> None:
""""""
self.rest_api.stop()
self.trade_ws_api.stop()
self.market_ws_api.stop()
def write_error(self, data) -> None:
""""""
error_code = data["retCode"]
error_msg = data["retMsg"]
msg = f"错误号 {error_code}, 错误信息 {error_msg}"
self.write_error(msg)
def authentica(self, auth_id: str, auth_password: str) -> None:
""""""
params = {
"username": auth_id,
"password": auth_password,
"grant_type": "password",
"scope": "vnpy-xx"
}
headers = {
"Authorization": "basic dm5weV9jbGllbnQ6dm5weV9jbGllbnRfc2VjcmV0",
"Content-Type": "application/json"
}
response = requests.post(
url=AUTHENTICA_URL, params=params, headers=headers
)
if response.status_code // 100 == 2:
self.write_log("网关认证成功")
data = response.json()
token_body = data["body"]["accessToken"]
self.token = f"bearer {token_body}"
else:
data = response.json()
error_msg = data["retMsg"]
self.write_log(f"网关认证失败,原因: {error_msg}")
def _query_contract(self):
""""""
self.req_id += 1
data = {
"reqtype": QUERY_CONTRACT,
"reqid": self.req_id,
"session": "",
"data": {
"marketid": HKSE_MARKET,
"idtype": 1,
"beginpos": 0,
"count": 1000,
"getquote": 0
}
}
headers = {
"Authorization": self.token,
"Content-Type": "application/json"
}
response = requests.post(
url=QUOTE_URL, data=json.dumps(data), headers=headers)
status = response.status_code
if status == 200:
data = response.json()
symbols = data["data"]["symbol"]
for d in symbols:
symbol = d["code"]
name = d["name"]
symbol_name_map[symbol] = name
contract = ContractData(
symbol=d["code"],
exchange=Exchange.HKSE,
name=d["name"],
pricetick=math.pow(10, -d["dec"]),
size=1,
min_volume=d["lotsize"],
product=Product.SPOT,
history_data=True,
gateway_name=self.gateway_name,
)
self.on_contract(contract)
self.write_log("合约信息查询成功")
else:
self.write_log("合约查询失败")
def query_contract(self):
""""""
threading.Thread(target=self._query_contract).start()
    def encrypt(self, text):
        """Encrypt the login password with AES-CBC and URL-quote the result."""
        key = "login&pwd@glob)!"
        iv = "kai&sa!global@)!"
        cryptor = AES.new(key.encode("utf8"), AES.MODE_CBC, iv.encode("utf8"))
        # Pad with newlines up to a multiple of the 16-byte AES block size.
        # The original code padded to exactly 16 characters, which only works
        # for passwords of at most 16 characters; longer input raised an error.
        padded_length = max(16, (len(text) + 15) // 16 * 16)
        text_adjusted = text.ljust(padded_length, "\n")
        ciphertext = cryptor.encrypt(bytes(text_adjusted, encoding="utf8"))
        encrypt_password = base64.b64encode(ciphertext)
        password = parse.quote(encrypt_password, "\\")
        return password
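# Illustrative sketch (not part of the original gateway): how the keys declared
# in ``KaisaGateway.default_setting`` above would typically be passed to
# ``connect()``. All concrete values below are placeholders.
#
#     gateway = KaisaGateway(event_engine)
#     gateway.connect({
#         "auth_id": "my_auth_id",
#         "auth_password": "my_auth_password",
#         "user_id": "my_account_code",
#         "password": "my_password",
#         "会话数": 3,
#     })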
class KaisaTradeRestApi(RestClient):
"""
KAISA REST API
"""
def __init__(self, gateway: BaseGateway):
""""""
super().__init__()
self.gateway: KaisaGateway = gateway
self.gateway_name: str = gateway.gateway_name
self.order_manager: LocalOrderManager = gateway.order_manager
self.host: str = ""
self.user_id: str = ""
self.password: str = ""
self.account_id: str = ""
self.token: str = ""
self.trader_count: int = 10000
self.connect_time: int = 0
def sign(self, request) -> Request:
"""
Generate KAISA signature.
"""
request.headers = {
"Authorization": self.token
}
if request.method == "POST":
request.headers["Content-Type"] = "application/json"
if not request.params:
request.params = {"accountCode": self.user_id}
if request.data:
request.data = json.dumps(request.data)
return request
def connect(
self,
user_id: str,
password: str,
token: str,
session_number: int,
) -> None:
"""
Initialize connection to REST server.
"""
self.user_id = user_id
self.password = password
self.token = token
self.connect_time = int(datetime.now().strftime("%y%m%d%H%M%S"))
self.init(REST_HOST)
self.start(session_number)
self.gateway.write_log("REST API启动成功")
self.login()
def login(self) -> Request:
""""""
data = {
"channelType": "INTERNET",
"accountCode": self.user_id,
"password": self.password
}
self.add_request(
method="POST",
path="/v1/account/login",
callback=self.on_login,
data=data
)
def query_account(self) -> Request:
""""""
self.add_request(
method="GET",
path="/v1/account/accounts/balance",
callback=self.on_query_account,
)
def query_position(self) -> Request:
""""""
self.add_request(
method="GET",
path="/v1/account/accounts/position",
callback=self.on_query_position,
)
def query_order(self) -> Request:
""""""
self.add_request(
method="GET",
path="/v1/order/orders",
callback=self.on_query_order,
)
def send_order(self, req: OrderRequest) -> str:
""""""
local_orderid = self.order_manager.new_local_orderid()
order = req.create_order_data(
local_orderid,
self.gateway_name
)
order.datetime = datetime.now(CHINA_TZ)
data = {
"channelType": "I",
"exchangeCode": EXCHANGE_VT2KAISA[req.exchange],
"accountCode": self.user_id,
"productCode": req.symbol,
"price": req.price,
"qty": int(req.volume),
"bsFlag": DIRECTION_VT2KAISA[req.direction],
"orderType": ORDERTYPE_VT2KAISA[req.type],
}
self.add_request(
method="POST",
path="/v1/order/orders/place",
callback=self.on_send_order,
data=data,
extra=order,
on_error=self.on_send_order_error,
on_failed=self.on_send_order_failed
)
self.order_manager.on_order(order)
return order.vt_orderid
def cancel_order(self, req: CancelRequest) -> Request:
""""""
sys_orderid = self.order_manager.get_sys_orderid(req.orderid)
data = {
"channelType": "I",
"accountCode": self.user_id,
"orderID": sys_orderid
}
self.add_request(
method="POST",
path="/v1/order/orders/cancel",
callback=self.on_cancel_order,
on_failed=self.on_cancel_order_failed,
data=data,
extra=req
)
def on_login(self, data: dict, request: Request) -> None:
""""""
if self.check_error(data, "账号登录"):
return
self.gateway.write_log("账户登陆成功")
self.query_account()
self.query_position()
self.query_order()
def on_query_account(self, data: dict, request: Request) -> None:
""""""
if self.check_error(data, "查询账户"):
return
body = data["body"]
account = AccountData(
accountid=body["accountCode"],
balance=float(body["cash"]),
frozen=float(body["frozenCash"]),
gateway_name=self.gateway_name,
)
self.gateway.on_account(account)
def on_query_position(self, data: dict, request: Request) -> None:
""""""
if self.check_error(data, "查询持仓"):
return
positions = data["body"]["holdShareUnitList"]
for d in positions:
position = PositionData(
symbol=d["productCode"],
exchange=EXCHANGE_KAISA2VT[d["exchangeCode"]],
direction=Direction.NET,
volume=int(d["qty"]),
frozen=int(d["qty"]) - int(d["availQty"]),
price=float(d["avgCost"]),
pnl=float(d["pl"]),
gateway_name=self.gateway_name
)
self.gateway.on_position(position)
def on_query_order(self, data: dict, request: Request) -> None:
""""""
if self.check_error(data, "查询活动委托"):
return
body = data["body"]["mutilOrders"]
orders = body[::-1]
for d in orders:
sys_orderid = d["orderID"]
local_orderid = self.order_manager.get_local_orderid(sys_orderid)
traded = int(d["execQty"])
order = OrderData(
orderid=local_orderid,
symbol=d["productCode"],
exchange=Exchange.HKSE,
price=float(d["price"]),
volume=int(d["qty"]),
type=ORDERTYPE_KAISA2VT[d["orderType"]],
direction=DIRECTION_KAISA2VT[d["bsFlag"]],
offset=Offset.NONE,
traded=traded,
status=STATUS_KAISA2VT[d["orderStatus"]],
datetime=generate_datetime(d["createTime"]),
gateway_name=self.gateway_name,
)
self.order_manager.on_order(order)
if traded > 0:
self.trader_count += 1
self.gateway.order_trade_id[local_orderid] = self.trader_count
trade = TradeData(
orderid=local_orderid,
symbol=d["productCode"],
exchange=Exchange.HKSE,
                    tradeid=str(self.trader_count),
direction=DIRECTION_KAISA2VT[d["bsFlag"]],
offset=Offset.NONE,
volume=traded,
price=float(d["execPrice"]),
datetime=generate_datetime(d["updatedTime"]),
gateway_name=self.gateway_name,
)
self.gateway.on_trade(trade)
self.gateway.write_log(f"委托信息查询成功")
self.gateway.write_log("成交查询成功")
def on_send_order(self, data: dict, request: Request) -> None:
""""""
order = request.extra
if self.check_error(data, "委托"):
order.status = Status.REJECTED
self.order_manager.on_order(order)
return
sys_orderid = data["body"]["orderID"]
self.order_manager.update_orderid_map(order.orderid, sys_orderid)
    def on_send_order_failed(self, status_code: int, request: Request) -> None:
"""
Callback when sending order failed on server.
"""
order = request.extra
order.status = Status.REJECTED
self.gateway.on_order(order)
msg = f"委托失败,状态码:{status_code},信息:{request.response.text}"
self.gateway.write_log(msg)
def on_send_order_error(
self,
exception_type: type,
exception_value: Exception,
tb,
request: Request
) -> None:
"""
Callback when sending order caused exception.
"""
order = request.extra
order.status = Status.REJECTED
self.gateway.on_order(order)
# Record exception if not ConnectionError
if not issubclass(exception_type, ConnectionError):
self.on_error(exception_type, exception_value, tb, request)
def on_cancel_order(self, data: dict, request: Request) -> None:
""""""
cancel_request = request.extra
local_orderid = cancel_request.orderid
order = self.order_manager.get_order_with_local_orderid(local_orderid)
if self.check_error(data, "撤单"):
order.status = Status.REJECTED
else:
order.status = Status.CANCELLED
self.gateway.write_log(f"委托撤单成功:{order.orderid}")
self.order_manager.on_order(order)
    def on_cancel_order_failed(self, status_code: int, request: Request) -> None:
"""
Callback when canceling order failed on server.
"""
msg = f"撤单失败,状态码:{status_code},信息:{request.response.text}"
self.gateway.write_log(msg)
def on_error(
self,
exception_type: type,
exception_value: Exception,
tb,
request: Request
) -> None:
"""
Callback to handler request exception.
"""
msg = f"触发异常,状态码:{exception_type},信息:{exception_value}"
self.gateway.write_log(msg)
sys.stderr.write(
self.exception_detail(exception_type, exception_value, tb, request)
)
def check_error(self, data: dict, func: str = ""):
""""""
if data["success"]:
return False
error_code = data["retCode"]
error_msg = data["retMsg"]
self.gateway.write_log(f"{func}请求出错,代码:{error_code},信息:{error_msg}")
return True
class KaisaWebsocketApiBase(WebsocketClient):
""""""
def __init__(self, gateway):
""""""
super(KaisaWebsocketApiBase, self).__init__()
self.gateway = gateway
self.gateway_name = gateway.gateway_name
self.user_id: str = ""
self.password: str = ""
def connect(
self,
user_id: str,
password: str,
token: str,
url: str
) -> None:
""""""
self.user_id = user_id
self.password = password
self.init(
host=url,
header={"Authorization": token}
)
self.start()
def login(self):
""""""
data = {
"accountCode": self.user_id,
"password": self.password,
"ipAddress": "198.22.32.2"
}
req = self.generate_req(LOGIN, data)
return self.send_packet(req)
def generate_req(self, reqtype: int, data: dict) -> dict:
self.gateway.req_id += 1
req = {
"reqtype": reqtype,
"reqid": self.gateway.req_id,
"session": "",
"data": data
}
return req
def on_packet(self, packet: dict) -> None:
""""""
reqtype = packet["reqtype"]
data = packet["data"]
if packet["status"] != 0:
error_msg = packet["msg"]
msg = f"请求{reqtype}出错,错误信息{error_msg}"
self.gateway.write_log(msg)
else:
if reqtype == PING:
req = self.generate_req(PONG, {"ts": data["ts"]})
self.send_packet(req)
else:
self.on_data(reqtype, data)
def on_data(self, reqtype: int, data: dict) -> None:
""""""
print("data : {}".format(data))
class KaisaTradeWebsocketApi(KaisaWebsocketApiBase):
""""""
def __init__(self, gateway):
""""""
super().__init__(gateway)
self.order_manager = gateway.order_manager
self.order_manager.push_data_callback = self.on_data
self.event_callbacks = {
CREATION: self.on_order,
UPDATE: self.on_order,
TRADE: self.on_trade,
CANCELLATION: self.on_cancel_order,
ACCOUT: self.on_account,
POSITION: self.on_position,
}
def connect(self, user_id: str, password: str, token: str) -> None:
""""""
super().connect(user_id, password, token, WEBSOCKET_TRADE_HOST)
def on_connected(self) -> None:
""""""
self.gateway.write_log("交易Websocket API连接成功")
self.login()
def on_login(self, data):
""""""
self.gateway.write_log("交易Websocket API登录成功")
def on_data(self, reqtype: int, data: dict) -> None:
""""""
if reqtype == LOGIN:
self.on_login(data)
elif reqtype == SYNCHRONIZE_PUSH:
event = data["eventType"]
func = self.event_callbacks[event]
func(data)
def on_account(self, data: dict) -> None:
""""""
account = AccountData(
accountid=data["accountCode"],
balance=float(data["cash"]),
frozen=float(data["frozenCash"]),
gateway_name=self.gateway_name,
)
self.gateway.on_account(account)
def on_create_order(self, data) -> None:
""""""
pass
def on_cancel_order(self, data) -> None:
""""""
sys_orderid = data["orderID"]
order = self.order_manager.get_order_with_sys_orderid(sys_orderid)
order.status = STATUS_KAISA2VT[data["orderStatus"]]
self.order_manager.on_order(order)
def on_order(self, data: dict) -> None:
""""""
sys_orderid = str(data["orderID"])
local_orderid = self.order_manager.get_local_orderid(sys_orderid)
order = OrderData(
symbol=data["productCode"],
exchange=Exchange.HKSE,
orderid=local_orderid,
direction=DIRECTION_KAISA2VT[data["bsFlag"]],
price=float(data["price"]),
volume=int(data["qty"]),
traded=int(data["execQty"]),
status=STATUS_KAISA2VT[data["orderStatus"]],
datetime=generate_datetime(data["createTime"]),
gateway_name=self.gateway_name
)
self.gateway.on_order(order)
def on_position(self, data: dict) -> None:
""""""
position = PositionData(
symbol=data["productCode"],
exchange=Exchange.HKSE,
direction=Direction.NET,
volume=int(data["qty"]),
frozen=int(data["qty"]) - int(data["availSellQty"]),
price=float(data["avgCost"]),
pnl=float(data["pl"]),
gateway_name=self.gateway_name
)
self.gateway.on_position(position)
def on_trade(self, data: dict) -> None:
""""""
sys_orderid = str(data["orderID"])
order = self.order_manager.get_order_with_sys_orderid(sys_orderid)
order.status = STATUS_KAISA2VT[data["orderStatus"]]
order.traded = int(data["execQty"])
self.gateway.on_order(order)
self.gateway.rest_api.trader_count += 1
trade = TradeData(
tradeid=str(self.gateway.rest_api.trader_count),
symbol=data["productCode"],
exchange=Exchange.HKSE,
orderid=order.orderid,
direction=order.direction,
price=float(data["execPrice"]),
volume=int(data["execQty"]),
datetime=generate_datetime(data["tradeTime"]),
gateway_name=self.gateway_name
)
self.gateway.on_trade(trade)
class KaisaDataWebsocketApi(KaisaWebsocketApiBase):
""""""
connected_status: bool = False
def __init__(self, gateway):
""""""
super().__init__(gateway)
self.callbacks = {
START_PUSH: self.on_start_subscribe,
STOP_PUSH: self.on_stop_subscribe,
SYNCHRONIZE_PUSH: self.on_depth,
ON_TICK: self.on_tick,
}
def on_connected(self) -> None:
""""""
self.gateway.write_log("行情Websocket API连接成功")
self.connected_status = True
def on_data(self, reqtype: int, data: dict) -> None:
""""""
func = self.callbacks[reqtype]
func(data)
def on_tick(self, data):
""""""
ticks = data["tick"]
for d in ticks:
millisecond = str(d["millisecond"]).ljust(6, "0")
timestamp = f"{d['time']}.{millisecond}"
tick = TickData(
symbol=d["code"],
exchange=Exchange.HKSE,
name=symbol_name_map[d['code']],
datetime=generate_datetime(timestamp),
volume=d["volume"],
last_price=d["price"],
gateway_name=self.gateway_name
)
self.gateway.on_tick(tick)
def on_depth(self, data: dict) -> None:
""""""
pass
def on_start_subscribe(self, data: dict) -> None:
""""""
pass
def on_stop_subscribe(self, data: dict) -> None:
""""""
pass
def connect(self, userid: str, password: str, token: str) -> None:
""""""
super().connect(userid, password, token, WEBSOCKET_DATA_HOST)
    def subscribe(self, req: SubscribeRequest) -> None:
""""""
if not self.connected_status:
return
data = [{
"market": 2002,
"code": req.symbol,
"type": 3,
"language": 0
}]
req = self.generate_req(200, data)
self.send_packet(req)
def generate_datetime(timestamp: str) -> datetime:
""""""
if "." in timestamp:
dt = datetime.strptime(timestamp, "%Y-%m-%d %H:%M:%S.%f")
else:
dt = datetime.strptime(timestamp, "%Y-%m-%d %H:%M:%S")
dt = CHINA_TZ.localize(dt)
return dt
|
py | b40c373afa1df6170eb8fbbdb445f1b43a85d061 | # ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
from azure.core.exceptions import ClientAuthenticationError
from .. import CredentialUnavailableError
try:
from typing import TYPE_CHECKING
except ImportError:
TYPE_CHECKING = False
if TYPE_CHECKING:
# pylint:disable=unused-import,ungrouped-imports
from typing import Any, Optional
from azure.core.credentials import AccessToken, TokenCredential
def _get_error_message(history):
attempts = []
for credential, error in history:
if error:
attempts.append("{}: {}".format(credential.__class__.__name__, error))
else:
attempts.append(credential.__class__.__name__)
return """
Attempted credentials:\n\t{}""".format(
"\n\t".join(attempts)
)
class ChainedTokenCredential(object):
"""A sequence of credentials that is itself a credential.
Its :func:`get_token` method calls ``get_token`` on each credential in the sequence, in order, returning the first
valid token received.
:param credentials: credential instances to form the chain
:type credentials: :class:`azure.core.credentials.TokenCredential`
"""
def __init__(self, *credentials):
# type: (*TokenCredential) -> None
if not credentials:
raise ValueError("at least one credential is required")
self._successful_credential = None # type: Optional[TokenCredential]
self.credentials = credentials
def get_token(self, *scopes, **kwargs): # pylint:disable=unused-argument
# type: (*str, **Any) -> AccessToken
"""Request a token from each chained credential, in order, returning the first token received.
.. note:: This method is called by Azure SDK clients. It isn't intended for use in application code.
:param str scopes: desired scopes for the access token. This method requires at least one scope.
:raises ~azure.core.exceptions.ClientAuthenticationError: no credential in the chain provided a token
"""
history = []
for credential in self.credentials:
try:
token = credential.get_token(*scopes, **kwargs)
self._successful_credential = credential
return token
except CredentialUnavailableError as ex:
# credential didn't attempt authentication because it lacks required data or state -> continue
history.append((credential, ex.message))
except Exception as ex: # pylint: disable=broad-except
# credential failed to authenticate, or something unexpectedly raised -> break
history.append((credential, str(ex)))
break
attempts = _get_error_message(history)
message = self.__class__.__name__ + " failed to retrieve a token from the included credentials." + attempts
raise ClientAuthenticationError(message=message)
|
py | b40c37a50f9f59645df9dc84d3a5877b9cbb4fcd | #
# On Unix we run a server process which keeps track of unlinked
# semaphores. The server ignores SIGINT and SIGTERM and reads from a
# pipe. Every other process of the program has a copy of the writable
# end of the pipe, so we get EOF when all other processes have exited.
# Then the server process unlinks any remaining semaphore names.
#
# This is important because the system only supports a limited number
# of named semaphores, and they will not be automatically removed till
# the next reboot. Without this semaphore tracker process, "killall
# python" would probably leave unlinked semaphores.
#
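#
# Minimal usage sketch (added for illustration, not part of the original
# module): user code normally only touches the module-level helpers exported
# below; the semaphore name is a placeholder.
#
#     from multiprocess import semaphore_tracker
#     semaphore_tracker.ensure_running()        # start the tracker process
#     semaphore_tracker.register("/mysem")      # remember a named semaphore
#     semaphore_tracker.unregister("/mysem")    # forget it once unlinked
#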
import os
import signal
import sys
import threading
import warnings
try:
import _multiprocess as _multiprocessing
except ImportError:
import _multiprocessing
from . import spawn
from . import util
__all__ = ['ensure_running', 'register', 'unregister']
class SemaphoreTracker(object):
def __init__(self):
self._lock = threading.Lock()
self._fd = None
self._pid = None
def getfd(self):
self.ensure_running()
return self._fd
def ensure_running(self):
'''Make sure that semaphore tracker process is running.
This can be run from any process. Usually a child process will use
the semaphore created by its parent.'''
with self._lock:
if self._pid is not None:
# semaphore tracker was launched before, is it still running?
pid, status = os.waitpid(self._pid, os.WNOHANG)
if not pid:
# => still alive
return
# => dead, launch it again
os.close(self._fd)
self._fd = None
self._pid = None
warnings.warn('semaphore_tracker: process died unexpectedly, '
'relaunching. Some semaphores might leak.')
fds_to_pass = []
try:
fds_to_pass.append(sys.stderr.fileno())
except Exception:
pass
cmd = 'from multiprocess.semaphore_tracker import main;main(%d)'
r, w = os.pipe()
try:
fds_to_pass.append(r)
# process will out live us, so no need to wait on pid
exe = spawn.get_executable()
args = [exe] + util._args_from_interpreter_flags()
args += ['-c', cmd % r]
pid = util.spawnv_passfds(exe, args, fds_to_pass)
except:
os.close(w)
raise
else:
self._fd = w
self._pid = pid
finally:
os.close(r)
def register(self, name):
'''Register name of semaphore with semaphore tracker.'''
self._send('REGISTER', name)
def unregister(self, name):
'''Unregister name of semaphore with semaphore tracker.'''
self._send('UNREGISTER', name)
def _send(self, cmd, name):
self.ensure_running()
msg = '{0}:{1}\n'.format(cmd, name).encode('ascii')
if len(name) > 512:
# posix guarantees that writes to a pipe of less than PIPE_BUF
# bytes are atomic, and that PIPE_BUF >= 512
raise ValueError('name too long')
nbytes = os.write(self._fd, msg)
assert nbytes == len(msg), "nbytes {0:n} but len(msg) {1:n}".format(
nbytes, len(msg))
_semaphore_tracker = SemaphoreTracker()
ensure_running = _semaphore_tracker.ensure_running
register = _semaphore_tracker.register
unregister = _semaphore_tracker.unregister
getfd = _semaphore_tracker.getfd
def main(fd):
'''Run semaphore tracker.'''
# protect the process from ^C and "killall python" etc
signal.signal(signal.SIGINT, signal.SIG_IGN)
signal.signal(signal.SIGTERM, signal.SIG_IGN)
for f in (sys.stdin, sys.stdout):
try:
f.close()
except Exception:
pass
cache = set()
try:
# keep track of registered/unregistered semaphores
with open(fd, 'rb') as f:
for line in f:
try:
cmd, name = line.strip().split(b':')
if cmd == b'REGISTER':
cache.add(name)
elif cmd == b'UNREGISTER':
cache.remove(name)
else:
raise RuntimeError('unrecognized command %r' % cmd)
except Exception:
try:
sys.excepthook(*sys.exc_info())
except:
pass
finally:
# all processes have terminated; cleanup any remaining semaphores
if cache:
try:
warnings.warn('semaphore_tracker: There appear to be %d '
'leaked semaphores to clean up at shutdown' %
len(cache))
except Exception:
pass
for name in cache:
# For some reason the process which created and registered this
# semaphore has failed to unregister it. Presumably it has died.
# We therefore unlink it.
try:
name = name.decode('ascii')
try:
_multiprocessing.sem_unlink(name)
except Exception as e:
warnings.warn('semaphore_tracker: %r: %s' % (name, e))
finally:
pass
|
py | b40c37bcaaab1bb4b3c667a4d14451734479a4f6 | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Hdf5VfdGds(CMakePackage, CudaPackage):
"""This package enables GPU Direct Storage Virtual File Driver in HDF5."""
# Package info
homepage = 'https://github.com/hpc-io/vfd-gds'
url = 'https://github.com/hpc-io/vfd-gds/archive/refs/tags/1.0.1.tar.gz'
git = 'https://github.com/hpc-io/vfd-gds.git'
maintainers = ['hyoklee', 'lrknox']
# Versions
version('master', branch='master')
version('1.0.1', sha256='00e125fd149561be991f41e883824de826d8add604aebccf103a4fb82d5faac2')
version('1.0.0', sha256='6b16105c7c49f13fc05784ee69b78d45fb159270c78d760689f9cd21e230ddd2')
# Dependencies
conflicts('~cuda')
depends_on('[email protected]:')
depends_on('[email protected]:')
def cmake_args(self):
# CMake options
args = [
self.define('BUILD_TESTING', self.run_tests),
]
return args
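# Illustrative usage (an assumption, not from the original file): once this
# recipe is available in a Spack repository under the conventional name
# ``hdf5-vfd-gds``, it would typically be installed with CUDA enabled, e.g.
#
#     spack install hdf5-vfd-gds +cuda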
|
py | b40c3810aad84cb71f6887ca15991fb45587c293 | # Generated by Django 2.2.10 on 2020-03-17 08:18
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [("salespersonTrackerREST", "0001_initial")]
operations = [
migrations.CreateModel(
name="TotalTargets",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("Task_Assigned", models.IntegerField()),
(
"User_ref",
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="salespersonTrackerREST.Manager",
),
),
],
)
]
|
py | b40c38182e228f34419b10f24db4991f267c8e3c | import discord
def check_setup(ctx):
    # TODO: check the database for whether this server has already been set up
    pass
def search_db(label):
pass
def insert_db(data):
pass |
py | b40c39136284c9555656fec94ad8e330e2f30468 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the PyMVPA package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Verbose output and debugging facility
Examples:
from verbosity import verbose, debug; debug.active = [1,2,3]; debug(1, "blah")
"""
__docformat__ = 'restructuredtext'
from sys import stdout, stderr
# GOALS
# any logger should be able
# to log into a file or stdout/stderr
# provide ability to log with/without a new line at the end
#
# debug logger should be able
# to log sets of debug statements
# add/remove debug setid items
# give verbose description about registered debugset items
class Logger(object):
"""Base class to provide logging
"""
def __init__(self, handlers=None):
"""Initialize the logger with a set of handlers to use for output
Each hanlder must have write() method implemented
"""
if handlers == None:
handlers = [stdout]
self.__close_handlers = []
self.__handlers = [] # pylint friendliness
self._set_handlers(handlers)
self.__lfprev = True
self.__crprev = 0 # number of symbols in previous cr-ed
def __del__(self):
self._close_opened_handlers()
##REF: Name was automagically refactored
def _set_handlers(self, handlers):
"""Set list of handlers for the log.
A handler can be opened files, stdout, stderr, or a string, which
will be considered a filename to be opened for writing
"""
handlers_ = []
self._close_opened_handlers()
for handler in handlers:
if isinstance(handler, basestring):
try:
handler = {'stdout' : stdout,
'stderr' : stderr}[handler.lower()]
except:
try:
handler = open(handler, 'w')
self.__close_handlers.append(handler)
except:
raise RuntimeError, \
"Cannot open file %s for writing by the logger" \
% handler
handlers_.append(handler)
self.__handlers = handlers_
##REF: Name was automagically refactored
def _close_opened_handlers(self):
"""Close opened handlers (such as opened logfiles
"""
for handler in self.__close_handlers:
handler.close()
##REF: Name was automagically refactored
def _get_handlers(self):
"""Return active handlers
"""
return self.__handlers
def __call__(self, msg, args=None, lf=True, cr=False, **kwargs):
"""Write msg to each of the handlers.
It can append a newline (lf = Line Feed) or return
to the beginning before output and to take care about
cleaning previous message if present
it appends a newline (lf = Line Feed) since most commonly each
call is a separate message
"""
if args is not None:
try:
msg = msg % args
except Exception as e:
msg = "%s [%% FAILED due to %s]" % (msg, e)
if 'msgargs' in kwargs:
msg = msg % kwargs['msgargs']
if cr:
msg_ = ""
if self.__crprev > 0:
# wipe out older line to make sure to see no ghosts
msg_ = "\r%s" % (" "*self.__crprev)
msg_ += "\r" + msg
self.__crprev = len(msg)
msg = msg_
# since it makes no sense this days for cr and lf,
# override lf
lf = False
else:
self.__crprev += len(msg)
if lf:
msg = msg + "\n"
self.__crprev = 0 # nothing to clear
for handler in self.__handlers:
try:
handler.write(msg)
except:
print "Failed writing on handler %s" % handler
raise
try:
handler.flush()
except:
# it might be not implemented..
pass
self.__lfprev = lf
handlers = property(fget=_get_handlers, fset=_set_handlers)
lfprev = property(fget=lambda self:self.__lfprev)
class LevelLogger(Logger):
"""Logger not to log anything with a level smaller than specified.
"""
def __init__(self, level=0, indent=" ", *args, **kwargs):
"""
Parameters
----------
level : int, optional
Level to consider be active.
indent : str, optional
String to use for indentation.
"""
Logger.__init__(self, *args, **kwargs)
self.__level = level # damn pylint ;-)
self.__indent = indent
self._set_level(level)
self._set_indent(indent)
##REF: Name was automagically refactored
def _set_level(self, level):
"""Set logging level
"""
if __debug__:
try:
from mvpa2.base import debug
debug('VERBOSE', 'Setting verbosity to %r from %r',
(self.__level, level))
except:
pass
ilevel = int(level)
if ilevel < 0:
raise ValueError, \
"Negative verbosity levels (got %d) are not supported" \
% ilevel
self.__level = ilevel
##REF: Name was automagically refactored
def _set_indent(self, indent):
"""Either to indent the lines based on message log level"""
self.__indent = "%s" % indent
def __call__(self, level, msg, *args, **kwargs):
"""Write msg and indent using self.indent it if it was requested.
It appends a newline since most commonly each call is a separate
message
"""
if level <= self.level:
if self.lfprev and self.indent:
# indent if previous line ended with newline
msg = self.indent * level + msg
Logger.__call__(self, msg, *args, **kwargs)
level = property(fget=lambda self: self.__level, fset=_set_level)
indent = property(fget=lambda self: self.__indent, fset=_set_indent)
class OnceLogger(Logger):
"""Logger which prints a message for a given ID just once.
It could be used for one-time warning to don't overfill the output
with useless repeatative messages.
"""
def __init__(self, *args, **kwargs):
"""Define once logger.
"""
Logger.__init__(self, *args, **kwargs)
self._known = {}
def __call__(self, ident, msg, count=1, *args, **kwargs):
"""Write `msg` if `ident` occured less than `count` times by now.
"""
if ident not in self._known:
self._known[ident] = 0
if count < 0 or self._known[ident] < count:
self._known[ident] += 1
Logger.__call__(self, msg, *args, **kwargs)
class SetLogger(Logger):
"""Logger which prints based on defined sets identified by Id.
"""
def __init__(self, register=None, active=None, printsetid=True,
*args, **kwargs):
"""
Parameters
----------
register : dict or None
What Ids are to be known. Each item dictionary contains consists
of concise key and a description as the value.
active : iterable
What Ids to consider active upon initialization.
printsetid : bool, optional
Either to prefix each line with the target Id of a set in which
the line was printed to (default behavior).
"""
if register is None:
register = {}
if active == None:
active = []
Logger.__init__(self, *args, **kwargs)
self.__printsetid = printsetid
self.__registered = register # all "registered" sets descriptions
# which to output... pointless since __registered
self._set_active(active)
self._set_printsetid(printsetid)
##REF: Name was automagically refactored
def _set_active(self, active):
"""Set active logging set
"""
# just unique entries... we could have simply stored Set I guess,
# but then smth like debug.active += ["BLAH"] would not work
from mvpa2.base import verbose
self.__active = []
registered_keys = self.__registered.keys()
for item in list(set(active)):
if item == '':
continue
if isinstance(item, basestring):
if item in ['?', 'list', 'help']:
self.print_registered(detailed=(item != '?'))
raise SystemExit(0)
if item.upper() == "ALL":
verbose(2, "Enabling all registered debug handlers")
self.__active = registered_keys
break
# try to match item as it is regexp
regexp_str = "^%s$" % item
try:
regexp = re.compile(regexp_str)
except:
raise ValueError, \
"Unable to create regular expression out of %s" % item
matching_keys = filter(regexp.match, registered_keys)
toactivate = matching_keys
if len(toactivate) == 0:
ids = self.registered.keys()
ids.sort()
raise ValueError, \
"Unknown debug ID '%s' was asked to become active," \
" or regular expression '%s' did not get any match" \
" among known ids: %s" \
% (item, regexp_str, ids)
else:
toactivate = [item]
# Lets check if asked items are known
for item_ in toactivate:
if not (item_ in registered_keys):
raise ValueError, \
"Unknown debug ID %s was asked to become active" \
% item_
self.__active += toactivate
self.__active = list(set(self.__active)) # select just unique ones
self.__maxstrlength = max([len(str(x)) for x in self.__active] + [0])
if len(self.__active):
verbose(2, "Enabling debug handlers: %s" % `self.__active`)
##REF: Name was automagically refactored
def _set_printsetid(self, printsetid):
"""Either to print set Id at each line"""
self.__printsetid = printsetid
def __call__(self, setid, msg, *args, **kwargs):
"""
Write msg
It appends a newline since most commonly each call is a separate
message
"""
if setid in self.__active:
if len(msg) > 0 and self.__printsetid:
msg = "[%%-%ds] " % self.__maxstrlength % (setid) + msg
Logger.__call__(self, msg, *args, **kwargs)
def register(self, setid, description):
""" "Register" a new setid with a given description for easy finding
"""
if setid in self.__registered:
raise ValueError, \
"Setid %s is already known with description '%s'" % \
(`setid`, self.__registered[setid])
self.__registered[setid] = description
##REF: Name was automagically refactored
def set_active_from_string(self, value):
"""Given a string listing registered(?) setids, make then active
"""
# somewhat evil but works since verbose must be initiated
# by now
self.active = value.split(",")
def print_registered(self, detailed=True):
print "Registered debug entries: ",
kd = self.registered
rks = sorted(kd.keys())
maxl = max([len(k) for k in rks])
if not detailed:
# short list
print ', '.join(rks)
else:
print
for k in rks:
print '%%%ds %%s' % maxl % (k, kd[k])
printsetid = property(fget=lambda self: self.__printsetid, \
fset=_set_printsetid)
active = property(fget=lambda self: self.__active, fset=_set_active)
registered = property(fget=lambda self: self.__registered)
if __debug__:
import os, re
import traceback
import time
from os import getpid
from os.path import basename, dirname
__pymvpa_pid__ = getpid()
def parse_status(field='VmSize', value_only=False):
"""Return stat information on current process.
Usually it is needed to know where the memory is gone, that is
why VmSize is the default for the field to spit out
TODO: Spit out multiple fields. Use some better way than parsing proc
"""
regex = re.compile('^%s:' % field)
match = None
try:
for l in open('/proc/%d/status' % __pymvpa_pid__):
if regex.match(l):
match = l.strip()
break
if match:
match = re.sub('[ \t]+', ' ', match)
except IOError:
pass
if match and value_only:
match = match.split(':', 1)[1].lstrip()
return match
def get_vmem_from_status():
"""Return utilization of virtual memory
Deprecated implementation which relied on parsing proc/PID/status
"""
rss, vms = [parse_status(field=x, value_only=True)
for x in ['VmRSS', 'VmSize']]
if rss is None or vms is None:
# So not available on this system -- signal with negatives
# but do not crash
return (-1, -1)
if rss[-3:] == vms[-3:] and rss[-3:] == ' kB':
# the same units
rss = int(rss[:-3]) # strip from rss
vms = int(vms[:-3])
return (rss, vms)
try:
# we prefer to use psutil if available
# and let's stay away from "externals" module for now
# Note: importing as __Process so it does not get
# 'queried' by autodoc leading to an exception
# while being unable to get values for the properties
from psutil import Process as __Process
__pymvpa_process__ = __Process(__pymvpa_pid__)
def get_vmem():
"""Return utilization of virtual memory
Generic implementation using psutil
"""
mi = __pymvpa_process__.get_memory_info()
# in later versions of psutil mi is a named tuple.
# but that is not the case on Debian squeeze with psutil 0.1.3
rss = mi[0] / 1024
vms = mi[1] / 1024
return (rss, vms)
except ImportError:
get_vmem = get_vmem_from_status
def get_vmem_str():
"""Return a string summary about utilization of virtual_memory
"""
vmem = get_vmem()
try:
return "RSS/VMS: %d/%d kB" % vmem
except:
return "RSS/VMS: %s" % str(vmem)
def _get_vmem_max_str_gen():
"""Return peak vmem utilization so far.
It is a generator, get_vmem_max_str later is bound to .next
of it - to mimic static variables
"""
rss_max = 0
vms_max = 0
while True:
rss, vms = get_vmem()
rss_max = max(rss, rss_max)
vms_max = max(vms, vms_max)
yield "max RSS/VMS: %d/%d kB" % (rss_max, vms_max)
get_vmem_max_str = _get_vmem_max_str_gen().next
def mbasename(s):
"""Custom function to include directory name if filename is too common
Also strip .py at the end
"""
base = basename(s)
if base.endswith('.py'):
base = base[:-3]
if base in set(['base', '__init__']):
base = basename(dirname(s)) + '.' + base
return base
class TraceBack(object):
"""Customized traceback to be included in debug messages
"""
def __init__(self, collide=False):
"""Initialize TrackBack metric
Parameters
----------
collide : bool
if True then prefix common with previous invocation gets
replaced with ...
"""
self.__prev = ""
self.__collide = collide
def __call__(self):
ftb = traceback.extract_stack(limit=100)[:-2]
entries = [[mbasename(x[0]), str(x[1])] for x in ftb]
entries = [ e for e in entries if e[0] != 'unittest' ]
# lets make it more consize
entries_out = [entries[0]]
for entry in entries[1:]:
if entry[0] == entries_out[-1][0]:
entries_out[-1][1] += ',%s' % entry[1]
else:
entries_out.append(entry)
sftb = '>'.join(['%s:%s' % (mbasename(x[0]),
x[1]) for x in entries_out])
if self.__collide:
# lets remove part which is common with previous invocation
prev_next = sftb
common_prefix = os.path.commonprefix((self.__prev, sftb))
common_prefix2 = re.sub('>[^>]*$', '', common_prefix)
if common_prefix2 != "":
sftb = '...' + sftb[len(common_prefix2):]
self.__prev = prev_next
return sftb
class RelativeTime(object):
"""Simple helper class to provide relative time it took from previous
invocation"""
def __init__(self, format="%3.3f sec"):
"""
Parameters
----------
format : str
String format to use for reporting time.
"""
self.__prev = None
self.__format = format
def __call__(self):
dt = 0.0
ct = time.time()
if not self.__prev is None:
dt = ct - self.__prev
self.__prev = ct
return self.__format % dt
class DebugLogger(SetLogger):
"""
Logger for debugging purposes.
Expands SetLogger with ability to print some interesting information
(named Metric... XXX) about current process at each debug printout
"""
_known_metrics = {
# TODO: make up Windows-friendly version or pure Python platform
# independent version (probably just make use of psutil)
'vmem' : get_vmem_str,
'vmem_max' : get_vmem_max_str,
'pid' : getpid, # lambda : parse_status(field='Pid'),
'asctime' : time.asctime,
'tb' : TraceBack(),
'tbc' : TraceBack(collide=True),
}
def __init__(self, metrics=None, offsetbydepth=True, *args, **kwargs):
"""
Parameters
----------
metrics : iterable of (func or str) or None
What metrics (functions) to be reported. If item is a string,
it is matched against `_known_metrics` keys.
offsetbydepth : bool, optional
Either to offset lines depending on backtrace depth (default
behavior).
*args, **kwargs
Passed to SetLogger initialization XXX
"""
if metrics == None:
metrics = []
SetLogger.__init__(self, *args, **kwargs)
self.__metrics = []
self._offsetbydepth = offsetbydepth
self._reltimer = RelativeTime()
self._known_metrics = DebugLogger._known_metrics
self._known_metrics['reltime'] = self._reltimer
for metric in metrics:
self._registerMetric(metric)
##REF: Name was automagically refactored
def register_metric(self, func):
"""Register some metric to report
func can be either a function call or a string which should
correspond to known metrics
"""
if isinstance(func, basestring):
if func in ['all', 'ALL']:
func = self._known_metrics.keys()
if isinstance(func, basestring):
if func in DebugLogger._known_metrics:
func = DebugLogger._known_metrics[func]
else:
if func in ['?', 'list', 'help']:
print 'Known debug metrics: ', \
', '.join(DebugLogger._known_metrics.keys())
raise SystemExit(0)
else:
raise ValueError, \
"Unknown name %s for metric in DebugLogger" % \
func + " Known metrics are " + \
`DebugLogger._known_metrics.keys()`
elif isinstance(func, list):
self.__metrics = [] # reset
for item in func:
self.register_metric(item)
return
if not func in self.__metrics:
try:
from mvpa2.base import debug
debug("DBG", "Registering metric %s" % func)
self.__metrics.append(func)
except:
pass
def __call__(self, setid, msg, *args, **kwargs):
if setid not in self.registered:
raise ValueError, "Not registered debug ID %s" % setid
if not setid in self.active:
# don't even compute the metrics, since they might
# be statefull as RelativeTime
return
msg_ = ' / '.join([str(x()) for x in self.__metrics])
if len(msg_) > 0:
msg_ = "{%s}" % msg_
if len(msg) > 0:
# determine blank offset using backstacktrace
if self._offsetbydepth:
level = len(traceback.extract_stack()) - 2
else:
level = 1
if len(msg) > 250 and 'DBG' in self.active and not setid.endswith('_TB'):
tb = traceback.extract_stack(limit=2)
msg += " !!!2LONG!!!. From %s" % str(tb[0])
msg = "DBG%s:%s%s" % (msg_, " "*level, msg)
SetLogger.__call__(self, setid, msg, *args, **kwargs)
else:
msg = msg_
Logger.__call__(self, msg, *args, **kwargs)
##REF: Name was automagically refactored
def _set_offset_by_depth(self, b):
self._offsetbydepth = b
offsetbydepth = property(fget=lambda x:x._offsetbydepth,
fset=_set_offset_by_depth)
metrics = property(fget=lambda x:x.__metrics,
fset=register_metric)
if not __debug__:
class BlackHoleLogger(SetLogger):
'''A logger that does absolutely nothing - it is used as a fallback
so that debug(...) can still be called even if not __debug__'''
def __init__(self, metrics=None, offsetbydepth=True, *args, **kwargs):
'''Initializes the logger - ignores all input arguments'''
# do not be evil - initialize through the parent class
SetLogger.__init__(self, *args, **kwargs)
def __call__(self, setid, msg, *args, **kwargs):
pass
def register_metric(self, func):
pass
def register(self, setid, description):
pass
def set_active_from_string(self, value):
pass
def print_registered(self, detailed=True):
print "BlackHoleLogger: nothing registered "
|
py | b40c3a6ac6eab8548c8e7dbbdf62a8ee9a78ae75 |
__author__ = "Andre Merzky"
__copyright__ = "Copyright 2012-2013, The SAGA Project"
__license__ = "MIT"
import radical.utils as ru
import radical.utils.signatures as rus
from .constants import *
from ..constants import SYNC, ASYNC, TASK
from ..adaptors import base as sab
from ..namespace import entry as nsentry
from .. import attributes as sa
from .. import session as ss
from .. import task as st
# ------------------------------------------------------------------------------
#
class Entry (nsentry.Entry, sa.Attributes) :
# --------------------------------------------------------------------------
#
@rus.takes ('Entry',
rus.optional ((ru.Url, str)),
rus.optional (int, rus.nothing),
rus.optional (ss.Session),
rus.optional (sab.Base),
rus.optional (dict),
rus.optional (rus.one_of (SYNC, ASYNC, TASK)))
@rus.returns (rus.nothing)
def __init__ (self, url=None, flags=READ, session=None,
_adaptor=None, _adaptor_state={}, _ttype=None) :
'''
url: saga.Url
flags: flags enum
session: saga.Session
ret: obj
'''
# param checks
url = ru.Url (url)
self._nsentry = super (Entry, self)
self._nsentry.__init__ (url, flags, session,
_adaptor, _adaptor_state, _ttype=_ttype)
# set attribute interface properties
self._attributes_allow_private (True)
self._attributes_camelcasing (True)
self._attributes_extensible (True, getter=self._attribute_getter,
setter=self._attribute_setter,
lister=self._attribute_lister,
caller=self._attribute_caller)
# register properties with the attribute interface
self._attributes_register (ATTRIBUTE, None, sa.STRING, sa.SCALAR, sa.READONLY)
self._attributes_register (OBJECT, None, sa.ANY, sa.SCALAR, sa.READONLY)
self._attributes_register (TTL, None, sa.INT, sa.SCALAR, sa.WRITEABLE)
self._attributes_set_setter (TTL, self.set_ttl)
self._attributes_set_getter (TTL, self.get_ttl)
self._attributes_set_setter (OBJECT, self.store_object)
self._attributes_set_getter (OBJECT, self.retrieve_object)
# --------------------------------------------------------------------------
#
@classmethod
@rus.takes ('Entry',
rus.optional ((ru.Url, str)),
rus.optional (int, rus.nothing),
rus.optional (ss.Session),
rus.optional (rus.one_of (SYNC, ASYNC, TASK)))
@rus.returns (st.Task)
def create (cls, url=None, flags=READ, session=None, ttype=None) :
'''
url: saga.Url
flags: saga.advert.flags enum
session: saga.Session
ttype: saga.task.type enum
ret: saga.Task
'''
if not flags : flags = 0
_nsentry = super (Entry, cls)
return _nsentry.create (url, flags, session, ttype=ttype)
# --------------------------------------------------------------------------
#
# attribute methods
#
# NOTE: we do not yet pass ttype, as async calls are not yet supported by
# the attribute interface
#
@rus.takes ('Entry',
str,
rus.optional (rus.one_of (SYNC, ASYNC, TASK)))
@rus.returns ((rus.anything, st.Task))
def _attribute_getter (self, key, ttype=None) :
return self._adaptor.attribute_getter (key)
# --------------------------------------------------------------------------
#
@rus.takes ('Entry',
str,
rus.anything,
rus.optional (rus.one_of (SYNC, ASYNC, TASK)))
@rus.returns ((rus.nothing, st.Task))
def _attribute_setter (self, key, val, ttype=None) :
return self._adaptor.attribute_setter (key, val)
# --------------------------------------------------------------------------
#
@rus.takes ('Entry',
rus.optional (rus.one_of (SYNC, ASYNC, TASK)))
@rus.returns ((rus.list_of (rus.anything), st.Task))
def _attribute_lister (self, ttype=None) :
return self._adaptor.attribute_lister ()
# --------------------------------------------------------------------------
#
@rus.takes ('Entry',
str,
int,
callable,
rus.optional (rus.one_of (SYNC, ASYNC, TASK)))
@rus.returns ((rus.anything, st.Task))
def _attribute_caller (self, key, id, cb, ttype=None) :
return self._adaptor.attribute_caller (key, id, cb)
# --------------------------------------------------------------------------
#
# advert methods
#
@rus.takes ('Entry',
float,
rus.optional (rus.one_of (SYNC, ASYNC, TASK)))
@rus.returns ((rus.nothing, st.Task))
def set_ttl (self, ttl=-1.0, ttype=None) :
"""
ttl : int
ttype: saga.task.type enum
ret: None / saga.Task
"""
return self._adaptor.set_ttl (ttl, ttype=ttype)
# --------------------------------------------------------------------------
#
@rus.takes ('Entry',
rus.optional (rus.one_of (SYNC, ASYNC, TASK)))
@rus.returns ((float, st.Task))
def get_ttl (self, ttype=None) :
"""
ttype: saga.task.type enum
ret: int / saga.Task
"""
return self._adaptor.get_ttl (ttype=ttype)
# --------------------------------------------------------------------------
#
@rus.takes ('Entry',
object,
rus.optional (rus.one_of (SYNC, ASYNC, TASK)))
@rus.returns ((rus.nothing, st.Task))
def store_object (self, object, ttype=None) :
"""
object : <object type>
ttype: saga.task.type enum
ret: None / saga.Task
"""
return self._adaptor.store_object (object, ttype=ttype)
# --------------------------------------------------------------------------
#
@rus.takes ('Entry',
rus.optional (rus.one_of (SYNC, ASYNC, TASK)))
@rus.returns ((object, st.Task))
def retrieve_object (self, ttype=None) :
"""
ttype: saga.task.type enum
ret: any / saga.Task
"""
return self._adaptor.retrieve_object (ttype=ttype)
# --------------------------------------------------------------------------
#
@rus.takes ('Entry',
rus.optional (rus.one_of (SYNC, ASYNC, TASK)))
@rus.returns ((rus.nothing, st.Task))
def delete_object (self, ttype=None) :
"""
ttype: saga.task.type enum
ret: None / saga.Task
"""
return self._adaptor.delete_object (ttype=ttype)
|
py | b40c3b050f2b375c21a0e18a09ca65b6b01bc7d2 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.addons.account.tests.account_test_no_chart import TestAccountNoChartCommon
from odoo.addons.account.tests.account_test_multi_company_no_chart import TestAccountMultiCompanyNoChartCommon
class TestExpenseCommon(TestAccountNoChartCommon):
@classmethod
def setUpClass(cls):
super(TestExpenseCommon, cls).setUpClass()
cls.setUpUsers()
# The user manager is only expense manager
user_group_manager = cls.env.ref('hr_expense.group_hr_expense_manager')
cls.user_manager.write({
'groups_id': [(6, 0, [user_group_manager.id, cls.env.ref('base.group_user').id])],
})
# create employee
cls.employee = cls.env['hr.employee'].create({
'name': 'Johnny Employee',
'user_id': cls.user_employee.id,
'address_home_id': cls.user_employee.partner_id.id,
'address_id': cls.user_employee.partner_id.id,
})
# Create tax
cls.tax = cls.env['account.tax'].create({
'name': 'Expense 10%',
'amount': 10,
'amount_type': 'percent',
'type_tax_use': 'purchase',
'price_include': True,
})
# Create analytic account
cls.analytic_account = cls.env['account.analytic.account'].create({
'name': 'Test Analytic Account for Expenses',
})
# Expense reports
cls.journal = cls.env['account.journal'].create({
'name': 'Purchase Journal - Test',
'code': 'HRTPJ',
'type': 'purchase',
'company_id': cls.env.company.id,
})
cls.expense_sheet = cls.env['hr.expense.sheet'].create({
'name': 'Expense for Johnny Employee',
'employee_id': cls.employee.id,
'journal_id': cls.journal.id,
})
cls.expense_sheet2 = cls.env['hr.expense.sheet'].create({
'name': 'Second Expense for Johnny Employee',
'employee_id': cls.employee.id,
'journal_id': cls.journal.id,
})
Users = cls.env['res.users'].with_context(no_reset_password=True)
# Find Employee group
group_employee_id = cls.env.ref('base.group_user').id
cls.user_emp2 = Users.create({
'name': 'Superboy Employee',
'login': 'superboy',
'email': '[email protected]',
'groups_id': [(6, 0, [group_employee_id])]
})
cls.user_officer = Users.create({
'name': 'Batman Officer',
'login': 'batman',
'email': '[email protected]',
'groups_id': [(6, 0, [group_employee_id, cls.env.ref('hr_expense.group_hr_expense_team_approver').id])]
})
cls.emp_emp2 = cls.env['hr.employee'].create({
'name': 'Superboy',
'user_id': cls.user_emp2.id,
})
cls.emp_officer = cls.env['hr.employee'].create({
'name': 'Batman',
'user_id': cls.user_officer.id,
})
cls.emp_manager = cls.env['hr.employee'].create({
'name': 'Superman',
'user_id': cls.user_manager.id,
})
cls.rd = cls.env['hr.department'].create({
'name': 'R&D',
'manager_id': cls.emp_officer.id,
'member_ids': [(6, 0, [cls.employee.id])],
})
cls.ps = cls.env['hr.department'].create({
'name': 'PS',
'manager_id': cls.emp_manager.id,
'member_ids': [(6, 0, [cls.emp_emp2.id])],
})
cls.uom_unit = cls.env.ref('uom.product_uom_unit').id
cls.uom_dozen = cls.env.ref('uom.product_uom_dozen').id
cls.product_1 = cls.env['product.product'].create({
'name': 'Batmobile repair',
'type': 'service',
'uom_id': cls.uom_unit,
'uom_po_id': cls.uom_unit,
})
cls.product_2 = cls.env['product.product'].create({
'name': 'Superboy costume washing',
'type': 'service',
'uom_id': cls.uom_unit,
'uom_po_id': cls.uom_unit,
})
class TestExpenseMultiCompanyCommon(TestAccountMultiCompanyNoChartCommon):
@classmethod
def setUpClass(cls):
super(TestExpenseMultiCompanyCommon, cls).setUpClass()
cls.setUpAdditionalAccounts()
cls.setUpUsers()
# The user manager is only expense manager
user_group_manager = cls.env.ref('hr_expense.group_hr_expense_manager')
cls.user_manager.write({
'groups_id': [(6, 0, [user_group_manager.id, cls.env.ref('base.group_user').id])],
})
cls.user_manager_company_B.write({
'groups_id': [(6, 0, [user_group_manager.id, cls.env.ref('base.group_user').id])],
})
# create employee
cls.employee = cls.env['hr.employee'].create({
'name': 'Tyrion Lannister',
'user_id': cls.user_employee.id,
'address_home_id': cls.user_employee.partner_id.id,
'address_id': cls.user_employee.partner_id.id,
})
cls.employee_company_B = cls.env['hr.employee'].create({
'name': 'Gregor Clegane',
'user_id': cls.user_employee_company_B.id,
'address_home_id': cls.user_employee_company_B.partner_id.id,
'address_id': cls.user_employee_company_B.partner_id.id,
})
# Create tax
cls.tax = cls.env['account.tax'].create({
'name': 'Expense 10%',
'amount': 10,
'amount_type': 'percent',
'type_tax_use': 'purchase',
'price_include': True,
'company_id': cls.env.company.id
})
cls.tax_company_B = cls.env['account.tax'].create({
'name': 'Expense 10%',
'amount': 10,
'amount_type': 'percent',
'type_tax_use': 'purchase',
'price_include': True,
'company_id': cls.company_B.id
})
# Create analytic account
cls.analytic_account = cls.env['account.analytic.account'].create({
'name': 'Test Analytic Account for Expenses',
'company_id': cls.env.company.id,
})
cls.analytic_account_company_B = cls.env['account.analytic.account'].create({
'name': 'Test Analytic Account for Expenses',
'company_id': cls.company_B.id,
})
# Expense reports
cls.journal = cls.env['account.journal'].create({
'name': 'Purchase Journal - Test',
'code': 'HRTPJ',
'type': 'purchase',
'company_id': cls.env.company.id,
})
cls.journal_company_B = cls.env['account.journal'].create({
'name': 'Purchase Journal Company B - Test',
'code': 'HRTPJ',
'type': 'purchase',
'company_id': cls.company_B.id,
})
cls.expense_sheet = cls.env['hr.expense.sheet'].create({
'name': 'Expense for Tyrion',
'employee_id': cls.employee.id,
'journal_id': cls.journal.id,
})
cls.expense_sheet2 = cls.env['hr.expense.sheet'].create({
'name': 'Second Expense for Tyrion',
'employee_id': cls.employee.id,
'journal_id': cls.journal.id,
})
cls.product_1 = cls.env['product.product'].create({
'name': 'Sword sharpening',
'type': 'service',
'uom_id': cls.env.ref('uom.product_uom_unit').id,
'uom_po_id': cls.env.ref('uom.product_uom_unit').id,
'property_account_expense_id': cls.account_expense.id,
})
cls.product_2 = cls.env['product.product'].create({
'name': 'Armor cleaning',
'type': 'service',
'uom_id': cls.env.ref('uom.product_uom_unit').id,
'uom_po_id': cls.env.ref('uom.product_uom_unit').id,
'property_account_expense_id': cls.account_expense.id,
})
|
py | b40c3b2d272369491297154307604908e598407d | t = int(input())
for _ in range(t):
a, b, c = tuple(map(int, input().split()))
print("Case #%d: %s" % (_+1, 'true' if a + b > c else 'false'))
|
py | b40c3b83440c76d2c07cfb6babb8d0a077fa793f | from pyppeteer.page import Page
async def iframe_content_window(page: Page) -> None:
await page.evaluateOnNewDocument(
"""
() => {
try {
// Adds a contentWindow proxy to the provided iframe element
const addContentWindowProxy = iframe => {
const contentWindowProxy = {
get(target, key) {
// Now to the interesting part:
// We actually make this thing behave like a regular iframe window,
// by intercepting calls to e.g. `.self` and redirect it to the correct thing. :)
// That makes it possible for these assertions to be correct:
// iframe.contentWindow.self === window.top // must be false
if (key === 'self') {
return this
}
// iframe.contentWindow.frameElement === iframe // must be true
if (key === 'frameElement') {
return iframe
}
return Reflect.get(target, key)
}
}
if (!iframe.contentWindow) {
const proxy = new Proxy(window, contentWindowProxy)
Object.defineProperty(iframe, 'contentWindow', {
get() {
return proxy
},
set(newValue) {
return newValue // contentWindow is immutable
},
enumerable: true,
configurable: false
})
}
}
// Handles iframe element creation, augments `srcdoc` property so we can intercept further
const handleIframeCreation = (target, thisArg, args) => {
const iframe = target.apply(thisArg, args)
// We need to keep the originals around
const _iframe = iframe
const _srcdoc = _iframe.srcdoc
// Add hook for the srcdoc property
// We need to be very surgical here to not break other iframes by accident
Object.defineProperty(iframe, 'srcdoc', {
configurable: true, // Important, so we can reset this later
get: function() {
return _iframe.srcdoc
},
set: function(newValue) {
addContentWindowProxy(this)
// Reset property, the hook is only needed once
Object.defineProperty(iframe, 'srcdoc', {
configurable: false,
writable: false,
value: _srcdoc
})
_iframe.srcdoc = newValue
}
})
return iframe
}
// Adds a hook to intercept iframe creation events
const addIframeCreationSniffer = () => {
/* global document */
const createElement = {
// Make toString() native
get(target, key) {
return Reflect.get(target, key)
},
apply: function(target, thisArg, args) {
const isIframe =
args && args.length && `${args[0]}`.toLowerCase() === 'iframe'
if (!isIframe) {
// Everything as usual
return target.apply(thisArg, args)
} else {
return handleIframeCreation(target, thisArg, args)
}
}
}
// All this just due to iframes with srcdoc bug
document.createElement = new Proxy(
document.createElement,
createElement
)
}
// Let's go
addIframeCreationSniffer()
} catch (err) {
// console.warn(err)
}
}
"""
)
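# Illustrative usage sketch (not part of the original module): the patch is
# registered on a page before navigation so that iframes created later expose
# the proxied ``contentWindow``. The target URL is a placeholder.
#
#     import asyncio
#     from pyppeteer import launch
#
#     async def main():
#         browser = await launch()
#         page = await browser.newPage()
#         await iframe_content_window(page)
#         await page.goto("https://example.com")
#         await browser.close()
#
#     asyncio.get_event_loop().run_until_complete(main())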
|
py | b40c3baa797d94ea7a8cd68d7f1f80eb608081be | import copy
import importlib
import os
import time
import numpy
import ray
import torch
from torch.utils.tensorboard import SummaryWriter
import models
import replay_buffer
import self_play
import shared_storage
import trainer
class MuZero:
"""
Main class to manage MuZero.
Args:
game_name (str): Name of the game module, it should match the name of a .py file
in the "./games" directory.
Example:
>>> muzero = MuZero("cartpole")
>>> muzero.train()
>>> muzero.test()
"""
def __init__(self, game_name):
self.game_name = game_name
# Load the game and the config from the module with the game name
try:
game_module = importlib.import_module("games." + self.game_name)
self.config = game_module.MuZeroConfig()
self.Game = game_module.Game
except Exception as err:
print(
'{} is not a supported game name, try "cartpole" or refer to the documentation for adding a new game.'.format(
self.game_name
)
)
raise err
os.makedirs(os.path.join(self.config.results_path), exist_ok=True)
# Fix random generator seed for reproductibility
numpy.random.seed(self.config.seed)
torch.manual_seed(self.config.seed)
# Initial weights used to initialize components
self.muzero_weights = models.MuZeroNetwork(
self.config.observation_shape,
self.config.stacked_observations,
len(self.config.action_space),
self.config.encoding_size,
self.config.hidden_layers,
self.config.support_size,
).get_weights()
def train(self):
ray.init()
writer = SummaryWriter(
os.path.join(self.config.results_path, self.game_name + "_summary")
)
# Initialize workers
training_worker = trainer.Trainer.options(
num_gpus=1 if "cuda" in self.config.training_device else 0
).remote(copy.deepcopy(self.muzero_weights), self.config)
shared_storage_worker = shared_storage.SharedStorage.remote(
copy.deepcopy(self.muzero_weights), self.game_name, self.config,
)
replay_buffer_worker = replay_buffer.ReplayBuffer.remote(self.config)
self_play_workers = [
self_play.SelfPlay.remote(
copy.deepcopy(self.muzero_weights),
self.Game(self.config.seed + seed),
self.config,
)
for seed in range(self.config.num_actors)
]
test_worker = self_play.SelfPlay.remote(
copy.deepcopy(self.muzero_weights), self.Game(), self.config
)
# Launch workers
[
self_play_worker.continuous_self_play.remote(
shared_storage_worker, replay_buffer_worker
)
for self_play_worker in self_play_workers
]
test_worker.continuous_self_play.remote(shared_storage_worker, None, True)
training_worker.continuous_update_weights.remote(
replay_buffer_worker, shared_storage_worker
)
        # Loop for monitoring the workers in real time
        print(
            "\nTraining...\nRun tensorboard --logdir ./ and go to http://localhost:6006/ to watch the training performance in real time.\n"
)
counter = 0
infos = ray.get(shared_storage_worker.get_infos.remote())
try:
while infos["training_step"] < self.config.training_steps:
# Get and save real time performance
infos = ray.get(shared_storage_worker.get_infos.remote())
writer.add_scalar(
"1.Total reward/Total reward", infos["total_reward"], counter
)
writer.add_scalar(
"2.Workers/Self played games",
ray.get(replay_buffer_worker.get_self_play_count.remote()),
counter,
)
writer.add_scalar(
"2.Workers/Training steps", infos["training_step"], counter
)
writer.add_scalar("3.Loss/1.Total loss", infos["total_loss"], counter)
writer.add_scalar("3.Loss/Value loss", infos["value_loss"], counter)
writer.add_scalar("3.Loss/Reward loss", infos["reward_loss"], counter)
writer.add_scalar("3.Loss/Policy loss", infos["policy_loss"], counter)
print(
"Last test reward: {0:.2f}. Training step: {1}/{2}. Played games: {3}. Loss: {4:.2f}".format(
infos["total_reward"],
infos["training_step"],
self.config.training_steps,
ray.get(replay_buffer_worker.get_self_play_count.remote()),
infos["total_loss"],
),
end="\r",
)
counter += 1
time.sleep(3)
except KeyboardInterrupt as err:
            # Comment out the raise below to stop this monitoring loop on Ctrl+C while keeping
            # the workers running (the trailing pass keeps the except block valid in that case)
            raise err
            pass
self.muzero_weights = ray.get(shared_storage_worker.get_weights.remote())
ray.shutdown()
def test(self, render, muzero_player):
"""
Test the model in a dedicated thread.
Args:
            render: boolean, whether to display the environment while testing.
            muzero_player: integer, the player number that MuZero controls in multiplayer
            games; None lets MuZero play every player turn by turn.
"""
print("\nTesting...")
ray.init()
self_play_workers = self_play.SelfPlay.remote(
copy.deepcopy(self.muzero_weights), self.Game(), self.config
)
test_rewards = []
for _ in range(self.config.test_episodes):
history = ray.get(
self_play_workers.play_game.remote(0, render, muzero_player)
)
test_rewards.append(sum(history.rewards))
ray.shutdown()
return test_rewards
def load_model(self, path=None):
if not path:
path = os.path.join(self.config.results_path, self.game_name)
try:
self.muzero_weights = torch.load(path)
print("Using weights from {}".format(path))
except FileNotFoundError:
print("There is no model saved in {}.".format(path))
if __name__ == "__main__":
# Use the game and config from the ./games folder
muzero = MuZero("cartpole")
## Train
muzero.train()
## Test
muzero.load_model()
# Render some self-played games
muzero.test(render=True, muzero_player=None)
# Let user play against MuZero (MuZero is player 0 here)
# muzero.test(render=True, muzero_player=0)
|
py | b40c3bdacdf1d30c5d8902609496344e13ebebec | ########################################################################
#
# Wrapper for the ODD discrimination algorithm built on the DEAP library (http://deap.readthedocs.org/en/latest/api/algo.html)
#
########################################################################
# Add main directory to enable imports
if __name__ == '__main__' :
import os
os.sys.path.append(os.path.abspath('../..'))
########################################################################
import wx
# Real time plotting
import visvis
# GUI components
from libs.gui.basic_window import BasicWindow
from odd_tab import ODD_Tab
# Hardware
#from libs.dev.spectrometer_ocean_optics import ManagerOceanOpticsSpectrometer as ManagerSpectrometer
#from libs.dev.spectrometer_ocean_optics import OceanOpticsSpectrometerTab as SpectrometerTab
from libs.dev.camera_istar import ManagerIStarCamera as ManagerSpectrometer
from libs.dev.camera_istar import IStarCameraTab as SpectrometerTab
from libs.dev.pulse_shaper import ManagerShaper, PulseShaperTab
from libs.dev.sample_switcher import ManagerSampleSwitcher, SampleSwitcherTab
########################################################################
class SettingsNotebook (wx.Notebook) :
"""
GUI for listing all settings
"""
def __init__(self, parent, DevSpectrometer, DevSampleSwitcher, DevPulseShaper ):
"""
`DevSpectrometer` is a spectrometer manager
"""
wx.Notebook.__init__(self, parent)
self.ODD_GA = ODD_Tab(self)
self.AddPage(self.ODD_GA, "ODD GA")
self.Spectrometer = SpectrometerTab(self, DevSpectrometer)
self.AddPage (self.Spectrometer, "Spectrometer")
self.SampleSwitcher = SampleSwitcherTab(self, DevSampleSwitcher)
self.AddPage (self.SampleSwitcher, "Sample switcher")
self.PulseShaper = PulseShaperTab(self, DevPulseShaper)
self.AddPage (self.PulseShaper, "Pulse shaper")
# Dictionary to bind names to tabs for saving and loading settings
self.settings_to_tabs = {"Spectrometer" : self.Spectrometer,
"PulseShaper" : self.PulseShaper, "ODD_GA" : self.ODD_GA,
"SampleSwitcher" : self.SampleSwitcher }
########################################################################
class ODDExperiment (BasicWindow) :
def __init__ (self, parent) :
# Starting spectrometer
self.Spectrometer = ManagerSpectrometer()
self.SpectrometerProc = self.Spectrometer.start()
# Starting pulse shaper
self.PulseShaper = ManagerShaper()
self.PulseShaperProc = self.PulseShaper.start()
# Start sample switcher
self.SampleSwitcher = ManagerSampleSwitcher()
self.ManagerSampleSwitcherProc = self.SampleSwitcher.start()
# Create GUI
dw, dh = wx.DisplaySize()
		wx.Frame.__init__ (self, parent, title="ODD for multiple fluorescence marker concentration measurements",
size=(0.9*dw, 0.88*dh) )
self.ConstructGUI ()
self.Center()
self.Maximize()
self.Show ()
wx.EVT_CLOSE (self, self.on_close)
def __del__ (self) :
# Close spectrometer
self.Spectrometer.exit(); self.SpectrometerProc.join()
# Close pulse shaper
self.PulseShaper.exit(); self.PulseShaperProc.join()
# Close sample switcher
self.SampleSwitcher.exit(); self.ManagerSampleSwitcherProc.join()
def ConstructGUI (self) :
""" Build GUI """
self.panel = wx.Panel(self)
sizer = wx.GridBagSizer ()
############################ Settings Notebook ############################
self.SettingsNotebook = SettingsNotebook(self.panel, self.Spectrometer, self.SampleSwitcher, self.PulseShaper)
sizer.Add(self.SettingsNotebook, pos=(0, 0), span=(1, 1), flag=wx.EXPAND|wx.TOP|wx.LEFT|wx.RIGHT , border=10)
############################ Command panel ############################
boxsizer = wx.BoxSizer (wx.VERTICAL)
# Interactively display spectrum
boxsizer.Add (self.CreateShowSpectrumButton(), flag=wx.EXPAND, border=5)
# Separator
boxsizer.Add (wx.StaticText(self.panel), flag=wx.EXPAND, border=5)
# Separator
boxsizer.Add (wx.StaticText(self.panel), flag=wx.EXPAND, border=5)
# Send random phase to the pulse shaper
boxsizer.Add (self.CreateRandomPhaseButton(), flag=wx.EXPAND, border=5)
# Send random amplitude to the pulse shaper
boxsizer.Add (self.CreateRandomAmplitudeButton(), flag=wx.EXPAND, border=5)
# Send zero amplitude and zero phase to the pulse shaper
boxsizer.Add (self.CreateZeroAmplitudeButton(), flag=wx.EXPAND, border=5)
# Open pulse shaper equalizer
boxsizer.Add (self.CreatePulseShaperEqualizerButton(), flag=wx.EXPAND, border=5)
# Separator
boxsizer.Add (wx.StaticText(self.panel), flag=wx.EXPAND, border=5)
# Save settings
boxsizer.Add( self.CreateSaveSettingsButton(), flag=wx.EXPAND, border=5)
# Load settings
boxsizer.Add( self.CreateLoadSettingsButton(), flag=wx.EXPAND, border=5)
sizer.Add(boxsizer, pos=(1, 0), span=(1, 1), flag=wx.EXPAND|wx.TOP|wx.LEFT|wx.RIGHT|wx.GROW, border=10)
########################### End of constructing panel ######################################
self.panel.SetSizer (sizer)
############################# Setting visvis #######################################
Figure = app.GetFigureClass()
self.fig = Figure(self)
boxsizer = wx.BoxSizer (wx.HORIZONTAL)
boxsizer.Add(self.panel, 0.5, wx.EXPAND)
boxsizer.Add(self.fig._widget, 2, wx.EXPAND)
#########################################################################################
self.SetSizer (boxsizer)
self.SetAutoLayout(True)
self.Layout()
#########################################################################
if __name__ == '__main__' :
app = visvis.use('wx')
app.Create()
ODDExperiment (None)
app.Run() |
py | b40c3c4b545635868fbbc973b65a0dec6f0a2274 | ##################################################################
# Copyright 2018 Open Source Geospatial Foundation and others #
# licensed under MIT, Please consult LICENSE.txt for details #
##################################################################
"""Validator classes are used for ComplexInputs, to validate the content
"""
import logging
from pywps.validator.mode import MODE
from pywps.inout.formats import FORMATS
from urllib.request import urlopen
import mimetypes
import os
LOGGER = logging.getLogger('PYWPS')
def validategml(data_input, mode):
"""GML validation function
:param data_input: :class:`ComplexInput`
:param pywps.validator.mode.MODE mode:
    This function validates GML input based on the given validation mode. Depending
    on the `mode` parameter, the following happens:
`MODE.NONE`
it will return always `True`
`MODE.SIMPLE`
the mimetype will be checked
`MODE.STRICT`
`GDAL/OGR <http://gdal.org/>`_ is used for getting the proper format.
`MODE.VERYSTRICT`
the :class:`lxml.etree` is used along with given input `schema` and the
GML file is properly validated against given schema.
"""
LOGGER.info('validating GML; Mode: {}'.format(mode))
passed = False
if mode >= MODE.NONE:
passed = True
if mode >= MODE.SIMPLE:
name = data_input.file
(mtype, encoding) = mimetypes.guess_type(name, strict=False)
passed = data_input.data_format.mime_type in {mtype, FORMATS.GML.mime_type}
if mode >= MODE.STRICT:
from pywps.dependencies import ogr
data_source = ogr.Open(data_input.file)
if data_source:
passed = (data_source.GetDriver().GetName() == "GML")
else:
passed = False
if mode >= MODE.VERYSTRICT:
from lxml import etree
try:
schema_url = data_input.data_format.schema
gmlschema_doc = etree.parse(urlopen(schema_url))
gmlschema = etree.XMLSchema(gmlschema_doc)
passed = gmlschema.validate(etree.parse(data_input.stream))
except Exception as e:
LOGGER.warning(e)
passed = False
return passed
def validatexml(data_input, mode):
"""XML validation function
:param data_input: :class:`ComplexInput`
:param pywps.validator.mode.MODE mode:
    This function validates XML input based on the given validation mode. Depending
    on the `mode` parameter, the following happens:
`MODE.NONE`
it will return always `True`
`MODE.SIMPLE`
the mimetype will be checked
`MODE.STRICT` and `MODE.VERYSTRICT`
the :class:`lxml.etree` is used along with given input `schema` and the
XML file is properly validated against given schema.
"""
LOGGER.info('validating XML; Mode: {}'.format(mode))
passed = False
if mode >= MODE.NONE:
passed = True
if mode >= MODE.SIMPLE:
name = data_input.file
(mtype, encoding) = mimetypes.guess_type(name, strict=False)
passed = data_input.data_format.mime_type in {mtype, FORMATS.GML.mime_type}
if mode >= MODE.STRICT:
from lxml import etree
# TODO: Raise the actual validation exception to make it easier to spot the error.
# xml = etree.parse(data_input.file)
# schema.assertValid(xml)
try:
fn = os.path.join(_get_schemas_home(), data_input.data_format.schema)
schema_doc = etree.parse(fn)
schema = etree.XMLSchema(schema_doc)
passed = schema.validate(etree.parse(data_input.file))
except Exception as e:
LOGGER.warning(e)
passed = False
return passed
def validatejson(data_input, mode):
"""JSON validation function
:param data_input: :class:`ComplexInput`
:param pywps.validator.mode.MODE mode:
    This function validates JSON input based on the given validation mode. Depending
    on the `mode` parameter, the following happens:
`MODE.NONE`
No validation, returns `True`.
`MODE.SIMPLE`
Returns `True` if the mime type is correct.
`MODE.STRICT`
Returns `True` if the content can be interpreted as a json object.
"""
LOGGER.info('validating JSON; Mode: {}'.format(mode))
passed = False
if mode >= MODE.NONE:
passed = True
if mode >= MODE.SIMPLE:
name = data_input.file
(mtype, encoding) = mimetypes.guess_type(name, strict=False)
passed = data_input.data_format.mime_type in {mtype, FORMATS.JSON.mime_type}
if mode >= MODE.STRICT:
import json
try:
with open(data_input.file) as f:
json.load(f)
passed = True
except ValueError:
passed = False
return passed
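# A minimal, hypothetical sketch of exercising validatejson directly. The fake
# input below only mimics the attributes the validator actually reads (real
# inputs are pywps ComplexInput objects) and the file path is illustrative:
#
#   class FakeJSONInput(object):
#       file = '/tmp/data.json'
#       class data_format(object):
#           mime_type = 'application/json'
#   validatejson(FakeJSONInput(), MODE.STRICT)  # True only if the file parses as JSON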
def validategeojson(data_input, mode):
"""GeoJSON validation example
>>> class FakeInput(object):
... json = open('point.geojson','w')
... json.write('''{"type":"Feature", "properties":{}, "geometry":{"type":"Point", "coordinates":[8.5781228542328, 22.87500500679]}, "crs":{"type":"name", "properties":{"name":"urn:ogc:def:crs:OGC:1.3:CRS84"}}}''') # noqa
... json.close()
... file = 'point.geojson'
>>> class fake_data_format(object):
... mimetype = 'application/geojson'
>>> fake_input = FakeInput()
>>> fake_input.data_format = fake_data_format()
>>> validategeojson(fake_input, MODE.SIMPLE)
True
"""
LOGGER.info('validating GeoJSON; Mode: {}'.format(mode))
passed = False
if mode >= MODE.NONE:
passed = True
if mode >= MODE.SIMPLE:
name = data_input.file
(mtype, encoding) = mimetypes.guess_type(name, strict=False)
passed = data_input.data_format.mime_type in {mtype, FORMATS.GEOJSON.mime_type}
if mode >= MODE.STRICT:
from pywps.dependencies import ogr
data_source = ogr.Open(data_input.file)
if data_source:
passed = (data_source.GetDriver().GetName() == "GeoJSON")
else:
passed = False
if mode >= MODE.VERYSTRICT:
import jsonschema
import json
# this code comes from
# https://github.com/om-henners/GeoJSON_Validation/blob/master/geojsonvalidation/geojson_validation.py
schema_home = os.path.join(_get_schemas_home(), "geojson")
base_schema = os.path.join(schema_home, "geojson.json")
with open(base_schema) as fh:
geojson_base = json.load(fh)
with open(os.path.join(schema_home, "crs.json")) as fh:
crs_json = json.load(fh)
with open(os.path.join(schema_home, "bbox.json")) as fh:
bbox_json = json.load(fh)
with open(os.path.join(schema_home, "geometry.json")) as fh:
geometry_json = json.load(fh)
cached_json = {
"http://json-schema.org/geojson/crs.json": crs_json,
"http://json-schema.org/geojson/bbox.json": bbox_json,
"http://json-schema.org/geojson/geometry.json": geometry_json
}
resolver = jsonschema.RefResolver(
"http://json-schema.org/geojson/geojson.json",
geojson_base, store=cached_json)
validator = jsonschema.Draft4Validator(geojson_base, resolver=resolver)
try:
validator.validate(json.loads(data_input.stream.read()))
passed = True
except jsonschema.ValidationError:
passed = False
return passed
def validateshapefile(data_input, mode):
"""ESRI Shapefile validation example
"""
LOGGER.info('validating Shapefile; Mode: {}'.format(mode))
passed = False
if mode >= MODE.NONE:
passed = True
if mode >= MODE.SIMPLE:
name = data_input.file
(mtype, encoding) = mimetypes.guess_type(name, strict=False)
passed = data_input.data_format.mime_type in {mtype, FORMATS.SHP.mime_type}
if mode >= MODE.STRICT:
from pywps.dependencies import ogr
import zipfile
z = zipfile.ZipFile(data_input.file)
shape_name = None
for name in z.namelist():
z.extract(name, data_input.tempdir)
if os.path.splitext(name)[1].lower() == '.shp':
shape_name = name
if shape_name:
data_source = ogr.Open(os.path.join(data_input.tempdir, shape_name))
if data_source:
passed = (data_source.GetDriver().GetName() == "ESRI Shapefile")
else:
passed = False
return passed
def validategeotiff(data_input, mode):
"""GeoTIFF validation example
"""
    LOGGER.info('Validating GeoTIFF; Mode: {}'.format(mode))
passed = False
if mode >= MODE.NONE:
passed = True
if mode >= MODE.SIMPLE:
name = data_input.file
(mtype, encoding) = mimetypes.guess_type(name, strict=False)
passed = data_input.data_format.mime_type in {mtype, FORMATS.GEOTIFF.mime_type}
if mode >= MODE.STRICT:
try:
from pywps.dependencies import gdal
data_source = gdal.Open(data_input.file)
passed = (data_source.GetDriver().ShortName == "GTiff")
except ImportError:
passed = False
return passed
def validatenetcdf(data_input, mode):
"""netCDF validation.
"""
LOGGER.info('Validating netCDF; Mode: {}'.format(mode))
passed = False
if mode >= MODE.NONE:
passed = True
if mode >= MODE.SIMPLE:
name = data_input.file
(mtype, encoding) = mimetypes.guess_type(name, strict=False)
passed = data_input.data_format.mime_type in {mtype, FORMATS.NETCDF.mime_type}
if mode >= MODE.STRICT:
try:
from pywps.dependencies import netCDF4 as nc
nc.Dataset(data_input.file)
passed = True
except ImportError as e:
passed = False
LOGGER.exception("ImportError while validating netCDF4 file {}:\n {}".format(data_input.file, e))
except IOError as e:
passed = False
LOGGER.exception("IOError while validating netCDF4 file {}:\n {}".format(data_input.file, e))
return passed
def validatedods(data_input, mode):
"""OPeNDAP validation.
"""
LOGGER.info('Validating OPeNDAP; Mode: {}'.format(mode))
passed = False
if mode >= MODE.NONE:
passed = True
if mode >= MODE.SIMPLE:
name = data_input.url
(mtype, encoding) = mimetypes.guess_type(name, strict=False)
passed = data_input.data_format.mime_type in {mtype, FORMATS.DODS.mime_type}
if mode >= MODE.STRICT:
try:
from pywps.dependencies import netCDF4 as nc
nc.Dataset(data_input.url)
passed = True
except ImportError as e:
passed = False
LOGGER.exception("ImportError while validating OPeNDAP link {}:\n {}".format(data_input.url, e))
except IOError as e:
passed = False
LOGGER.exception("IOError while validating OPeNDAP link {}:\n {}".format(data_input.url, e))
return passed
def _get_schemas_home():
"""Get path to schemas directory
"""
schema_dir = os.path.join(
os.path.abspath(
os.path.dirname(__file__)
),
os.path.pardir,
"schemas")
LOGGER.debug('Schemas directory: {}'.format(schema_dir))
return schema_dir
|
py | b40c3cb6275cc43eced63e07a1f00753f86b8e43 | import csv
import os
import matplotlib.pyplot as plt
import numpy as np
from constants import *
# dpi of figure
DPI=500
# number of previous values to calculate average
TREND_ACCURACY=150
def plot_func(*args):
iterations = []
scores = []
averages = []
if len(args) > 0:
file_name = args[0]
file_image = file_name.replace('data/data', 'images/image')
file_image = file_image.replace('csv', 'png')
else:
file_name = FILE_DATA
file_image = FILE_IMAGE
#max_s = 0
#max_a = 0
namea = file_name.split('_')
change = len(namea) >= 9 and namea[2] == '15' and namea[6] == '1.0' and namea[8].split('.')[0] == '5'
with open(file_name, 'r') as csvfile:
spamreader = csv.reader(csvfile, delimiter=',')
for row in spamreader:
iterations.append(float(row[0]))
scores.append(float(row[1]))
#max_s = int(max(max_s, int(row[1])))
# calculating trend
averages.append(np.mean(scores[-TREND_ACCURACY:]))
#max_a = max(max_a, np.mean(scores[-TREND_ACCURACY:]))
#du 15 di 1.0 sz 5
if change and len(iterations) == 7000 and namea[4] != '1e-05':
                break
plt.xlim(len(iterations) + 150)
plt.gca().invert_xaxis()
plt.xlabel('Iterations')
plt.ylabel('Scores')
plt.title("Results for: " + file_image.replace('images/image_',''))
# plotting real values
plt.plot(iterations, scores, 'r.')
# plotting trend
plt.plot(iterations, averages, "b")
plt.savefig(file_image, dpi=DPI)
plt.clf()
#print(max_s)
#print(max_a)
if __name__ == '__main__':
plot_func()
|
py | b40c3d03448b09d873120b97b1a26bc00681f9fd | from decimal import Decimal
from cryptofeed.defines import TRADES
from cryptofeed.exchanges import Bitmex
from cryptofeed.standards import timestamp_normalize
class BitmexBlotter(Bitmex):
async def _trade(self, msg: dict, timestamp: float):
"""
trade msg example
{
'timestamp': '2018-05-19T12:25:26.632Z',
'symbol': 'XBTUSD',
'side': 'Buy',
'size': 40,
'price': 8335,
'tickDirection': 'PlusTick',
'trdMatchID': '5f4ecd49-f87f-41c0-06e3-4a9405b9cdde',
'grossValue': 479920,
'homeNotional': Decimal('0.0047992'),
'foreignNotional': 40
}
"""
for data in msg["data"]:
ts = timestamp_normalize(self.id, data["timestamp"])
price = Decimal(data["price"])
volume = Decimal(data["foreignNotional"])
notional = volume / price
await self.callback(
TRADES,
feed=self.id,
uid=data["trdMatchID"],
symbol=data["symbol"], # Do not normalize
timestamp=ts,
price=price,
volume=volume,
notional=notional,
tickRule=1 if data["side"] == "Buy" else -1,
)
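# Hedged wiring sketch (not part of the original module): a cryptofeed
# FeedHandler would normally dispatch TRADES updates into this blotter. The
# exact constructor keywords (pairs= vs symbols=) and callback wrappers differ
# between cryptofeed releases, so the lines below are illustrative only:
#
#   from cryptofeed import FeedHandler
#   async def on_trade(**kwargs):   # receives the keyword payload built in _trade above
#       print(kwargs)
#   fh = FeedHandler()
#   fh.add_feed(BitmexBlotter(channels=[TRADES], pairs=['XBTUSD'],
#                             callbacks={TRADES: on_trade}))
#   fh.run()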
|
py | b40c3d6a1285e0f00c92d7470aa221dfd08d5526 | import nltk
from nltk.tag import AffixTagger
from nltk.corpus import treebank
testing = treebank.tagged_sents()[2000:]
training= treebank.tagged_sents()[:7000]
affixtag = AffixTagger(training)
print(affixtag.evaluate(testing))
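# The trained tagger can also tag new token lists directly; an AffixTagger
# backs off to None for words whose affix was not seen during training.
print(affixtag.tag(["The", "researchers", "annotated", "several", "sentences"]))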
|
py | b40c3db0e0a9fa4aa167960e124ea7c51ddf84da | for i in range(10,-1,-1):
print(i) |
py | b40c3ddee1d8bc6a40cf850b02711da92c091fd7 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from cleep.libs.internals.event import Event
class SystemResourceReleasedEvent(Event):
"""
system.resource.released event
"""
EVENT_NAME = u'system.resource.released'
EVENT_PROPAGATE = False
EVENT_PARAMS = [u'resource', u'module']
def __init__(self, params):
"""
Constructor
Args:
params (dict): event parameters
"""
Event.__init__(self, params)
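# Hedged sketch of the parameter payload this event is expected to carry,
# matching EVENT_PARAMS above (the values are illustrative only):
#   {'resource': 'audio.playback', 'module': 'audioplayer'}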
|
py | b40c3df51e9779fa2c3f9a14b8e929b2bee6ca20 | """
Pyth -- Python text markup and conversion
"""
import os.path
__version__ = '0.5.6'
writerMap = {
'.rtf': 'pyth.plugins.rtf15.writer.Rtf15Writer',
'.html': 'pyth.plugins.xhtml.writer.XHTMLWriter',
'.xhtml': 'pyth.plugins.xhtml.writer.XHTMLWriter',
'.txt': 'pyth.plugins.plaintext.writer.PlaintextWriter',
'.pdf': 'pyth.plugins.pdf.writer.PDFWriter',
}
mimeMap = {
'.rtf': 'application/rtf',
'.html': 'text/html',
'.xhtml': 'application/xhtml+xml',
'.txt': 'text/plain',
}
def write(doc, filename):
ext = os.path.splitext(filename)[1]
writer = namedObject(writerMap[ext])
buff = writer.write(doc)
buff.seek(0)
return (buff, mimeMap[ext])
# Stolen from twisted.python.reflect
def namedModule(name):
"""Return a module given its name."""
topLevel = __import__(name)
packages = name.split(".")[1:]
m = topLevel
for p in packages:
m = getattr(m, p)
return m
def namedObject(name):
"""Get a fully named module-global object.
"""
classSplit = name.split('.')
module = namedModule('.'.join(classSplit[:-1]))
return getattr(module, classSplit[-1])
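# Hedged usage sketch: given a pyth document object `doc` built elsewhere,
# write() dispatches on the output file extension through writerMap/mimeMap:
#   buff, mime = write(doc, 'report.txt')   # PlaintextWriter, 'text/plain'
#   buff, mime = write(doc, 'report.html')  # XHTMLWriter, 'text/html'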
|
py | b40c3e5ba96bba3463444962268e032936235eef | #List Comprehensions its better option then map and filter
items = [
("Product-1",10),
("Product-2",20),
("Product-3",30)
]
maplist = list(map(lambda item: item[1],items))
print(maplist)
# maplistCo = [expression for item in items ]
maplistCo = [item[1] for item in items]
print(maplistCo)
filterlist = list(filter(lambda item : item[1] > 10,items))
print(filterlist)
filterlistCo = [item for item in items if item[1] > 10]
print(filterlistCo)
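# The filter and map steps above can also be combined in a single comprehension:
expensiveNames = [name for (name, price) in items if price > 10]
print(expensiveNames)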
|
py | b40c4193f93ee7333150d6ff6301e091ba7d46ef | __file__
import pytest
import configparser
from playwright.sync_api import sync_playwright
from os import path
config_ini = configparser.ConfigParser()
config_ini.read( "conf.ini", encoding = "utf-8" )
print("SET_TIMEOUT = " + config_ini['DEFAULT']['SETTIMEOUT'])
print("SET_WAIT = " + config_ini['DEFAULT']['SETWAIT'])
print("SET_WAIT = " + config_ini['DEFAULT']['SETWFDAY'])
print("ACTIVE_LOCK = " + config_ini['DEFAULT']['ACTIVELOCK'])
print("WEKO_URL = " + config_ini['DEFAULT']['WEKOURL'])
SET_TIMEOUT = config_ini['DEFAULT']['SETTIMEOUT']
SET_WAIT = config_ini['DEFAULT']['SETWAIT']
SET_WFDAY = config_ini['DEFAULT']['SETWFDAY']
ACTIVE_LOCK = config_ini['DEFAULT']['ACTIVELOCK']
WEKO_URL = config_ini['DEFAULT']['WEKOURL']
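# For reference, a conf.ini providing the keys read above could look like the
# sketch below (all values are illustrative assumptions, not project defaults):
#
#   [DEFAULT]
#   SETTIMEOUT = 30000
#   SETWAIT = 1000
#   SETWFDAY = NEW
#   ACTIVELOCK = OFF
#   WEKOURL = https://localhost/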
def run(playwright):
browser = playwright.chromium.launch(headless=False)
context = browser.new_context(ignore_https_errors=True)
# Open new page
page = context.new_page()
# Go to https://localhost/
page.goto(WEKO_URL,timeout=int(SET_TIMEOUT))
# Click text=/.*Log in.*/
page.click("text=/.*Log in.*/")
# assert page.url == "https://localhost/login/?next=%2F"
# Fill input[name="email"]
page.fill("input[name=\"email\"]", "[email protected]")
# Fill input[name="password"]
page.fill("input[name=\"password\"]", "uspass123")
# Click text=/.*Log In.*/
page.click("text=/.*Log In.*/")
# assert page.url == "https://localhost/"
# Click text="Workflow"
page.click("text=\"Workflow\"")
# assert page.url == "https://localhost/workflow/"
if SET_WFDAY == "NEW":
# Click text=/.*New Activity.*/
page.click("text=/.*New Activity.*/")
with page.expect_navigation():
page.click("//tr[3]/td[4]/button[normalize-space(.)=' New']")
else:
# Go to https://localhost/workflow/activity/detail/A-20220203-00001
page.goto("https://localhost/workflow/activity/detail/A-" + SET_WFDAY,timeout=int(SET_TIMEOUT))
# Click div[id="activity_locked"] >> text="OK"
if ACTIVE_LOCK == "ON":
page.click("div[id=\"activity_locked\"] >> text=\"OK\"")
# assert page.url == "https://localhost/workflow/activity/detail/A-20220203-00001?status="
# Click input[name="pubdate"]
page.click("input[name=\"pubdate\"]")
# Click text="02"
page.click("text=\"02\"")
# Fill input[name="item_1617186331708.0.subitem_1551255647225"]
page.fill("input[name=\"item_1617186331708.0.subitem_1551255647225\"]", "登録テストアイテム1")
# Click input[name="item_1617186331708.0.subitem_1551255647225"]
page.click("input[name=\"item_1617186331708.0.subitem_1551255647225\"]")
# Select string:ja
page.select_option("//div[normalize-space(.)='jaja-Kanaenfritdeeszh-cnzh-twrulamseoarelko']/select", "string:ja")
# Select string:conference paper
# page.select_option("//div[starts-with(normalize-space(.), 'conference paperdata paperdepartmental bulletin papereditorialjournal articlenew')]/select", "string:sound")
    # Click "Version Type" so that the Resource Type section scrolls into view
page.click("//*[@id='weko-records']/invenio-files-uploader/invenio-records/div[2]/div[8]/invenio-records-form/div/div/form/bootstrap-decorator[17]/fieldset/div/div[1]/a")
page.wait_for_timeout(int(SET_WAIT))
page.screenshot(path=f'{path.splitext(path.basename(__file__))[0]+"_1"}_capture.png')
page.click('//*[@id="weko-records"]/invenio-files-uploader/invenio-records/div[2]/div[9]/div/div[1]/div/button[2]')
page.wait_for_timeout(int(SET_WAIT))
page.screenshot(path=f'{path.splitext(path.basename(__file__))[0]+"_2"}_capture.png')
# Close page
page.close()
# ---------------------
context.close()
browser.close()
return 0
def test_OK():
assert a == 0
with sync_playwright() as playwright:
a = run(playwright)
test_OK() |
py | b40c42cd11e1ab99b2a3e9c26727bb4db2bd925b | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-09-29 01:19
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('body', models.TextField()),
('created_time', models.DateTimeField()),
('modified_time', models.DateTimeField()),
('excerpt', models.CharField(blank=True, max_length=200)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.Category')),
],
),
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
],
),
migrations.AddField(
model_name='post',
name='tags',
field=models.ManyToManyField(blank=True, to='blog.Tag'),
),
]
|
py | b40c44985271dd334ac654bf90457146c2fc5080 | #--- Exercicio 1 - Input, Estrutura de decisão e operações matemáticas
#--- Crie um programa que leia dois números inteiros
#--- Realize as 4 operações matemáticas básicas com os números lidos
#--- Imprima os resultados das operações
#--- Informe qual número é maior ou se os dois são iguais
n1 = float(input('Informe o primeiro número: '))
n2 = float(input('Informe o segundo número: '))
print(f'{n1} + {n2} = {n1+n2}')
print(f'{n1} - {n2} = {n1-n2}')
print(f'{n1} / {n2} = {n1/n2}')
print(f'{n1} * {n2} = {n1*n2}')
if n1 > n2:
print(f'O maior número é {n1}.')
elif n2 == n2:
print(f'Os dois números são iguais.')
else:
print(f'O máior número é {n2}.') |
py | b40c463683ea1c606a2dd6fca945ed3a32a19600 | ################################################################################
class Blacklist:
    def __init__(self):
        self.addresses = set()  # placeholder store of blocked client addresses (assumed)
    def verify(self, addr):
        return addr in self.addresses  # hypothetical lookup used by Security.client_connect below
################################################################################
class Security:
def __init__(self):
self.blacklist = Blacklist()
    def client_connect(self, proxy):
        # Assumed intent: check the connecting client's address against the blacklist
        return self.blacklist.verify(proxy.client.addr[0])
def client_check(self, proxy):
pass
################################################################################
|
py | b40c464d654f91e5a95dcabe052a29f4e5b5cb66 | import random
import characterclass
from dice import d, xdy
class BasicAttributesMixin(object):
"""
Generates the basic attributes of a D&D character: STR, INT, DEX, CON, WIS,
CHA. The scores are rolled using 3d6 in order.
"""
def __init__(self, *args, **kwargs):
self.attributes = self.roll_attribute_scores()
# attribute map to ease display in template
self.attr = dict((attr, self.with_bonus(attr, value))
for attr, value in self.attributes)
@property
def STR(self): return self.attributes[characterclass.STR][1]
@property
def INT(self): return self.attributes[characterclass.INT][1]
@property
def DEX(self): return self.attributes[characterclass.DEX][1]
@property
def CON(self): return self.attributes[characterclass.CON][1]
@property
def WIS(self): return self.attributes[characterclass.WIS][1]
@property
def CHA(self): return self.attributes[characterclass.CHA][1]
def roll_attribute_scores(self):
"""
Rolls the attribute scores: 3d6 in order, as one would expect.
"""
return [(attribute, xdy(3, 6)) for attribute in characterclass.ATTRIBUTES]
def get_bonus(self, attr, val):
"""
Return the bonus for the given attribute (the Moldvay D&D attribute
bonuses.) Most sub-classes will override. Bonuses on attributes differ
from edition to edition.
"""
if val <= 3:
bonus = -3
elif 4 <= val <= 5:
bonus = -2
elif 6 <= val <= 8:
bonus = -1
elif 9 <= val <= 12:
bonus = 0
elif 13 <= val <= 15:
bonus = 1
elif 16 <= val <= 17:
bonus = 2
else:
bonus = 3
return bonus
def with_bonus(self, attr, val):
"""
Return attribute value with bonus attached, for display.
"""
bonus = self.get_bonus(attr, val)
if bonus:
return "%d (%+d)" % (val, bonus)
return "%d" % val
class NameMixin(object):
"""
Generate a random name for this character.
"""
@property
def name(self):
race = self.class_name if self.class_name in ["Dwarf", "Elf", "Halfling"] else "Human"
gender = self.appearance.split(", ")[0]
if gender not in ["Male", "Female"]:
gender = random.choice(["Male", "Female"])
return '%s %s' % (random.choice(characterclass.NAMES[race][gender]), random.choice(characterclass.NAMES[race]["Last"]))
class AppearanceMixin(object):
"""
Display the appearance of the character. This is the best part of this
generator. It's all ugly murderhobo children.
"""
def get_appearance(self):
return ', '.join(random.choice(feature) for feature in characterclass.APPEARANCE)
class AscendingAcMixin(object):
"""
Display the attack bonuses rather than a to-hit table. AC is ascending.
The assumptions here are from LotFP.
"""
@property
def base_armour_class(self):
"""
        The default armour class of an unarmoured combatant is 12 (the LotFP ascending-AC baseline).
"""
return 12
@property
def attack_bonus(self):
return 2 if self.character_class == characterclass.FIGHTER else 1
@property
def melee_attack_bonus(self):
bonus = self.get_bonus(*self.attributes[characterclass.STR])
bonus += self.attack_bonus
if bonus > 0:
bonus = "+%d" % bonus
return bonus
@property
def ranged_attack_bonus(self):
bonus = self.get_bonus(*self.attributes[characterclass.DEX])
bonus += self.attack_bonus
if bonus > 0:
bonus = "+%d" % bonus
return bonus
def get_ac(self):
"""
The character's armor class based on their starting equipment.
"""
ac = self.base_armour_class
if "Leather Armor" in self.equipment:
ac += 2
elif "Chain Armor" in self.equipment:
ac += 4
elif "Plate Armor" in self.equipment:
ac += 6
if "Shield" in self.equipment:
ac += 1
ac += self.get_bonus(*self.attributes[characterclass.DEX])
return ac
def get_to_hit_table(self):
return None
class HitDiceMixin(object):
"""
In some OD&D games HP is re-rolled per session, so it doesn't make much
sense to display the computed HP value. Instead we simply display the HD of
the character, either 1 or 1+1 for Fighters.
"""
def get_hp(self):
# we set HP to None, which lets the template know we will display HD
# instead.
return None
@property
def hd(self):
return "1" if self.character_class != characterclass.FIGHTER else "1+1"
class PsionicWildTalentMixin(object):
"""
If you want to allow psionic wild talents as outlined in a blog post I
wrote on the topic some time ago:
"""
def __init__(self, *args, **kwargs):
super(PsionicWildTalentMixin, self).__init__(*args, **kwargs)
# roll for chance of psionic power
self.wild_talent = self.get_wild_talent()
def get_wild_talent(self):
# TODO: what frequency do I actually want here?
if d(6) != 1:
return
talent_roll = self.WIS - d(20)
if talent_roll < 0:
save_bonus = abs(talent_roll) / 2
if save_bonus:
return "+%d to saves vs. psionic attacks" % save_bonus
else:
return None
else:
return characterclass.WILD_TALENTS[talent_roll]
|
py | b40c4692e35ca7a0872a19d361ff3ebfc8916abb | """
Django settings for jdh project.
Generated by 'django-admin startproject' using Django 3.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
from .base import get_env_variable
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = get_env_variable('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = get_env_variable('DEBUG', 'True') == 'True'
ALLOWED_HOSTS = get_env_variable('ALLOWED_HOSTS', 'localhost').split(',')
DRF_RECAPTCHA_SECRET_KEY = get_env_variable('DRF_RECAPTCHA_SECRET_KEY')
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'dashboard.apps.DashboardConfig',
'rest_framework',
'jdhapi.apps.JdhapiConfig',
'jdhseo.apps.JdhseoConfig',
'jdhtasks.apps.JdhtasksConfig',
    # to use Bootstrap
'crispy_forms',
'drf_recaptcha',
'django_filters',
]
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.IsAuthenticatedOrReadOnly',
],
'DEFAULT_FILTER_BACKENDS': ['django_filters.rest_framework.DjangoFilterBackend']
# 'DEFAULT_PAGINATION_CLASS': [
# 'rest_framework.pagination.PageNumberPagination',
# ],
# 'PAGE_SIZE' : 5
}
CRISPY_TEMPLATE_PACK = 'bootstrap4'
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'jdh.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'jdh.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': get_env_variable('DATABASE_ENGINE'), # 'django.db.backends.postgresql_psycopg2',
'NAME': get_env_variable('DATABASE_NAME'),
'USER': get_env_variable('DATABASE_USER'),
'PASSWORD': get_env_variable('DATABASE_PASSWORD'),
'HOST': get_env_variable('DATABASE_HOST', 'localhost'),
'PORT': get_env_variable('DATABASE_PORT', '54320'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Paris'
USE_I18N = True
USE_L10N = True
USE_TZ = True
JDH_SCHEMA_ROOT = get_env_variable(
'JDH_SCHEMA_ROOT',
os.path.join(BASE_DIR, 'schema')
)
# Current version
JDH_GIT_BRANCH = get_env_variable('JDH_GIT_BRANCH', 'nd')
JDH_GIT_REVISION = get_env_variable('JDH_GIT_REVISION', 'nd')
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = get_env_variable('STATIC_URL', '/static/')
STATIC_ROOT = get_env_variable('STATIC_ROOT', '/static')
STATICFILES_DIRS = [
# ...
('schema', JDH_SCHEMA_ROOT),
]
MEDIA_URL = get_env_variable('MEDIA_URL', '/media/')
MEDIA_ROOT = get_env_variable('MEDIA_ROOT', '/media')
# ACCOUNT_EMAIL_VERIFICATION = 'none'
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# Host for sending e-mail.
EMAIL_HOST = get_env_variable('EMAIL_HOST', 'smtp.')
# Port for sending e-mail.
EMAIL_PORT = get_env_variable('EMAIL_PORT', 0)
# When True, reCAPTCHA is not verified against Google and no warnings are raised (testing mode)
DRF_RECAPTCHA_TESTING = get_env_variable('DRF_RECAPTCHA_TESTING', 'False') == 'True'
# ADD logging
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'formatter': 'verbose'
},
},
'root': {
'handlers': ['console'],
'level': 'INFO',
},
'formatters': {
'verbose': {
# 'format': '%(levelname)s %(asctime)s %(module)s %(process)d
# %(thread)d %(message)s'
'format': '{levelname} {asctime} - {name:s} L{lineno:d}: {message}',
'style': '{',
},
},
}
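# With the configuration above, application code logs through the root console
# handler in the usual way, e.g.:
#   import logging
#   logger = logging.getLogger(__name__)
#   logger.info("request processed")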
# Celery
REDIS_HOST = get_env_variable('REDIS_HOST', 'localhost')
REDIS_PORT = get_env_variable('REDIS_PORT', '6379')
CELERY_BROKER_URL = f'redis://{REDIS_HOST}:{REDIS_PORT}/4'
CELERY_RESULT_BACKEND = f'redis://{REDIS_HOST}:{REDIS_PORT}/5'
CELERYD_PREFETCH_MULTIPLIER = 2
CELERYD_CONCURRENCY = 2
# jdhseo
JDHSEO_PROXY_HOST = get_env_variable(
'JDHSEO_PROXY_HOST', 'https://journalofdigitalhistory.org/')
JDHSEO_PROXY_PATH_GITHUB = get_env_variable(
'JDHSEO_PROXY_PATH_GITHUB', '/proxy-githubusercontent')
|
py | b40c47808eeb18b7de463d7db59bed24ce0b8e3c | ############################ Copyrights and license ############################
# #
# Copyright 2013 Vincent Jacques <[email protected]> #
# Copyright 2014 Vincent Jacques <[email protected]> #
# Copyright 2016 Jannis Gebauer <[email protected]> #
# Copyright 2016 Peter Buckley <[email protected]> #
# Copyright 2018 Wan Liuyang <[email protected]> #
# Copyright 2018 sfdye <[email protected]> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
import github.GithubObject
import github.Rate
class RateLimit(github.GithubObject.NonCompletableGithubObject):
"""
This class represents RateLimits. The reference can be found here http://developer.github.com/v3/rate_limit
"""
def __repr__(self):
return self.get__repr__({"core": self._core.value})
@property
def core(self):
"""
Rate limit for the non-search-related API
:type: class:`github.Rate.Rate`
"""
return self._core.value
@property
def search(self):
"""
Rate limit for the Search API.
:type: class:`github.Rate.Rate`
"""
return self._search.value
@property
def graphql(self):
"""
(Experimental) Rate limit for GraphQL API, use with caution.
:type: class:`github.Rate.Rate`
"""
return self._graphql.value
def _initAttributes(self):
self._core = github.GithubObject.NotSet
self._search = github.GithubObject.NotSet
self._graphql = github.GithubObject.NotSet
def _useAttributes(self, attributes):
if "core" in attributes: # pragma no branch
self._core = self._makeClassAttribute(github.Rate.Rate, attributes["core"])
if "search" in attributes: # pragma no branch
self._search = self._makeClassAttribute(
github.Rate.Rate, attributes["search"]
)
if "graphql" in attributes: # pragma no branch
self._graphql = self._makeClassAttribute(
github.Rate.Rate, attributes["graphql"]
)
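# Hedged usage sketch (the token below is a placeholder): Github.get_rate_limit()
# returns an instance of this class, so the per-API rates can be read as:
#   from github import Github
#   gh = Github("<access-token>")
#   limit = gh.get_rate_limit()
#   print(limit.core.remaining, limit.search.remaining, limit.graphql.remaining)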
|
py | b40c4817572baa1cc8132a2bfdac83ef65df18f7 | """Power commands."""
# :license: MIT, see LICENSE for more details.
import SoftLayer
from SoftLayer.CLI import environment
from SoftLayer.CLI import exceptions
from SoftLayer.CLI import formatting
from SoftLayer.CLI import helpers
import click
@click.command()
@click.argument('identifier')
@environment.pass_env
def power_off(env, identifier):
"""Power off an active server."""
mgr = SoftLayer.HardwareManager(env.client)
hw_id = helpers.resolve_id(mgr.resolve_ids, identifier, 'hardware')
if not (env.skip_confirmations or
            formatting.confirm('This will power off the server with id %s. '
'Continue?' % hw_id)):
raise exceptions.CLIAbort('Aborted.')
env.client['Hardware_Server'].powerOff(id=hw_id)
@click.command()
@click.argument('identifier')
@click.option('--hard/--soft',
default=None,
help="Perform a hard or soft reboot")
@environment.pass_env
def reboot(env, identifier, hard):
"""Reboot an active server."""
hardware_server = env.client['Hardware_Server']
mgr = SoftLayer.HardwareManager(env.client)
hw_id = helpers.resolve_id(mgr.resolve_ids, identifier, 'hardware')
if not (env.skip_confirmations or
            formatting.confirm('This will reboot the server with id %s. '
'Continue?' % hw_id)):
raise exceptions.CLIAbort('Aborted.')
if hard is True:
hardware_server.rebootHard(id=hw_id)
elif hard is False:
hardware_server.rebootSoft(id=hw_id)
else:
hardware_server.rebootDefault(id=hw_id)
@click.command()
@click.argument('identifier')
@environment.pass_env
def power_on(env, identifier):
"""Power on a server."""
mgr = SoftLayer.HardwareManager(env.client)
hw_id = helpers.resolve_id(mgr.resolve_ids, identifier, 'hardware')
env.client['Hardware_Server'].powerOn(id=hw_id)
@click.command()
@click.argument('identifier')
@environment.pass_env
def power_cycle(env, identifier):
"""Power cycle a server."""
mgr = SoftLayer.HardwareManager(env.client)
hw_id = helpers.resolve_id(mgr.resolve_ids, identifier, 'hardware')
if not (env.skip_confirmations or
            formatting.confirm('This will power cycle the server with id %s. '
'Continue?' % hw_id)):
raise exceptions.CLIAbort('Aborted.')
env.client['Hardware_Server'].powerCycle(id=hw_id)
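# Hedged CLI sketch: once these click commands are registered in the SoftLayer
# CLI routing table they are typically invoked along these lines (the exact
# group and command names depend on the installed version's routing table):
#   slcli server power-off <identifier>
#   slcli server reboot --hard <identifier>
#   slcli server power-cycle <identifier>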
|
py | b40c4897cfc415dff2d9a91cc55e3b168fd891d6 | from .base import *
ALLOWED_HOSTS = []
|
py | b40c4970105f91b384a0e9a59a64fe56441de611 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# pylint: disable=missing-docstring,invalid-name
import random
import tempfile
import shutil
import os.path
# pylint: disable=redefined-builtin,wildcard-import,unused-wildcard-import
from ansible.module_utils.basic import * # noqa: F403
DOCUMENTATION = '''
---
module: openshift_container_binary_sync
short_description: Copies OpenShift binaries out of the given image tag to host system.
'''
class BinarySyncError(Exception):
def __init__(self, msg):
super(BinarySyncError, self).__init__(msg)
self.msg = msg
# pylint: disable=too-few-public-methods,too-many-instance-attributes
class BinarySyncer(object):
"""
Syncs the openshift, oc, oadm, and kubectl binaries/symlinks out of
a container onto the host system.
"""
def __init__(self, module, image, tag, backend):
self.module = module
self.changed = False
self.output = []
self.bin_dir = '/usr/local/bin'
self.image = image
self.tag = tag
self.backend = backend
self.temp_dir = None # TBD
def sync(self):
if self.backend == 'atomic':
return self._sync_atomic()
return self._sync_docker()
def _sync_atomic(self):
self.temp_dir = tempfile.mkdtemp()
temp_dir_mount = tempfile.mkdtemp()
try:
image_spec = '%s:%s' % (self.image, self.tag)
rc, stdout, stderr = self.module.run_command(['atomic', 'mount',
'--storage', "ostree",
image_spec, temp_dir_mount])
if rc:
raise BinarySyncError("Error mounting image. stdout=%s, stderr=%s" %
(stdout, stderr))
for i in ["openshift", "oc"]:
src_file = os.path.join(temp_dir_mount, "usr/bin", i)
shutil.copy(src_file, self.temp_dir)
self._sync_binaries()
finally:
self.module.run_command(['atomic', 'umount', temp_dir_mount])
shutil.rmtree(temp_dir_mount)
shutil.rmtree(self.temp_dir)
def _sync_docker(self):
container_name = "openshift-cli-%s" % random.randint(1, 100000)
rc, stdout, stderr = self.module.run_command(['docker', 'create', '--name',
container_name, '%s:%s' % (self.image, self.tag)])
if rc:
raise BinarySyncError("Error creating temporary docker container. stdout=%s, stderr=%s" %
(stdout, stderr))
self.output.append(stdout)
try:
self.temp_dir = tempfile.mkdtemp()
self.output.append("Using temp dir: %s" % self.temp_dir)
rc, stdout, stderr = self.module.run_command(['docker', 'cp', "%s:/usr/bin/openshift" % container_name,
self.temp_dir])
if rc:
raise BinarySyncError("Error copying file from docker container: stdout=%s, stderr=%s" %
(stdout, stderr))
rc, stdout, stderr = self.module.run_command(['docker', 'cp', "%s:/usr/bin/oc" % container_name,
self.temp_dir])
if rc:
raise BinarySyncError("Error copying file from docker container: stdout=%s, stderr=%s" %
(stdout, stderr))
self._sync_binaries()
finally:
shutil.rmtree(self.temp_dir)
self.module.run_command(['docker', 'rm', container_name])
def _sync_binaries(self):
self._sync_binary('openshift')
# In older versions, oc was a symlink to openshift:
if os.path.islink(os.path.join(self.temp_dir, 'oc')):
self._sync_symlink('oc', 'openshift')
else:
self._sync_binary('oc')
# Ensure correct symlinks created:
self._sync_symlink('kubectl', 'openshift')
self._sync_symlink('oadm', 'openshift')
def _sync_symlink(self, binary_name, link_to):
""" Ensure the given binary name exists and links to the expected binary. """
# The symlink we are creating:
link_path = os.path.join(self.bin_dir, binary_name)
# The expected file we should be linking to:
link_dest = os.path.join(self.bin_dir, link_to)
if not os.path.exists(link_path) or \
not os.path.islink(link_path) or \
os.path.realpath(link_path) != os.path.realpath(link_dest):
if os.path.exists(link_path):
os.remove(link_path)
os.symlink(link_to, os.path.join(self.bin_dir, binary_name))
self.output.append("Symlinked %s to %s." % (link_path, link_dest))
self.changed = True
def _sync_binary(self, binary_name):
src_path = os.path.join(self.temp_dir, binary_name)
dest_path = os.path.join(self.bin_dir, binary_name)
incoming_checksum = self.module.run_command(['sha256sum', src_path])[1]
if not os.path.exists(dest_path) or self.module.run_command(['sha256sum', dest_path])[1] != incoming_checksum:
# See: https://github.com/openshift/openshift-ansible/issues/4965
if os.path.islink(dest_path):
os.unlink(dest_path)
self.output.append('Removed old symlink {} before copying binary.'.format(dest_path))
shutil.move(src_path, dest_path)
self.output.append("Moved %s to %s." % (src_path, dest_path))
self.changed = True
def main():
module = AnsibleModule( # noqa: F405
argument_spec=dict(
image=dict(required=True),
tag=dict(required=True),
backend=dict(required=True),
),
supports_check_mode=True
)
image = module.params['image']
tag = module.params['tag']
backend = module.params['backend']
if backend not in ["docker", "atomic"]:
module.fail_json(msg="unknown backend")
binary_syncer = BinarySyncer(module, image, tag, backend)
try:
binary_syncer.sync()
except BinarySyncError as ex:
module.fail_json(msg=ex.msg)
return module.exit_json(changed=binary_syncer.changed,
output=binary_syncer.output)
if __name__ == '__main__':
main()
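# Hedged example of a playbook task driving this module (the image and tag
# values are illustrative only; backend must be "docker" or "atomic"):
#
#   - openshift_container_binary_sync:
#       image: "openshift/origin"
#       tag: "v3.9.0"
#       backend: "docker"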
|
py | b40c49e40534333ef5a57b267ec1008404f25647 | #
# Copyright (c) 2014 by Christian E. Hopps.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from setuptools import setup, Extension
import sys
if sys.version_info >= (3, 0):
bstr = Extension('pyisis.bstr', sources=['src/bstr.c'])
extra = {
'ext_modules': [bstr],
'entry_points': { "console_scripts": [ "pyisis = pyisis.main:main", ] },
}
else:
bstr = Extension('pyisis.bstr', sources=['src/bstr.c'])
extra = {
'ext_modules': [bstr],
'entry_points': { "console_scripts": [ "pyisis = pyisis.main:main", ] },
}
setup (name='pyisis', # pylint: disable=W0142
version='1.0',
description='IS-IS [partial ISO10589:2002]',
author='Christian E. Hopps',
author_email='[email protected]',
packages=['pyisis'],
**extra)
|
py | b40c49f7eb0f16d75a362943d898f31047b82108 | import gfapy
class gfa_parser(object):
def __init__(self, filename):
self.filename = filename
def read_gfa_file(self):
'''
Reads a GFA file
'''
g = gfapy.Gfa.from_file(self.filename)
#Print all lines in the GFA file
#print ("\tAll lines:\n===================\n")
#for line in g.lines:
#print(line)
#print ("\tAll segments:\n===================\n")
#Print all segments in the GFA file
#for line in g.segments:
# print(line)
#print ("\tAll fragments:\n===================\n")
#Print all segments in the GFA file
#for line in g.fragments:
# print(line)
print ("\tAll edges:\n===================\n")
#Print all edges in the GFA file
print(g.edges)
return 1
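# Hedged usage sketch (the file name is illustrative):
#   parser = gfa_parser("assembly.gfa")
#   parser.read_gfa_file()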
|
py | b40c4a1e7458f2a56e4757dc06633966ce04bc31 | # openSearchResults.py - Opens several search results.
# TODO: To be fixed - bypassing the "Before you continue to Google Search" consent window
import bs4
import os
import pyinputplus as pyip
import requests
import webbrowser
inp = pyip.inputStr(prompt="What are you looking for? ")
print('Searching...')
res = requests.get('https://google.com/search', params={'q': inp})
res.raise_for_status()
print(f"Status: {res.status_code}")
# Retrieve top search result links.
soup = bs4.BeautifulSoup(res.text, 'html.parser')
print(soup) # output the css and html (temporarly for debugging)
# Open a browser tab for each result.
linkElems = soup.select('.package-snippet')
numOpen = min(5, len(linkElems))
for i in range(numOpen):
urlToOpen = 'https://google.com' + linkElems[i].get('href')
print('Opening', urlToOpen)
webbrowser.open(urlToOpen)
|
py | b40c4a42ac42c71b52af7c02e17138ba0be9a8bf | """ LOAD MODULES AND PROCES DATA """
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate
import numpy as np
import pandas as pd
import pathlib
import pickle
import dash_katex
import time
import seaborn as sn
import flask
import os
import dash_auth
import Figures
import LinearRegression
#remove regression warnings
np.seterr(all='ignore')
""" TEMPORARY PASSWORD """
#VALID_USERNAME_PASSWORD_PAIRS = [ ['itz', 'itz'] ]
server = flask.Flask(__name__)
server.secret_key = os.environ.get('secret_key', 'secret')
""" CREATE APP """
app = dash.Dash(
__name__,
server = server,
meta_tags=[{"name": "viewport", "content": "width=device-width, initial-scale=1"}],
url_base_pathname='/gunicorn/',
)
app.config.suppress_callback_exceptions = True
#auth = dash_auth.BasicAuth(app, VALID_USERNAME_PASSWORD_PAIRS)
""" LOAD DATA """
# Path
BASE_PATH = pathlib.Path(__file__).parent.resolve()
DATA_PATH = BASE_PATH.joinpath("data").resolve()
#load gene data
#gene_data = shelve.open("data/gene_data")
gene_data = pickle.load( open( "data/gene_data_dic.p", "rb" ))
gene_list = sorted(gene_data.keys())
#load atger data (already filtered to contain only common genes)
gene_data_atg = pickle.load( open( "data/gene_data_atg_dic.p", "rb" ))
gene_list_atg = sorted(gene_data_atg.keys())
#load 2D regressions
dic_reg = pickle.load( open( "data/dic_reg.p", "rb" ))
#load colors
colorscale = sn.color_palette("GnBu_d",8).as_hex()
""" CREATE TAB STYLE """
tab_style = {
'borderBottom': '1px solid #d6d6d6',
#'padding': '18px',
'fontWeight': 'normal',
#'font-size' : 'large'
}
tab_selected_style = {
'borderTop': '1px solid #d6d6d6',
'borderBottom': '1px solid #d6d6d6',
'backgroundColor': colorscale[3],
'color': 'white',
'padding': '18px',
#'font-size' : 'large'
}
tab_style_reg = {
'borderBottom': '1px solid #d6d6d6',
'padding': '10px',
'fontWeight': 'normal',
}
tab_selected_style_reg = {
'borderTop': '1px solid #d6d6d6',
'borderBottom': '1px solid #d6d6d6',
'backgroundColor': colorscale[3],
'padding': '10px',
'color' : 'white'
}
""" CREATE IMPORTANT APP PARTS """
def description_card():
"""
:return: A Div containing dashboard title & descriptions for tab 1.
"""
return html.Div(
id="description-card",
children = [
html.H3("Welcome to CZviz"),
dcc.Markdown('''This app allows you to explore the full dataset and \
            analysis from the study by _Droin et al._ published in \
_Nature Metabolism_.'''),
],
)
def description_card2():
"""
:return: A Div containing dashboard title & descriptions for tab 2.
"""
return html.Div(
id="intro2",
children = [
html.Div(id = 'paragraph-selected-gene-2', style = {'text-align':'left'}),
#dcc.Markdown("Please select the type of analysis you're interested in.")
],
)
def generate_control_card():
"""
:return: A Div containing controls for graphs.
"""
return html.Div(
id="control-card",
children=[
html.P("Please type gene name"),
dcc.Dropdown(
id="gene-select",
options=[{"label": gene_name.capitalize(), "value": gene_name} for gene_name in gene_list],
value='cry1',
),
html.Br(),
],
)
def generate_control_card2():
"""
:return: A Div containing controls for analysis.
"""
return html.Div(
id="control-card2",
children=[
#html.P("Please type gene name"),
#dcc.Dropdown(
# id="gene-select2",
# options=[{"label": gene_name.capitalize(), "value": gene_name} for gene_name in gene_list],
# value='pecr',
#),
#html.Br(),
html.P("Please select analysis"),
dcc.Dropdown(
id="analysis-select",
options=[{"label": "Spatial analysis", "value": 'spatial'},
{"label": "Temporal analysis", "value": 'temporal'},
{"label": "Spatiotemporal analysis", "value": 'spatiotemporal'},
{"label": "Validation", "value": 'validation'}],
value='spatial',
),
html.Br(),
],
)
def generate_analysis_card1():
"""
:return: A Div containing analysis 1.
"""
return html.Div(
id="analysis-card1",
children=[
html.H4('Zonation: analysis timepoint per timepoint', style = {'textAlign' : 'center'}),
dcc.Tabs(id = 'tabs-time',
children = [
dcc.Tab(
label ='t = ' + str((i-1)*6) + 'h',
value = str(i),
style = tab_style_reg,
selected_style = tab_selected_style_reg
)
for i in range(1,5)
],
value = '1',
vertical = False,
style = {'background-color': 'Grey '}
),
dcc.Loading(
id="sub-loading-1",
type="circle",
color = colorscale[3],
children=[
html.Div(
id = 'summary-space-regression',
children = [
dcc.Graph(
id ='graph-stat-space',
config = {
'displayModeBar': False,
'modeBarButtonsToRemove': [],
'displaylogo' : False
}
)
],
),
]
)
]
)
def generate_analysis_card2():
"""
:return: A Div containing analysis 2.
"""
return html.Div(
id="analysis-card2",
children=[
html.Div(
id = 'initial-title-tab3',
children = [
html.H4(
                        children = 'Rhythmicity: analysis layer by layer',
style = {'textAlign' : 'center'}
),
]
),
html.Div(
children = [
dcc.Graph(
id = 'graph-polar',
style = {
'width' : '24vw',
'zIndex': '4'
},
config = {
'displayModeBar' : False,
'modeBarButtonsToRemove': [],
'displaylogo' : False
}
),
dcc.Graph(
id = 'graph-mean',
style = {
'width' : '32vw',
'height' : '40vh'
},
config={
'displayModeBar': False,
'modeBarButtonsToRemove': [],
'displaylogo' : False
}
),
],
style = {
'display' : 'flex',
'flex-direction': 'rows',
'flex-wrap' : 'wrap',
'align-items' : 'flex-start',
'justify-content' : 'space-between'
}
),
html.Div(
id = 'second-title-tab3',
children = [
dcc.Tabs(
id = 'tabs-space',
children = [
dcc.Tab(
label='x = ' + str(i-1),
value = str(i),
style = tab_style_reg,
selected_style=tab_selected_style_reg) for i in range(1,9)
],
value = '1',
vertical = False,
style = {'background-color': 'Grey '}
), ],
style = {'zIndex': '10'}
),
html.Div(
style = {'height': '40vw'}, #to avoid skipping when changing tab
children = [
dcc.Loading(
id="sub-loading-2",
type="circle",
color = colorscale[3],
children=[
html.Div(
id = 'summary-time-regression',
children =
dcc.Graph(
id='graph-stat-time',
config={
'displayModeBar': False,
'modeBarButtonsToRemove': [],
'displaylogo' : False
}
),
),
],
),
]
)
]
)
def generate_analysis_card3():
"""
:return: A Div containing analysis 3.
"""
return html.Div(
id="analysis-card3",
children = [
html.Div(
id = 'fourth-title-tab3',
children = [
html.H4(
                        children = 'Rhythmic zonation: analysis of all data points together',
style = {'textAlign' : 'center'}
),
]
),
html.Div(
id = 'summary-2D-regression',
children = [
dcc.Graph(id='graph-polar-fit'),
dcc.Graph(id='graph-fit-3D')
],
),
html.Div(
style = {'margin' : '0px 0px 300px 0px'}
)
],
#style = {'display':'none'},
)
def generate_analysis_card4():
"""
:return: A Div containing analysis 4.
"""
return html.Div(
id="analysis-card4",
children = [
html.Div(
id = 'title-validation',
children = [
html.H4(
                        children = 'Comparison with the dataset from Atger et al.',
style = {'textAlign' : 'center'}
),
]
),
html.Div(children = [
dcc.Graph(
id='graph-comparison',
style = {
'width' : '40vw',
#'height' : '40vh'
},
config={'displayModeBar': False,
'modeBarButtonsToRemove': [],
'displaylogo' : False }
),
dcc.Graph(
id='graph-polar-comparison',
style = {'width' : '25vw'},
config={'displayModeBar': False,
'modeBarButtonsToRemove': [],
'displaylogo' : False }
)
],
style = {
'display' : 'flex',
'flex-direction': 'rows',
'flex-wrap' : 'wrap',
'align-items' : 'flex-start',
'justify-content' : 'space-between'
}
),
html.Div(id = 'no-gene', style = {'text-align': 'center'}),
],
)
""" APP MAIN LAYOUT """
app.layout = html.Div(
id="app-container",
children=[
dcc.Tabs(
id = "tabs",
value = 'main-tab-1',
children=[
dcc.Tab(
label='Gene selection',
value = 'main-tab-1',
style=tab_style, selected_style=tab_selected_style,
children=[
# Left column
html.Div(
id="left-column",
className="two columns",
children=[description_card(), generate_control_card()]
),
#html.H4(id='wait-text', children = 'Please wait while the data is being processed...'),
dcc.Loading(
id="loading1",
type="circle",
color = colorscale[3],
children=[
# Middle column
html.Div(
id="middle-column",
className="five columns",
children=[
html.Div(
id="graph_tab_1_card",
children = dcc.Graph(
id='graph-space',
config={'displayModeBar': False,
'modeBarButtonsToRemove': [],
'displaylogo' : False
},
#style = {'margin': 'auto'},
),
),
html.Div(
id="graph_tab_3_card",
children = dcc.Graph(
id='graph-3d',
config={
'modeBarButtonsToRemove': [
'sendDataToCloud',
'resetCameraLastSave3d',
'hoverClosest3d',
'zoom3d',
'toImage'
],
'displaylogo' : False,
'scrollZoom': False
},
style = {
'border-width':'1px',
'border-style':'solid',
'border-color':'#e8e8e8'
}
),
),
],
),
# Right column
html.Div(
id="right-column",
className="five columns",
children=[
html.Div(
id="graph_tab_2_card",
children = dcc.Graph(
id='graph-time',
config={'displayModeBar': False,
'modeBarButtonsToRemove': [],
'displaylogo' : False
},
#style = {'margin': 'auto'},
),
),
html.Div(
id="data_card",
children=[
html.H6("Raw data"),
html.Br(),
html.Div(id = 'div-selected-genes', children = []),
],
),
],
), ]),
],
),
dcc.Tab(
label='Statistical analysis',
value = 'main-tab-2',
style=tab_style, selected_style=tab_selected_style,
children=[
# Left column
html.Div(
id="left-column2",
className="three columns",
children=[description_card2(), generate_control_card2()]
),
dcc.Loading(
id="loading2",
color = colorscale[3],
type="circle",
children=[
# Right column
html.Div(
id="right-column2",
className="nine columns",
children=[
html.Div(
id = 'analysis-id',
children=[
generate_analysis_card1(),
generate_analysis_card2(),
generate_analysis_card3(),
generate_analysis_card4()
]
),
],
), ]),
],
),
],
),
html.Div(children = "©2020 Naef lab",
style = {'position':'fixed',
'bottom':'0',
'right':'0',
'left':'0',
'background':colorscale[3],
'padding':'10px',
'box-sizing':'border-box',
'color':'white',
}
)
],
)
""" CALLBACK FOR TAB 1 """
@app.callback(
[Output('graph-space', 'figure'),
Output('graph-time', 'figure'),
Output('graph-3d', 'figure'),
Output('div-selected-genes', 'children'),
],
[Input('gene-select', 'value')])#, Input('yaxis-type', 'value'), Input('yaxis-scale', 'value'), Input('data-type', 'value')])
def update_figure_time(gene_name):#, yaxis_type, yaxis_scale, data_type, value_tab ):
if gene_name is None:
raise Exception()
else:
fig_space = Figures.compute_figure_space(gene_data[gene_name])
fig_time = Figures.compute_figure_time(gene_data[gene_name])
fig_3D = Figures.compute_figure_3D(gene_data[gene_name])
data = gene_data[gene_name]
array_gene = data['rep1']
array_gene_std = data['rep1_std']
array_gene_2 = data['rep2']
array_gene_std_2 = data['rep2_std']
array_gene_3 = data['rep3']
array_gene_std_3 = data['rep3_std']
l_tr = [ [ Styled_th(' ', {'background-color': colorscale[5]}) ] + [ Styled_th('x = ' + str(x)) for x in range(8) ] ]
for idx, i in enumerate(range(0,24,6)):
l_th = [ Styled_th('t = ' + str(i) + 'h', {'background-color': colorscale[5]}) ]
for j in range(0,8,1):
if i==0 or i==12:
l_th.append( Styled_th( format(round(array_gene[j][idx],2)) + ', ' + format(round(array_gene_2[j][idx],2)) + ', ' + format(round(array_gene_3[j][idx],2)) , small = True ) )
else:
l_th.append( Styled_th( format(round(array_gene[j][idx],2)) + ', ' + format(round(array_gene_2[j][idx],2)) , small = True ) )
l_tr.append(l_th)
table = html.Table( [html.Tr(l, style = { 'background-color': colorscale[5]}) if m==0 else html.Tr(l) for m,l in enumerate(l_tr)], style = {'border-collapse': 'collapse' , 'width': '100%'})
return fig_space, fig_time, fig_3D, table
@app.callback(
Output('paragraph-selected-gene-2', 'children'),
[Input('gene-select', 'value')])
def update_selected_gene_text(gene_name):
if gene_name is None:
raise Exception()
else:
return dcc.Markdown('**The gene you selected is: ' + gene_name.capitalize() + '**')
""" CALLBACK FOR TAB 2 """
@app.callback([
Output('analysis-card1', 'style'),
Output('analysis-card2', 'style'),
Output('analysis-card3', 'style'),
Output('analysis-card4', 'style'),
],
[Input('analysis-select', 'value')])
def update_analysis_visibility(value):
if value =='spatial':
return {}, {'display' : 'none'}, {'display' : 'none'}, {'display' : 'none'}
elif value == 'temporal':
return {'display' : 'none'}, {}, {'display' : 'none'}, {'display' : 'none'}
elif value == 'validation':
return {'display' : 'none'}, {'display' : 'none'}, {'display' : 'none'}, {}
else:
return {'display' : 'none'}, {'display' : 'none'}, {}, {'display' : 'none'}
@app.callback(
Output('summary-space-regression', 'children'),
[Input('tabs-time', 'value'), Input('tabs', 'value'), Input('analysis-select', 'value')],
[State('gene-select', 'value')])#, Input('yaxis-type', 'value'), Input('yaxis-scale', 'value'),Input('tabs', 'value')])
def update_figure_fits(value, value_main_tab, value_analysis, gene_name): #yaxis_type, yaxis_scale, value_tab ):
if gene_name is None:
raise Exception()
else:
if value_main_tab == 'main-tab-2' and value_analysis == 'spatial':
#correct value
value = int(value)-1
t = int(value)
array_gene_space = np.concatenate( (gene_data[gene_name]['rep1'], gene_data[gene_name]['rep2'], gene_data[gene_name]['rep3']), axis = 0)
if t==0 or t==2:
selected, B, SE, adj_r2, aic, bic, pv, X_pred, Y_pred = LinearRegression.make_space_regression(array_gene_space[:,t], predict= True)
else:
selected, B, SE, adj_r2, aic, bic, pv, X_pred, Y_pred = LinearRegression.make_space_regression(array_gene_space[:16,t], predict= True)
if len(selected) == 1:
str_param = dash_katex.DashKatex(id='katex_a0', expression=' \mu_0')
else:
str_param = dash_katex.DashKatex(id='katex_other_parameters', expression = return_str_list_param(selected) )
space_domain = np.concatenate((np.linspace(0,8,8, endpoint = False),np.linspace(0,8,8, endpoint = False), np.linspace(0,8,8, endpoint = False)))
if t==0 or t==2:
figure = Figures.compute_figure_space_tab_3(space_domain, array_gene_space[:,t], X_pred, Y_pred)#, yaxis_type, yaxis_scale)
else:
figure = Figures.compute_figure_space_tab_3(space_domain[:16], array_gene_space[:16,t], X_pred, Y_pred)#, yaxis_type, yaxis_scale)
return [html.Div(children = [
html.P('Retained parameters: '),
html.Div(style = {'width' : '5px'}),str_param]
,
style = {'display' : 'flex', 'justify-content':'center'}
),
dcc.Graph(id='graph-stat-space',
figure = figure,
config={'displayModeBar': False, 'modeBarButtonsToRemove': [], 'displaylogo' : False },
style = {'width' : '60vw'}
)
]
else:
raise PreventUpdate
@app.callback(
[Output('graph-polar', 'figure'),Output('graph-mean', 'figure')],
[Input('tabs', 'value'), Input('analysis-select', 'value')],
[State('gene-select', 'value')])
def update_figure_polar(value_main_tab, value_analysis, gene_name):
if gene_name is None:
raise Exception()
else:
if value_main_tab == 'main-tab-2' and value_analysis == 'temporal':
array_gene_time =np.concatenate( (gene_data[gene_name]['rep1'], gene_data[gene_name]['rep2'], gene_data[gene_name]['rep3'][:,[0,2]]), axis = 1)
l_time_reg = []
for x in range(8):
l_time_reg.append(LinearRegression.make_time_regression(array_gene_time[x,:], simple = False, predict= True))
l_time_reg_simple = []
for x in range(8):
l_time_reg_simple.append(LinearRegression.make_time_regression(array_gene_time[x,:], simple = True, predict= False))
figure_polar = Figures.compute_figure_polar_tab_3(l_time_reg)
figure_mean = Figures.compute_figure_mean_tab_3(l_time_reg)#, yaxis_type, yaxis_scale)
return figure_polar, figure_mean
else:
raise PreventUpdate
@app.callback(
Output('summary-time-regression', 'children'),
[Input('tabs-space', 'value'), Input('tabs', 'value'), Input('analysis-select', 'value')],
[State('gene-select', 'value')])
def update_time_regression_summary(value, value_main_tab, value_analysis, gene_name):
if gene_name is None:
raise Exception()
else:
if value_main_tab == 'main-tab-2' and value_analysis == 'temporal':
array_gene_time =np.concatenate( (gene_data[gene_name]['rep1'], gene_data[gene_name]['rep2'], gene_data[gene_name]['rep3'][:,[0,2]]), axis = 1)
l_time_reg = []
for x in range(8):
l_time_reg.append(LinearRegression.make_time_regression(array_gene_time[x,:], simple = False, predict= True))
l_time_reg_simple = []
for x in range(8):
l_time_reg_simple.append(LinearRegression.make_time_regression(array_gene_time[x,:], simple = True, predict= False))
#correct value
value = int(value)-1
B, SE, adj_r2, aic, bic, pv, X_pred, Y_pred = l_time_reg[value]
[mu_1, a_1, b_1] = B.flatten()
[std_mu_1, std_a_1, std_b_1] = np.diagonal(SE)
bic_1 = bic
aic_1 = aic
r2_1 = adj_r2
B_simple, SE_simple, adj_r2_simple, aic_simple, bic_simple, pv_simple = l_time_reg_simple[value]
[mu_2] = B_simple.flatten()
[std_mu_2] = np.diagonal(SE_simple)
bic_2 = bic_simple
aic_2 = aic_simple
r2_2 = adj_r2_simple
table_model_1 = html.Table([html.Tr([Styled_th('Parameter'), Styled_th('Mean'), Styled_th('SE')], style = { 'background-color': colorscale[5]}),
html.Tr([Styled_th(dash_katex.DashKatex(expression='\mu'), { 'background-color': colorscale[5]}), Styled_th('{:.2e}'.format(mu_1)), Styled_th('{:.2e}'.format(std_mu_1))]),
html.Tr([Styled_th(dash_katex.DashKatex(expression='a'), { 'background-color': colorscale[5]}), Styled_th('{:.2e}'.format(a_1)), Styled_th('{:.2e}'.format(std_a_1))]),
html.Tr([Styled_th(dash_katex.DashKatex(expression='b'), { 'background-color': colorscale[5]}), Styled_th('{:.2e}'.format(b_1)), Styled_th('{:.2e}'.format(std_b_1))])
])
table_model_2 = html.Table([html.Tr([Styled_th('Parameter'), Styled_th('Mean'), Styled_th('SE')], style = { 'background-color': colorscale[5]}),
html.Tr([Styled_th(dash_katex.DashKatex(expression='\mu'), { 'background-color': colorscale[5]}), Styled_th('{:.2e}'.format(mu_2)), Styled_th('{:.2e}'.format(std_mu_2))])
])
table_comparison = html.Table([html.Tr([Styled_th('Model'), Styled_th('BIC'), Styled_th('AIC'), Styled_th(dash_katex.DashKatex(expression='\\text{Adj. } R^2') )], style = { 'background-color': colorscale[5]}),
html.Tr([Styled_th('Intercept-only', { 'background-color': colorscale[5]}), Styled_th('{:.2e}'.format(bic_2)), Styled_th('{:.2e}'.format(aic_2)), Styled_th('{:.2e}'.format(r2_2))]),
html.Tr([Styled_th('Oscillatory', { 'background-color': colorscale[5]}), Styled_th('{:.2e}'.format(bic_1)), Styled_th('{:.2e}'.format(aic_1)), Styled_th('{:.2e}'.format(r2_1))])
])
time_domain = np.concatenate((np.linspace(0,24,4, endpoint = False),np.linspace(0,24,4, endpoint = False), np.linspace(0,24,2, endpoint = False)))
x = int(value)
B, SE, adj_r2, aic, bic, pv, X_pred, Y_pred = l_time_reg[x]
figure = Figures.compute_figure_time_tab_3(time_domain, array_gene_time[x,:], X_pred, Y_pred)#, yaxis_type, yaxis_scale)
return [html.Div(children = [
html.Div(children = [html.H6('Intercept-only model', style = {'textAlign' : 'center'}) , table_model_2]),
html.Div(children = [html.H6('Oscillatory model', style = {'textAlign' : 'center'}), table_model_1]),
                    html.Div(children = [html.H6('Model comparison', style = {'textAlign' : 'center'}), table_comparison, html.P('P-value associated with the oscillatory model (ANOVA): ' + str(pv))], style = {'display' : 'flex', 'flex-direction': 'column'}),
],
style = {'display' : 'flex', 'flex-direction': 'row', 'justify-content' : 'space-around', 'flex-wrap' : 'wrap', 'flex-align':'baseline'}),
dcc.Graph(id='graph-stat-time', figure = figure, config={'displayModeBar': False, 'modeBarButtonsToRemove': [], 'displaylogo' : False }, style = {'width' : '60vw'})
]
else:
raise PreventUpdate
@app.callback(
Output('summary-2D-regression', 'children'),
[Input('tabs', 'value'), Input('analysis-select', 'value')],
[State('gene-select', 'value')])#, Input('yaxis-type', 'value'), Input('yaxis-scale', 'value'),Input('tabs', 'value')])
def update_spatiotemporal_summary(value_main_tab, value_analysis, gene_name):#, yaxis_type, yaxis_scale, value_tab ):
if gene_name is None:
raise Exception()
else:
if value_main_tab == 'main-tab-2' and value_analysis == 'spatiotemporal':
array_gene_time = np.concatenate( (gene_data[gene_name]['rep1'], gene_data[gene_name]['rep2'], gene_data[gene_name]['rep3'][:,[0,2]]), axis = 1)
fig_3D = Figures.compute_figure_3D_tab_3(dic_reg[gene_name], array_gene_time)#, yaxis_type, yaxis_scale)
selected = dic_reg[gene_name][0]
pv = dic_reg[gene_name][6]
set_selected = set(selected)
if len(selected) == 1:
str_param = dash_katex.DashKatex(expression='\\text{ } \mu_0\\text{. }')
str_param2 = 'This corresponds to the flat model.'
else:
str_param = dash_katex.DashKatex(expression='\\text{ }'+return_str_list_param(selected)+'\\text{. }')
                # classify the retained parameters as rhythmic, zonated or rhythmic-zonated
                if set_selected == set(['mu0', 'a0', 'b0']) or set_selected == set(['mu0', 'a0']) or set_selected == set(['mu0', 'b0']):
                    str_param2 = "This corresponds to the rhythmic model."
                elif 'a0' not in selected and 'a1' not in selected and 'a2' not in selected and 'b0' not in selected and 'b1' not in selected and 'b2' not in selected:
                    str_param2 = "This corresponds to the zonated model."
                else:
                    str_param2 = "This corresponds to the rhythmic-zonated model."
l_time_reg = []
for x in range(8):
l_time_reg.append(LinearRegression.make_time_regression(array_gene_time[x,:], simple = False, predict= True))
fig_polar = Figures.compute_figure_polar_fit_tab_3(dic_reg[gene_name],l_time_reg)
            if fig_polar is not None:
return [html.Div(children=[html.P("Retained parameters: "), str_param, html.P(str_param2)], style = {'display' : 'flex', 'justify-content':'center'}),
html.Div(children = [
dcc.Graph(id='graph-polar-fit',
figure = fig_polar,
config={'displayModeBar': False, 'modeBarButtonsToRemove': [], 'displaylogo' : False },
style = {'width' : '25vw'}
),
dcc.Graph(id='graph-fit-3D',
figure = fig_3D,
config={'modeBarButtonsToRemove': ['sendDataToCloud', 'resetCameraLastSave3d', 'hoverClosest3d', 'zoom3d', 'toImage'], 'displaylogo' : False },
style = {'border-width':'1px', 'border-style':'solid', 'border-color':'#e8e8e8', 'width' : '45vw'} )
],
style = {'display' : 'flex', 'flex-direction': 'row', 'justify-content' : 'space-between', 'height': '500px'},
),
]
else:
return [html.Div(children=[html.P("Retained parameters: "), str_param, html.P(str_param2)], style = {'display' : 'flex', 'justify-content':'center'}),
html.Div(children = [
dcc.Graph(id='graph-fit-3D', figure = fig_3D, config={'modeBarButtonsToRemove': ['sendDataToCloud', 'resetCameraLastSave3d', 'hoverClosest3d', 'zoom3d', 'toImage'], 'displaylogo' : False },
style = {'border-width':'1px', 'border-style':'solid', 'border-color':'#e8e8e8', 'width' : '60vw'} )
],
style = {'display' : 'flex', 'flex-direction': 'row', 'justify-content' : 'space-around'}
),
]
else:
raise PreventUpdate
@app.callback(
Output('graph-comparison', 'figure'),
[Input('tabs', 'value'), Input('analysis-select', 'value')],
[State('gene-select', 'value')])
def make_graph(value_main_tab, value_analysis, gene_name):#, yaxis_type, yaxis_scale, data_type, value_tab):
if gene_name is None or gene_name not in gene_data_atg:
raise Exception()
else:
if value_main_tab == 'main-tab-2' and value_analysis == 'validation':
data_atg = gene_data_atg[gene_name]
data_itz = gene_data[gene_name]
array_atg = np.nanmean( data_atg, axis = 1)
array_itz = np.nanmean(np.nanmean( [data_itz['rep1'], data_itz['rep2'], data_itz['rep3']], axis = 0), axis = 0)
return Figures.compute_figure_comparison(array_atg, array_itz)
else:
raise PreventUpdate
@app.callback(
Output('graph-polar-comparison', 'figure'),
[Input('tabs', 'value'), Input('analysis-select', 'value')],
[State('gene-select', 'value')])
def make_polar_comparison_graph(value_main_tab, value_analysis, gene_name):#, yaxis_type, yaxis_scale, data_type, value_tab):
if gene_name is None or gene_name not in gene_data_atg:
raise Exception()
else:
if value_main_tab == 'main-tab-2' and value_analysis == 'validation':
data_atg = gene_data_atg[gene_name]
data_itz = gene_data[gene_name]
array_atg = np.nanmean( data_atg, axis = 1)
array_itz = np.nanmean(np.nanmean( [data_itz['rep1'], data_itz['rep2'], data_itz['rep3']], axis = 0), axis = 0)
return Figures.compute_figure_polar_comparison(array_atg, array_itz)
else:
raise PreventUpdate
@app.callback(
[Output('graph-comparison', 'style'),
Output('graph-polar-comparison', 'style')
],
[Input('tabs', 'value'), Input('analysis-select', 'value')],
[State('gene-select', 'value')])
def update_comparison_graph_styles(value_main_tab, value_analysis, gene_name):#, yaxis_type, yaxis_scale, data_type, value_tab):
if value_main_tab == 'main-tab-2' and value_analysis == 'validation':
if gene_name is None or gene_name not in gene_data_atg:
return {'display' : 'none'}, {'display' : 'none'}
else:
return {'width' : '40vw'}, {'width' : '25vw'}
else:
raise PreventUpdate
@app.callback(
Output('no-gene', 'children'),
[Input('tabs', 'value'), Input('analysis-select', 'value')],
[State('gene-select', 'value')])
def update_missing_gene_message(value_main_tab, value_analysis, gene_name):#, yaxis_type, yaxis_scale, data_type, value_tab):
if value_main_tab == 'main-tab-2' and value_analysis == 'validation':
if gene_name is None or gene_name not in gene_data_atg:
            return dcc.Markdown('This gene is not available in the *Atger et al. dataset*')
else:
return ''
else:
raise PreventUpdate
""" IMPORTANT FUNCTIONS """
def Styled_th(x, supp = {}, small = False):
if small:
style = { 'border': '1px solid #dddddd', 'text-align': 'center', 'padding': '4px', 'font-size' : 'small', 'font-weight' : 'lighter'}
else:
style = { 'border': '1px solid #dddddd', 'text-align': 'center', 'padding': '4px', 'font-weight' : 'lighter'}
for key, val in supp.items():
style[key] = val
return html.Th( x , style = style)
def return_str_list_param(l_param, b_sorted = False):
str_p = ''
if b_sorted:
l_param = sorted(l_param)
for param in l_param:
if len(param)>3:
p1, p2 = param.split('+')
if len(p1)>=3:
p1 = '\\' + p1
            str_p += p1[:-1]+'_'+p1[-1] + ', ' + p2[:-1]+ '_'+p2[-1]+ ', '
else:
if len(param)>=3:
param = '\\' + param
str_p += param[:-1] + '_' + param[-1] + ', '
return str_p[:-2]
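# Example (derived directly from the logic above): return_str_list_param(['mu0', 'a0', 'b0'])
# returns the KaTeX string '\mu_0, a_0, b_0': three-character names get a leading backslash
# so they render as Greek letters, and the trailing character becomes a subscript.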
# Run the server
if __name__ == "__main__":
app.run_server(debug=False)
|
py | b40c4aea83ec8f5ea7a94b66494950194f5fbfe8 | # -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import os
import tensorflow as tf
import math
"""
multi-scale testing is not used
USE_07_METRIC=True:
cls : car|| Recall: 0.9520103761348897 || Precison: 0.2687660197729769|| AP: 0.8728004904046309
cls : plane|| Recall: 0.982392776523702 || Precison: 0.5515842839036755|| AP: 0.9042372698694664
mAP is : 0.8885188801370487
USE_07_METRIC=False:
cls : plane|| Recall: 0.982392776523702 || Precison: 0.5515842839036755|| AP: 0.9752551520092599
cls : car|| Recall: 0.9520103761348897 || Precison: 0.2687660197729769|| AP: 0.9078671502390506
mAP is : 0.9415611511241553
"""
# ------------------------------------------------
VERSION = 'RetinaNet_UCAS-AOD_Baseline_2x_20201005'
NET_NAME = 'resnet101_v1d' # 'MobilenetV2'
ADD_BOX_IN_TENSORBOARD = True
# ---------------------------------------- System_config
ROOT_PATH = os.path.abspath('../')
print(20*"++--")
print(ROOT_PATH)
GPU_GROUP = "0,1,2"
NUM_GPU = len(GPU_GROUP.strip().split(','))
SHOW_TRAIN_INFO_INTE = 20
SMRY_ITER = 200
SAVE_WEIGHTS_INTE = 5000 * 2
SUMMARY_PATH = ROOT_PATH + '/output/summary'
TEST_SAVE_PATH = ROOT_PATH + '/tools/test_result'
if NET_NAME.startswith("resnet"):
weights_name = NET_NAME
elif NET_NAME.startswith("MobilenetV2"):
weights_name = "mobilenet/mobilenet_v2_1.0_224"
else:
    raise Exception('net name must be in [resnet_v1_101, resnet_v1_50, MobilenetV2]')
PRETRAINED_CKPT = ROOT_PATH + '/data/pretrained_weights/' + weights_name + '.ckpt'
TRAINED_CKPT = os.path.join(ROOT_PATH, 'output/trained_weights')
EVALUATE_R_DIR = ROOT_PATH + '/output/evaluate_result_pickle/'
# ------------------------------------------ Train config
RESTORE_FROM_RPN = False
FIXED_BLOCKS = 1 # allow 0~3
FREEZE_BLOCKS = [True, False, False, False, False] # for gluoncv backbone
USE_07_METRIC = True
EVAL_THRESHOLD = 0.5
MUTILPY_BIAS_GRADIENT = 2.0  # if None, will not multiply
GRADIENT_CLIPPING_BY_NORM = 10.0 # if None, will not clip
CLS_WEIGHT = 1.0
REG_WEIGHT = 1.0
ANGLE_WEIGHT = 0.5
REG_LOSS_MODE = None
ALPHA = 1.0
BETA = 1.0
BATCH_SIZE = 1
EPSILON = 1e-5
MOMENTUM = 0.9
LR = 5e-4
DECAY_STEP = [SAVE_WEIGHTS_INTE*12, SAVE_WEIGHTS_INTE*16, SAVE_WEIGHTS_INTE*20]
MAX_ITERATION = SAVE_WEIGHTS_INTE*20
WARM_SETP = int(1.0 / 4.0 * SAVE_WEIGHTS_INTE)
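# For reference (plain arithmetic on the values above): SAVE_WEIGHTS_INTE = 10000, so
# DECAY_STEP resolves to [120000, 160000, 200000], MAX_ITERATION to 200000 and WARM_SETP to 2500.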
# -------------------------------------------- Data_preprocess_config
DATASET_NAME = 'UCAS-AOD' # 'pascal', 'coco'
PIXEL_MEAN = [123.68, 116.779, 103.939] # R, G, B. In tf, channel is RGB. In openCV, channel is BGR
PIXEL_MEAN_ = [0.485, 0.456, 0.406]
PIXEL_STD = [0.229, 0.224, 0.225] # R, G, B. In tf, channel is RGB. In openCV, channel is BGR
IMG_SHORT_SIDE_LEN = [800, 600, 1000, 1200]
IMG_MAX_LENGTH = 1500
CLASS_NUM = 2
IMG_ROTATE = True
RGB2GRAY = True
VERTICAL_FLIP = True
HORIZONTAL_FLIP = True
IMAGE_PYRAMID = True
# --------------------------------------------- Network_config
SUBNETS_WEIGHTS_INITIALIZER = tf.random_normal_initializer(mean=0.0, stddev=0.01, seed=None)
SUBNETS_BIAS_INITIALIZER = tf.constant_initializer(value=0.0)
PROBABILITY = 0.01
FINAL_CONV_BIAS_INITIALIZER = tf.constant_initializer(value=-math.log((1.0 - PROBABILITY) / PROBABILITY))
WEIGHT_DECAY = 1e-4
USE_GN = False
FPN_CHANNEL = 256
# ---------------------------------------------Anchor config
LEVEL = ['P3', 'P4', 'P5', 'P6', 'P7']
BASE_ANCHOR_SIZE_LIST = [32, 64, 128, 256, 512]
ANCHOR_STRIDE = [8, 16, 32, 64, 128]
ANCHOR_SCALES = [2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)]
ANCHOR_RATIOS = [1, 1 / 2, 2., 1 / 3., 3., 5., 1 / 5.]
ANCHOR_ANGLES = [-90, -75, -60, -45, -30, -15]
ANCHOR_SCALE_FACTORS = None
USE_CENTER_OFFSET = True
METHOD = 'H'
USE_ANGLE_COND = False
ANGLE_RANGE = 180 # 90 or 180
# --------------------------------------------RPN config
SHARE_NET = True
USE_P5 = True
IOU_POSITIVE_THRESHOLD = 0.5
IOU_NEGATIVE_THRESHOLD = 0.4
NMS = True
NMS_IOU_THRESHOLD = 0.1
MAXIMUM_DETECTIONS = 100
FILTERED_SCORE = 0.05
VIS_SCORE = 0.65
|
py | b40c4af3420223e54f15726eecd599940f5d09c5 | import urllib3
import urllib.parse
import json as json_
import re
headers = {'Accept-Encoding': 'gzip'}
http = urllib3.PoolManager()
def url(url, **kwargs):
return url + '?' + urllib.parse.urlencode(kwargs)
def r(method, url, json=None):
"""Returns HTTPResponse object (including res.reason, .status, .headers) and also .json."""
_headers = headers.copy()
if json:
body = json_.dumps(json, separators=(',', ':')).encode()
_headers['Content-Type'] = 'application/json'
else:
body = None
res = http.request(method, url, headers=_headers, body=body)
res.json = json_.loads(data) if (data := res.data.decode()) else None
return res
def get(url):
"""Returns HTTPResponse object (including res.reason, .status, .headers) and also .json, .next_url."""
res = r('GET', url)
links = [link for link in res.headers.get_all('link', []) if 'rel="next"' in link]
res.next_url = re.search('<(.*)>', links[0]).group(1) if links else None
return res
def post(url, json=None):
return r('POST', url, json)
def put(url, json=None):
return r('PUT', url, json)
def patch(url, json=None):
return r('PATCH', url, json)
def delete(url):
return r('DELETE', url)
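# Illustrative usage sketch (kept as a comment; the GitHub issues endpoint and its query
# parameters below are placeholders, not part of this module):
#
#   page_url = url('https://api.github.com/repos/owner/repo/issues', state='open', per_page=100)
#   while page_url:
#       res = get(page_url)            # res.status, res.headers and res.json are available
#       for issue in res.json or []:
#           print(issue.get('title'))
#       page_url = res.next_url        # follows the rel="next" Link header; None when exhausted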
|
py | b40c4dac0cd5672fa5a03fd9a6c0aaf822db0622 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 2 22:46:44 2017
@author: anilosmantur
"""
import community
import networkx as nx
from networkx.algorithms import community as nx_com
from networkx.algorithms import approximation as nx_approx
import matplotlib.pyplot as plt
import UtilitiesNetwork as un
print('library imports done')
FILE_NAME = '/home/anilosmantur/**/complex_networks/project/ego_facebook/facebook_combined.txt'
edges = []
with open(FILE_NAME) as netfile:
print('file opened')
for i, line in enumerate(netfile):
words = line.split()
edges.append((int(words[0]), int(words[1])))
print('Reading edges finished')
fb_net = nx.Graph(edges)
info = nx.info(fb_net) + '\n'
avgCluster_coef = nx.average_clustering(fb_net)
Cl_co = 'Estimation of average clustering coefficient: '+str(avgCluster_coef) + '\n'
dens = nx.density(fb_net)
dens = 'Density of network: ' + str(dens) + '\n'
#max_clique = nx_approx.max_clique(fb_net)
#print(max_clique)
# drawing the graph
pos = nx.spring_layout(fb_net)
un.drawGraphSave(fb_net, pos, 8, 'fbNet_')
plt.close()
part = community.best_partition(fb_net)
size = float(len(set(part.values())))
com = 'Found community count: ' + str(size) + '\n'
mode = community.modularity(part, fb_net)
mode = 'Modularity: ' + str(mode) + '\n'
un.drawCommunityGraphSave(fb_net, pos, part, 8, 'fbnet_')
del part
plt.close()
centb = nx.centrality.betweenness_centrality(fb_net)
un.centralityPlotSave(centb, 5, 'fbnet_', 'betweenness')
un.drawCentralityGraphSave(fb_net, pos, centb, 8, 'fbnet_', 'betweenness')
del centb
plt.close()
centd = nx.centrality.degree_centrality(fb_net)
un.centralityPlotSave(centd, 5, 'fbnet_', 'degree')
un.drawCentralityGraphSave(fb_net, pos, centd, 8, 'fbnet_', 'degree')
del centd
plt.close()
with open('sums/fbnet_sum.txt', 'w') as sumfile:
sumfile.write(info)
sumfile.write(Cl_co)
sumfile.write(com)
sumfile.write(mode)
sumfile.write(dens)
# analyze the network
hist = nx.degree_histogram(fb_net)
plt.figure(figsize=(10, 10))
plt.plot(hist, linestyle=':')
plt.title('Degree Histogram')
plt.savefig('fbNet_Degree.png')
plt.close()
print('Degree Histogram finished')
lap_spec = nx.laplacian_spectrum(fb_net)
plt.plot(lap_spec)
plt.title('Eigenvalues of the Laplacian')
plt.savefig('fbNet_LapSpec.png')
plt.close()
print('Eigenvalues of the Laplacian')
adj_spec = nx.adjacency_spectrum(fb_net)
plt.plot(adj_spec)
plt.title('Eigenvalues of the Adjacency')
plt.savefig('fbNet_AdjSpec.png')
plt.close()
print('Eigenvalues of the Adjacency')
spec_ordering = nx.spectral_ordering(fb_net)
plt.plot(spec_ordering)
plt.title('Spectral Ordering')
plt.savefig('fbNet_SpecOrder.png')
plt.close()
print('Spectral Ordering') |
py | b40c4f5030c6e344e5042a8513dd8d346042c7cf | # UNIDAD 08.D20 - D26
# API - Consumiendo una API de terceros
print('\n\n---[Diapo 20]---------------------')
print('API - Consumiendo una API de terceros')
import requests
import decouple
My_NewsApi_KEY = decouple.config('My_NewsApi_KEY')
url = 'https://newsapi.org/v2/everything?q=tesla&from=2022-04-10&sortBy=publishedAt&apiKey=' + My_NewsApi_KEY
response = requests.get(url)
status_code = response.status_code
if status_code == 200:
content = response.content
print('content: ', content)
else:
    print('Request error')
print('\n\n---[Slide 22]---------------------')
print('API - Consuming a third-party API')
import requests
import decouple
My_NewsApi_KEY = decouple.config('My_NewsApi_KEY')
url = 'https://newsapi.org/v2/everything?q=tesla&from=2022-04-10&sortBy=publishedAt&apiKey=' + My_NewsApi_KEY
response = requests.get(url)
status_code = response.status_code
if status_code == 200:
    json = response.json()
    print('type: ', type(json))
    print(json)
else:
    print('Response error')
print('\n\n---[Slide 24]---------------------')
print('API - Consuming a third-party API')
import requests
import decouple
My_NewsApi_KEY = decouple.config('My_NewsApi_KEY')
url = 'https://newsapi.org/v2/everything?q=tesla&from=2022-04-10&sortBy=publishedAt&apiKey=' + My_NewsApi_KEY
response = requests.get(url)
status_code = response.status_code
if status_code == 200:
    json = response.json()
    status = json['status']
    cantidadResultados = json['totalResults']
    print('Status: ', status)
    print('Number of articles: ', cantidadResultados)
else:
    print('Request error')
print('\n\n---[Slide 25]---------------------')
print('API - Consuming a third-party API')
import requests
import decouple
My_NewsApi_KEY = decouple.config('My_NewsApi_KEY')
url = 'https://newsapi.org/v2/everything?q=tesla&from=2022-04-10&sortBy=publishedAt&apiKey=' + My_NewsApi_KEY
response = requests.get(url)
status_code = response.status_code
if status_code == 200:
    json = response.json()
    noticia = json['articles']
    cantidadResultados = json['totalResults']
    print('Type: ', type(noticia))
    print('Articles: ', noticia)
else:
    print('Request error')
print('\n\n---[Slide 26]---------------------')
print('API - Consuming a third-party API')
import requests
import decouple
My_NewsApi_KEY = decouple.config('My_NewsApi_KEY')
url = 'https://newsapi.org/v2/everything?q=tesla&from=2022-04-10&sortBy=publishedAt&apiKey=' + My_NewsApi_KEY
response = requests.get(url)
status_code = response.status_code
if status_code == 200:
    json = response.json()
    noticia = json['articles'][0]
    fuente = noticia['source']
    autor = noticia['author']
    titulo = noticia['title']
    descripcion = noticia['description']
    url = noticia['url']
    print('Source: ', fuente)
    print('Author: ', autor)
    print('Title: ', titulo)
    print('Description: ', descripcion)
    print('url: ', url)
else:
    print('Request error')
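print('\n\n---[Extra example]----------------')
print('API - Same request built from a params dict')
# Illustrative sketch only: it repeats the request used throughout this unit, but lets the
# requests library build the query string from a dictionary instead of concatenating the URL.
import requests
import decouple
My_NewsApi_KEY = decouple.config('My_NewsApi_KEY')
params = {
    'q': 'tesla',
    'from': '2022-04-10',
    'sortBy': 'publishedAt',
    'apiKey': My_NewsApi_KEY,
}
response = requests.get('https://newsapi.org/v2/everything', params=params)
if response.status_code == 200:
    articles = response.json()['articles']
    if articles:
        print('First title: ', articles[0]['title'])
else:
    print('Request error')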
|
py | b40c50ff08cee365b0ece49383a91fbac65c2703 | class Solution:
def backspaceCompare(self, s: str, t: str) -> bool:
# whenever we have nonbackspace push to stack
# if we hit a backspace pop from stack
def processStr(string, stack):
for char in string:
if char != '#':
stack.append(char)
elif char == '#' and stack:
stack.pop()
return stack
return processStr(s, []) == processStr(t, [])
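# Quick sanity checks (not part of the original solution):
#   Solution().backspaceCompare("ab#c", "ad#c")  -> True   (both reduce to "ac")
#   Solution().backspaceCompare("a##c", "#a#c")  -> True   (both reduce to "c")
#   Solution().backspaceCompare("a#c", "b")      -> False  ("c" vs "b")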
|
py | b40c519ea858ca67b53901584a5a3e55bc1ebdcd | from __future__ import unicode_literals
from django.db import models
from mptt.managers import TreeManager
class DocumentIndexInstanceNodeManager(models.Manager):
def get_for(self, document):
return self.filter(documents=document)
class IndexManager(models.Manager):
def get_by_natural_key(self, slug):
return self.get(slug=slug)
def index_document(self, document):
for index in self.filter(enabled=True, document_types=document.document_type):
index.index_document(document=document)
def rebuild(self):
for index in self.all():
index.rebuild()
class IndexInstanceNodeManager(TreeManager):
def delete_empty(self):
# Select leaf nodes only because .delete_empty() bubbles up
for root_nodes in self.filter(parent=None):
for index_instance_node in root_nodes.get_leafnodes():
index_instance_node.delete_empty()
def remove_document(self, document):
for index_instance_node in self.filter(documents=document):
index_instance_node.remove_document(document=document)
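# Illustrative usage sketch (assumes these managers are attached to the corresponding index
# models elsewhere in the app, which is not shown in this module):
#
#   Index.objects.index_document(document=document)    # re-index a single document
#   Index.objects.rebuild()                            # rebuild every defined index
#   IndexInstanceNode.objects.delete_empty()           # prune empty leaf nodes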
|
py | b40c5359c67039738c0f58a8d9dd52d5d26c4bf5 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 18 10:00:35 2018
@author: juangabriel
"""
import torch
import random
import gym
import numpy as np
from datetime import datetime
from argparse import ArgumentParser
from libs.perceptron import SLP
from libs.cnn import CNN
from utils.decay_schedule import LinearDecaySchedule
from utils.experience_memory import ExperienceMemory, Experience
from utils.params_manager import ParamsManager
import environments.atari as Atari
import environments.utils as env_utils
from tensorboardX import SummaryWriter
## Argument parser
args = ArgumentParser("DeepQLearning")
args.add_argument("--params-file", help = "Path to the JSON parameters file. Defaults to parameters.json",
                  default="parameters.json", metavar = "PFILE")
args.add_argument("--env", help = "ID of an Atari environment available in OpenAI Gym. Defaults to SeaquestNoFrameskip-v4",
                  default = "SeaquestNoFrameskip-v4", metavar="ENV")
args.add_argument("--gpu-id", help = "ID of the GPU to use, 0 by default", default = 0, type = int, metavar = "GPU_ID")
args.add_argument("--test", help = "Testing mode: play without learning. Disabled by default",
                  action = "store_true", default = False)
args.add_argument("--render", help = "Render the environment on screen. Disabled by default", action="store_true", default=False)
args.add_argument("--record", help = "Record videos and states of the agent's performance", action="store_true", default=False)
args.add_argument("--output-dir", help = "Directory to store the outputs. Default = ./trained_models/results",
                  default = "./trained_models/results")
args = args.parse_args()
# Global parameters
manager = ParamsManager(args.params_file)
# Log files describing the configuration of each run
summary_filename_prefix = manager.get_agent_params()['summary_filename_prefix']
summary_filename = summary_filename_prefix + args.env + datetime.now().strftime("%y-%m-%d-%H-%M")
## Summary Writer de TensorBoardX
writer = SummaryWriter(summary_filename)
manager.export_agent_params(summary_filename + "/"+"agent_params.json")
manager.export_environment_params(summary_filename + "/"+"environment_params.json")
# Global step counter across all runs
global_step_num = 0
# Enable training on GPU or CPU
use_cuda = manager.get_agent_params()['use_cuda']
device = torch.device("cuda:"+str(args.gpu_id) if torch.cuda.is_available() and use_cuda else "cpu")
# Set the random seed so the experiment can be reproduced later
seed = manager.get_agent_params()['seed']
torch.manual_seed(seed)
np.random.seed(seed)
if torch.cuda.is_available() and use_cuda:
torch.cuda.manual_seed_all(seed)
class DeepQLearner(object):
def __init__(self, obs_shape, action_shape, params):
self.params = params
self.gamma = self.params['gamma']
self.learning_rate = self.params['learning_rate']
self.best_mean_reward = -float("inf")
self.best_reward = -float("inf")
self.training_steps_completed = 0
self.action_shape = action_shape
        if len(obs_shape) == 1: ## the observation space has a single dimension
            self.DQN = SLP
        elif len(obs_shape) == 3: ## the observation is an image / 3D tensor
self.DQN = CNN
self.Q = self.DQN(obs_shape, action_shape, device).to(device)
self.Q_optimizer = torch.optim.Adam(self.Q.parameters(), lr = self.learning_rate)
if self.params['use_target_network']:
self.Q_target = self.DQN(obs_shape, action_shape, device).to(device)
self.policy = self.epsilon_greedy_Q
self.epsilon_max = self.params['epsilon_max']
self.epsilon_min = self.params['epsilon_min']
self.epsilon_decay = LinearDecaySchedule(initial_value = self.epsilon_max,
final_value = self.epsilon_min,
max_steps = self.params['epsilon_decay_final_step'])
self.step_num = 0
self.memory = ExperienceMemory(capacity = int(self.params['experience_memory_size']))
def get_action(self, obs):
obs = np.array(obs)
obs = obs / 255.0
        if len(obs.shape) == 3: # we have an image
if obs.shape[2] < obs.shape[0]: # WxHxC -> C x H x W
obs = obs.reshape(obs.shape[2], obs.shape[1], obs.shape[0])
obs = np.expand_dims(obs, 0)
return self.policy(obs)
def epsilon_greedy_Q(self, obs):
writer.add_scalar("DQL/epsilon", self.epsilon_decay(self.step_num), self.step_num)
self.step_num +=1
if random.random() < self.epsilon_decay(self.step_num) and not self.params["test"]:
action = random.choice([a for a in range(self.action_shape)])
else:
action = np.argmax(self.Q(obs).data.to(torch.device('cpu')).numpy())
return action
def learn(self, obs, action, reward, next_obs, done):
if done:
td_target = reward + 0.0
else:
td_target = reward + self.gamma * torch.max(self.Q(next_obs))
td_error = torch.nn.functional.mse_loss(self.Q(obs)[action], td_target)
self.Q_optimizer.zero_grad()
td_error.backward()
writer.add_scalar("DQL/td_error", td_error.mean(), self.step_num)
self.Q_optimizer.step()
    def replay_experience(self, batch_size = None):
        """
        Replay a randomly sampled batch of stored experience
        :param batch_size: Number of samples to draw from the memory
        :return:
        """
batch_size = batch_size if batch_size is not None else self.params['replay_batch_size']
experience_batch = self.memory.sample(batch_size)
self.learn_from_batch_experience(experience_batch)
self.training_steps_completed += 1
    def learn_from_batch_experience(self, experiences):
        """
        Update the deep Q-network based on a batch of previously stored experiences
        :param experiences: batch of past memories
        :return:
        """
batch_xp = Experience(*zip(*experiences))
obs_batch = np.array(batch_xp.obs)/255.0
action_batch = np.array(batch_xp.action).astype('int64')
reward_batch = np.array(batch_xp.reward)
if self.params["clip_reward"]:
reward_batch = np.sign(reward_batch)
next_obs_batch = np.array(batch_xp.next_obs)/255.0
done_batch = np.array(batch_xp.done)
if self.params['use_target_network']:
if self.step_num % self.params['target_network_update_frequency'] == 0:
self.Q_target.load_state_dict(self.Q.state_dict())
td_target = reward_batch + ~done_batch *\
np.tile(self.gamma, len(next_obs_batch)) * \
torch.max(self.Q_target(next_obs_batch),1)[0].data.tolist()
td_target = torch.from_numpy(td_target)
else:
td_target = reward_batch + ~done_batch * \
np.tile(self.gamma, len(next_obs_batch)) * \
torch.max(self.Q(next_obs_batch).detach(),1)[0].data.tolist()
td_target = torch.from_numpy(td_target)
td_target = td_target.to(device)
action_idx = torch.from_numpy(action_batch).to(device)
td_error = torch.nn.functional.mse_loss(
self.Q(obs_batch).gather(1, action_idx.view(-1,1)),
td_target.float().unsqueeze(1))
self.Q_optimizer.zero_grad()
td_error.mean().backward()
self.Q_optimizer.step()
def save(self, env_name):
model_save_name = 'model.pt'
path = F"/content/drive/My Drive/{model_save_name}"
file_name = self.params['save_dir']+"DQL_"+env_name+".ptm"
agent_state = {"Q": self.Q.state_dict(),
"best_mean_reward": self.best_mean_reward,
"best_reward": self.best_reward}
torch.save(agent_state, file_name)
        print("Agent state saved to: ", file_name)
def load(self, env_name):
path = F"/content/drive/My Drive/trained_models/model.pt"
file_name = self.params['load_dir']+"DQL_"+env_name+".ptm"
agent_state = torch.load(file_name, map_location = lambda storage, loc: storage)
self.Q.load_state_dict(agent_state["Q"])
self.Q.to(device)
self.best_mean_reward = agent_state["best_mean_reward"]
self.best_reward = agent_state["best_reward"]
        print("Loaded Q model from", file_name,
              "which so far has a best mean reward of: ", self.best_mean_reward,
              " and a best reward of: ", self.best_reward)
if __name__ == "__main__":
env_conf = manager.get_environment_params()
env_conf["env_name"] = args.env
if args.test:
env_conf["episodic_life"] = False
reward_type = "LIFE" if env_conf["episodic_life"] else "GAME"
custom_region_available = False
for key, value in env_conf["useful_region"].items():
if key in args.env:
env_conf["useful_region"] = value
custom_region_available = True
break
if custom_region_available is not True:
env_conf["useful_region"] = env_conf["useful_region"]["Default"]
    print("Configuration in use:", env_conf)
atari_env = False
for game in Atari.get_games_list():
if game.replace("_", "") in args.env.lower():
atari_env = True
if atari_env:
environment = Atari.make_env(args.env, env_conf)
else:
environment = env_utils.ResizeReshapeFrames(gym.make(args.env))
obs_shape = environment.observation_space.shape
action_shape = environment.action_space.n
agent_params = manager.get_agent_params()
agent_params["test"] = args.test
agent_params["clip_reward"] = env_conf["clip_reward"]
agent = DeepQLearner(obs_shape, action_shape, agent_params)
episode_rewards = list()
previous_checkpoint_mean_ep_rew = agent.best_mean_reward
num_improved_episodes_before_checkpoint = 0
if agent_params['load_trained_model']:
try:
agent.load(env_conf['env_name'])
previous_checkpoint_mean_ep_rew = agent.best_mean_reward
except FileNotFoundError:
            print("ERROR: no trained model exists for this environment. Starting from scratch")
episode = 0
while global_step_num < agent_params['max_training_steps']:
obs = environment.reset()
total_reward = 0.0
done = False
step = 0
while not done:
if env_conf['render'] or args.render:
environment.render()
action = agent.get_action(obs)
next_obs, reward, done, info = environment.step(action)
agent.memory.store(Experience(obs, action, reward, next_obs, done))
obs = next_obs
total_reward += reward
step += 1
global_step_num += 1
if done is True:
episode += 1
episode_rewards.append(total_reward)
if total_reward > agent.best_reward:
agent.best_reward = total_reward
if np.mean(episode_rewards) > previous_checkpoint_mean_ep_rew:
num_improved_episodes_before_checkpoint += 1
if num_improved_episodes_before_checkpoint >= agent_params['save_freq']:
previous_checkpoint_mean_ep_rew = np.mean(episode_rewards)
agent.best_mean_reward = np.mean(episode_rewards)
agent.save(env_conf['env_name'])
num_improved_episodes_before_checkpoint = 0
                print("\n Episode #{} finished after {} iterations. {} reward = {}, mean reward = {:.2f}, best reward = {}".
                      format(episode, step+1, reward_type, total_reward, np.mean(episode_rewards), agent.best_reward))
writer.add_scalar("main/ep_reward", total_reward, global_step_num)
writer.add_scalar("main/mean_ep_reward", np.mean(episode_rewards), global_step_num)
writer.add_scalar("main/max_ep_reward", agent.best_reward, global_step_num)
if agent.memory.get_size() >= 2*agent_params['replay_start_size'] and not args.test:
agent.replay_experience()
break
environment.close()
writer.close() |
py | b40c54ccbf8c75fc26da77d9d8618a776cdd3212 | from typing import List
class Solution:
def numRookCaptures(self, board: List[List[str]]) -> int:
return (lambda x:x('pR')+x('Rp'))([''.join(board[x]+[' ']+[i[y] for i in board]).replace('.','') for x in range(8) for y in range(8) if board[x][y]=='R'][0].count) |
py | b40c557881c0eb93dc15ba81412701b9d0c5eae5 | from __future__ import absolute_import, division, print_function
from libtbx.test_utils import approx_equal
import mmtbx.f_model
import random, time
from scitbx.array_family import flex
from mmtbx import bulk_solvent
from cctbx import adptbx
from cctbx import sgtbx
from cctbx.development import random_structure
import boost.python
ext = boost.python.import_ext("mmtbx_f_model_ext")
from mmtbx import bulk_solvent
if(1):
random.seed(0)
flex.set_random_seed(0)
def run_00():
time_aniso_u_scaler = 0
for symbol in sgtbx.bravais_types.acentric + sgtbx.bravais_types.centric:
#print symbol, "-"*50
space_group_info = sgtbx.space_group_info(symbol = symbol)
xrs = random_structure.xray_structure(
space_group_info = space_group_info,
elements = ["N"]*100,
volume_per_atom = 50.0,
random_u_iso = True)
    # XXX add a method to adptbx to do this
point_group = sgtbx.space_group_info(
symbol=symbol).group().build_derived_point_group()
adp_constraints = sgtbx.tensor_rank_2_constraints(
space_group=point_group,
reciprocal_space=True)
u_star = adptbx.u_cart_as_u_star(xrs.unit_cell(),
adptbx.random_u_cart(u_scale=1,u_min=0.1))
u_indep = adp_constraints.independent_params(all_params=u_star)
u_star = adp_constraints.all_params(independent_params=u_indep)
b_cart_start=adptbx.u_as_b(adptbx.u_star_as_u_cart(xrs.unit_cell(), u_star))
#
tr = (b_cart_start[0]+b_cart_start[1]+b_cart_start[2])/3
b_cart_start = [b_cart_start[0]-tr,b_cart_start[1]-tr,b_cart_start[2]-tr,
b_cart_start[3],b_cart_start[4],b_cart_start[5]]
tr = (b_cart_start[0]+b_cart_start[1]+b_cart_start[2])/3
#
#print "Input b_cart :", " ".join(["%8.4f"%i for i in b_cart_start]), "tr:", tr
F = xrs.structure_factors(d_min = 2.0).f_calc()
u_star = adptbx.u_cart_as_u_star(
F.unit_cell(), adptbx.b_as_u(b_cart_start))
fbc = mmtbx.f_model.ext.k_anisotropic(F.indices(), u_star)
fc = F.structure_factors_from_scatterers(xray_structure=xrs).f_calc()
f_obs = F.customized_copy(data = flex.abs(fc.data()*fbc))
t0 = time.time()
#
obj = bulk_solvent.aniso_u_scaler(
f_model_abs = flex.abs(fc.data()),
f_obs = f_obs.data(),
miller_indices = f_obs.indices(),
adp_constraint_matrix = adp_constraints.gradient_sum_matrix())
time_aniso_u_scaler += (time.time()-t0)
b_cart_final = adptbx.u_as_b(adptbx.u_star_as_u_cart(f_obs.unit_cell(),
adp_constraints.all_params(tuple(obj.u_star_independent))))
#
obj = bulk_solvent.aniso_u_scaler(
f_model_abs = flex.abs(fc.data()),
f_obs = f_obs.data(),
miller_indices = f_obs.indices())
b_cart_final2 = adptbx.u_as_b(adptbx.u_star_as_u_cart(f_obs.unit_cell(),
tuple(obj.u_star)))
#
assert approx_equal(b_cart_final, b_cart_final2)
#print "Output b_cart:", " ".join(["%8.4f"%i for i in b_cart_final])
assert approx_equal(b_cart_start, b_cart_final, 1.e-4)
print("Time (aniso_u_scaler only): %6.4f"%time_aniso_u_scaler)
def run_01():
time_aniso_u_scaler = 0
for symbol in sgtbx.bravais_types.acentric + sgtbx.bravais_types.centric:
#print symbol, "-"*50
space_group_info = sgtbx.space_group_info(symbol = symbol)
xrs = random_structure.xray_structure(
space_group_info = space_group_info,
elements = ["N"]*100,
volume_per_atom = 50.0,
random_u_iso = True)
    # XXX add a method to adptbx to do this
point_group = sgtbx.space_group_info(
symbol=symbol).group().build_derived_point_group()
adp_constraints = sgtbx.tensor_rank_2_constraints(
space_group=point_group,
reciprocal_space=True)
u_star = adptbx.u_cart_as_u_star(xrs.unit_cell(),
adptbx.random_u_cart(u_scale=1,u_min=0.1))
u_indep = adp_constraints.independent_params(all_params=u_star)
u_star = adp_constraints.all_params(independent_params=u_indep)
b_cart_start=adptbx.u_as_b(adptbx.u_star_as_u_cart(xrs.unit_cell(), u_star))
#
tr = (b_cart_start[0]+b_cart_start[1]+b_cart_start[2])/3
b_cart_start = [b_cart_start[0]-tr,b_cart_start[1]-tr,b_cart_start[2]-tr,
b_cart_start[3],b_cart_start[4],b_cart_start[5]]
tr = (b_cart_start[0]+b_cart_start[1]+b_cart_start[2])/3
#
#print "Input b_cart :", " ".join(["%8.4f"%i for i in b_cart_start]), "tr:", tr
F = xrs.structure_factors(d_min = 2.0).f_calc()
u_star = adptbx.u_cart_as_u_star(
F.unit_cell(), adptbx.b_as_u(b_cart_start))
fbc = mmtbx.f_model.ext.k_anisotropic(F.indices(), u_star)
fc = F.structure_factors_from_scatterers(xray_structure=xrs).f_calc()
f_obs = F.customized_copy(data = flex.abs(fc.data()*fbc))
#print bulk_solvent.r_factor(f_obs.data(), fmodel.f_model().data())
obj = bulk_solvent.aniso_u_scaler(
f_model_abs = flex.abs(fc.data()),
f_obs = f_obs.data(),
miller_indices = f_obs.indices(),
unit_cell = f_obs.unit_cell())
a = obj.a
####
#print "Input a :", " ".join(["%7.3f"%i for i in a])
overall_anisotropic_scale = mmtbx.f_model.ext.k_anisotropic(
f_obs.indices(), a, f_obs.unit_cell())
#print bulk_solvent.r_factor(f_obs.data(), fmodel.f_model().data()*overall_anisotropic_scale)
f_obs = abs(fc)
f_obs = f_obs.customized_copy(data = f_obs.data() * overall_anisotropic_scale)
#print bulk_solvent.r_factor(f_obs.data(), fmodel.f_model().data())
#print bulk_solvent.r_factor(f_obs.data(), fmodel.f_model().data())
t0 = time.time()
obj = bulk_solvent.aniso_u_scaler(
f_model_abs = flex.abs(fc.data()),
f_obs = f_obs.data(),
miller_indices = f_obs.indices(),
unit_cell = f_obs.unit_cell())
time_aniso_u_scaler += (time.time()-t0)
overall_anisotropic_scale = mmtbx.f_model.ext.k_anisotropic(
f_obs.indices(), obj.a, f_obs.unit_cell())
assert approx_equal(bulk_solvent.r_factor(f_obs.data(),
fc.data()*overall_anisotropic_scale), 0.0, 1.e-2) # XXX seems to be low
#print "Output a:", " ".join(["%7.3f"%i for i in obj.a])
assert approx_equal(a, obj.a, 1.e-3) # XXX can it be smaller?
print("Time (aniso_u_scaler only): %6.4f"%time_aniso_u_scaler)
def run_02():
time_aniso_u_scaler = 0
for symbol in sgtbx.bravais_types.acentric + sgtbx.bravais_types.centric:
#print symbol, "-"*50
space_group_info = sgtbx.space_group_info(symbol = symbol)
xrs = random_structure.xray_structure(
space_group_info = space_group_info,
elements = ["N"]*100,
volume_per_atom = 50.0,
random_u_iso = True)
xrs.scattering_type_registry(table = "wk1995")
    # XXX add a method to adptbx to do this
point_group = sgtbx.space_group_info(
symbol=symbol).group().build_derived_point_group()
adp_constraints = sgtbx.tensor_rank_2_constraints(
space_group=point_group,
reciprocal_space=True)
u_star = adptbx.u_cart_as_u_star(xrs.unit_cell(),
adptbx.random_u_cart(u_scale=1,u_min=0.1))
u_indep = adp_constraints.independent_params(all_params=u_star)
u_star = adp_constraints.all_params(independent_params=u_indep)
b_cart_start=adptbx.u_as_b(adptbx.u_star_as_u_cart(xrs.unit_cell(), u_star))
#
tr = (b_cart_start[0]+b_cart_start[1]+b_cart_start[2])/3
b_cart_start = [b_cart_start[0]-tr,b_cart_start[1]-tr,b_cart_start[2]-tr,
b_cart_start[3],b_cart_start[4],b_cart_start[5]]
tr = (b_cart_start[0]+b_cart_start[1]+b_cart_start[2])/3
#
#print "Input b_cart :", " ".join(["%8.4f"%i for i in b_cart_start]), "tr:", tr
reg = xrs.scattering_type_registry(table="wk1995", d_min=1/12)
f_000 = reg.sum_of_scattering_factors_at_diffraction_angle_0()
F = xrs.structure_factors(d_min = 2.0).f_calc()
i = F.indices()
i.append([0,0,0])
d = F.data()
d.append(f_000)
F = F.customized_copy(indices = i, data = d)
u_star = adptbx.u_cart_as_u_star(
F.unit_cell(), adptbx.b_as_u(b_cart_start))
fbc = mmtbx.f_model.ext.k_anisotropic(F.indices(), u_star)
fc = F.structure_factors_from_scatterers(xray_structure=xrs).f_calc()
f_obs = F.customized_copy(data = flex.abs(fc.data()*fbc))
#print bulk_solvent.r_factor(f_obs.data(), fmodel.f_model().data())
obj = bulk_solvent.aniso_u_scaler(
f_model_abs = flex.abs(fc.data()),
f_obs = f_obs.data(),
miller_indices = f_obs.indices(),
unit_cell = f_obs.unit_cell())
a = obj.a
####
#print "Input a :", " ".join(["%7.3f"%i for i in a])
overall_anisotropic_scale = mmtbx.f_model.ext.k_anisotropic(
f_obs.indices(), a, f_obs.unit_cell())
#print bulk_solvent.r_factor(f_obs.data(), fmodel.f_model().data()*overall_anisotropic_scale)
f_obs = abs(fc)
f_obs = f_obs.customized_copy(data = f_obs.data() * overall_anisotropic_scale)
#print bulk_solvent.r_factor(f_obs.data(), fmodel.f_model().data())
#print bulk_solvent.r_factor(f_obs.data(), fmodel.f_model().data())
t0 = time.time()
obj = bulk_solvent.aniso_u_scaler(
f_model_abs = flex.abs(fc.data()),
f_obs = f_obs.data(),
miller_indices = f_obs.indices(),
unit_cell = f_obs.unit_cell())
time_aniso_u_scaler += (time.time()-t0)
overall_anisotropic_scale = mmtbx.f_model.ext.k_anisotropic(
f_obs.indices(), obj.a, f_obs.unit_cell())
assert approx_equal(bulk_solvent.r_factor(f_obs.data(),
fc.data()*overall_anisotropic_scale), 0.0, 1.e-2) # XXX seems to be low
#print "Output a:", " ".join(["%7.3f"%i for i in obj.a])
assert approx_equal(a, obj.a, 1.e-4) # XXX can it be smaller?
assert overall_anisotropic_scale[len(overall_anisotropic_scale)-1]==1
print("Time (aniso_u_scaler only): %6.4f"%time_aniso_u_scaler)
if (__name__ == "__main__"):
t0 = time.time()
run_00()
run_01()
run_02() # same as run_01 but with f000 added
print("Time: %6.4f"%(time.time()-t0))
print("OK")
|
py | b40c55f9f6ea9bbcaff1e5f764bd5e1e5a0bfa48 | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.utils.translation import gettext as _
from core import models
class UserAdmin(BaseUserAdmin):
ordering = ['id']
list_display = ['email', 'name']
fieldsets = (
(None, {
"fields": ( 'email', 'password'),
}),
(_('Personal Info'), {'fields': ('name',)}),
(
_('Permissions'),
{'fields': ('is_active', 'is_staff', 'is_superuser')}
),
(_('Important dates'), {'fields': ('last_login',)})
)
add_fieldsets = (
(None, {'classes': ('wide',),
'fields': ('email', 'password1', 'password2')}),
)
admin.site.register(models.User, UserAdmin)
|
py | b40c564bd38087e45893a0822911a1e7eb053de5 | import sublime
class ThreadProgress():
"""
Animates an indicator, [= ], in the status area while a thread runs
:param thread:
The thread to track for activity
:param message:
The message to display next to the activity indicator
:param success_message:
The message to display once the thread is complete
"""
def __init__(self, thread, message, success_message):
self.thread = thread
self.message = message
self.success_message = success_message
self.addend = 1
self.size = 8
sublime.set_timeout(lambda: self.run(0), 100)
def run(self, i):
if not self.thread.is_alive():
if hasattr(self.thread, 'result') and not self.thread.result:
sublime.status_message('')
return
sublime.status_message(self.success_message)
return
before = i % self.size
after = (self.size - 1) - before
sublime.status_message('%s [%s=%s]' % \
(self.message, ' ' * before, ' ' * after))
if not after:
self.addend = -1
if not before:
self.addend = 1
i += self.addend
sublime.set_timeout(lambda: self.run(i), 100)
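# Illustrative usage sketch (the worker function and thread below are assumptions, not part of
# this module): start a background thread, then hand it to ThreadProgress so the status bar
# animates until the thread finishes; an optional `result` attribute controls the final message.
#
#   import threading
#
#   def worker():
#       pass  # long-running task goes here
#
#   thread = threading.Thread(target=worker)
#   thread.result = True
#   thread.start()
#   ThreadProgress(thread, 'Fetching data', 'Fetching data: done')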
|
py | b40c572ddc7bf0cb01975a510db93499472bdaed |
#import tqdm which creates the progress bar
from tqdm import tqdm
#number of lines in the file
total_lines=68742193
#open the uncompressed xml with read permission
file = open('dblp-2020-04-01.xml','r')
#create the progress bar and update it every 0.3 seconds
bar = tqdm(total= total_lines, mininterval=0.3, unit=" lines", initial=0)
#create a dictionary called yearlist that will store years and the publications of each
#with format year:number of publications
yearlist = {}
line = " "
while line:
#save the line to a variable
line = file.readline()
#check if <year> and </year> are found in line
if "<year>" in line and "</year>" in line:
#split the line off the <year> and </year>
year=line.split("<year>")[-1].split("</year>")[0]
#check if that year is already in the dictionary
if year in yearlist:
#get the value of the publications of that year and add one
yearlist[year] = yearlist.get(year)+1
else:
#if the year is not found in the dictionary, save 1 to that year in the dictionary
yearlist[year] = 1
#update the progress bar with each new line read
bar.update()
#close the progress bar and the file
bar.close()
file.close()
print("")
#Write the results to file with format <year> <number of publications in that year>
with open('publications.txt','w') as output:
print('#{} {}'.format('Year','Publications'),file=output)
for key in sorted(yearlist):
print('{} {}'.format(key,yearlist.get(key)),file=output)
print('{} {}'.format(key,yearlist.get(key))) |
py | b40c5916f9751cbb7aa09c6827b2eadbb7604f82 | from flask import Blueprint
bp = Blueprint('api', __name__)
from app.api import tokens, errors, users # noqa : E402, F401
|
py | b40c5958ded5a25d348878278de2d9af6b3ca346 | import os
import datetime
import numpy
import pandas
import abcutils.core
DATE_START = datetime.datetime(2017, 2, 14)
DATE_END = datetime.datetime(2018, 2, 15)
INPUT_DATASETS = {
'edison': 'summaries/edison-summaries_2017-02-14-2018-02-15.csv.gz',
'cori': 'summaries/cori-summaries_2017-02-14-2018-02-15.csv.gz',
'mira': 'summaries/mira-summaries_2017-02-14_2018-02-15.csv.gz',
}
CACHE_FILE = 'cache.hdf5'
def load_raw_datasets(input_datasets=None, verbose=True):
"""Load data from CSVs and synthesize metrics
Args:
input_datasets (dict): keyed by system name (mira, edison, cori); values
are path to the CSV containing the data for that system.
verbose (bool): announce what is happening before it happens
Returns:
Concatenated pandas.DataFrame with data from all CSVs
"""
if input_datasets is None:
input_datasets = INPUT_DATASETS
dataframes = []
for system, csvfile in input_datasets.items():
if csvfile.endswith('.csv'):
cache_file = csvfile[:-4] + "_cache.hdf5"
elif csvfile.endswith('.csv.gz'):
cache_file = csvfile[:-7] + "_cache.hdf5"
else:
cache_file = csvfile + "_cache.hdf5"
if os.path.isfile(cache_file):
if verbose:
print("Loading from cache %s" % cache_file)
dataframe = pandas.read_hdf(cache_file, 'summary')
else:
dataframe = abcutils.load_and_synthesize_csv(csvfile, system=system)
dataframe.to_hdf(cache_file,
key='summary',
mode='w',
format='fixed',
complevel=9,
complib='zlib')
if verbose:
print("Cached synthesized CSV to %s" % cache_file)
dataframes.append(dataframe)
dataframe = pandas.concat(dataframes, axis='rows')
return dataframe
def build_sc18_filters(dataframe):
"""Build generic data filters for the SC paper
Args:
dataframe (pandas.DataFrame): Raw dataset from load_and_synthesize_csv
Returns:
list: List of filters to be passed to ``abcutils.core.apply_filters``
along with ``dataframe``
"""
filters = []
# Constrain dates to those covered by the paper
filters.append(dataframe['_datetime_start'] < DATE_END)
filters.append(dataframe['_datetime_start'] >= DATE_START)
# Drop Darshan logs from jobs that didn't actually do significant I/O; this
# filters out a set of VPIC jobs that hit a bug (related to the Edison
# upgrade) that allowed them to finish correctly but never write their data
# out.
filters.append(dataframe['darshan_total_gibs_posix'] > 1.0)
# Some of the Mira data has invalid benchmark_ids; drop them
filters.append(dataframe['_benchmark_id'] != 'hacc_io_write_shared_write')
# The Haswell data is misleading since it used a tiny fraction of the system
filters.append(dataframe['_test_platform'] != 'cscratch@cori-haswell')
return filters
def clean_sc18_dataframe(dataframe, truncate_contention=False, drop_cf_above=1.2, inplace=True):
"""Patches holes and problems in dataset
Args:
dataframe (pandas.DataFrame): Raw dataset from load_and_synthesize_csv
truncate_contention (bool): If True, apply max(0.0, val) to all
derived contention values. Default value corresponds to what
was used in the paper.
drop_cf_above (float or None): Drop any records whose coverage factors
for bandwidth are above this value. Default value corresponds to
what was used in the paper.
inplace (bool): Modify dataframe in-place or return a modified copy
Returns:
pandas.DataFrame: DataFrame with gaps and invalid data (negatives, NaNs)
filled in with valid data (zeros, NaNs, etc)
"""
if not inplace:
dataframe = dataframe.copy()
# Reset the index to ensure that there are no degenerate indices in the final dataframe
dataframe.index = pandas.Index(data=numpy.arange(len(dataframe)), dtype='int64')
# Apply a filter to invalidate obviously bogus bandwidth coverage factors
if drop_cf_above is not None:
for index in dataframe[dataframe['coverage_factor_bw'] > drop_cf_above].index:
dataframe.loc[index, 'coverage_factor_bw'] = numpy.nan
# Drop some of the weird columns left over from the CSV
dataframe = dataframe.drop(
columns=[x for x in ['Unnamed: 0', 'index'] if x in dataframe.columns],
axis=1)
# if truncate_contention, do not allow contention to go below 0.0
if truncate_contention:
for metric in ['bw', 'opens', 'stats', 'ops']:
dataframe['contention_%s' % metric] = dataframe['contention_%s' % metric].apply(
func=lambda x: max(1.0 - x, 0.0))
return dataframe
def load_dataset(verbose=True, truncate_contention=False, drop_cf_above=1.2, filter_func=build_sc18_filters, *args, **kwargs):
"""Load dataset used for Year in the Life paper
Load the canonical dataset used for the "Year in the Life" paper, apply
global filters on the dataset, and add a few additional derived metrics.
Args:
verbose (bool): Print messages describing from where data is being
loaded
truncate_contention (bool): If True, apply max(0.0, val) to all
derived contention values. Default value corresponds to what
was used in the paper.
drop_cf_above (float or None): Drop any records whose coverage factors
for bandwidth are above this value. Default value corresponds to
what was used in the paper.
filter_func: Function that takes a dataframe as an argument and returns
a list of filters that can be passed to
``abcutils.core.apply_filters()``
Returns:
pandas.DataFrame: Loaded, filtered, and augmented dataset
"""
dataframe = load_raw_datasets(verbose=verbose, *args, **kwargs)
dataframe = clean_sc18_dataframe(
dataframe=dataframe,
truncate_contention=truncate_contention,
drop_cf_above=drop_cf_above)
if filter_func:
filtered_df = abcutils.core.apply_filters(dataframe, filter_func(dataframe), verbose).sort_values('_datetime_start').copy()
else:
filtered_df = dataframe.sort_values('_datetime_start').copy()
# Reset the index to ensure that there are no degenerate indices in the final dataframe
filtered_df.index = pandas.Index(data=numpy.arange(len(filtered_df)), dtype='int64')
del dataframe
return filtered_df
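# Illustrative usage sketch (assumes the summary CSVs listed in INPUT_DATASETS, or their
# *_cache.hdf5 files, are present relative to the working directory):
#
#   df = load_dataset(verbose=True)
#   print(df.groupby('_test_platform')['coverage_factor_bw'].describe())
#
# Passing filter_func=None skips the SC18 paper filters and returns the full cleaned dataset.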
|
py | b40c5a0c627e0bc25447ff90ffc02af53320d7f3 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Callable, Optional
from torchmetrics import Precision as _Precision
from torchmetrics import Recall as _Recall
from pytorch_lightning.metrics.utils import deprecated_metrics
class Precision(_Precision):
@deprecated_metrics(target=_Precision)
def __init__(
self,
num_classes: Optional[int] = None,
threshold: float = 0.5,
average: str = "micro",
multilabel: bool = False,
mdmc_average: Optional[str] = None,
ignore_index: Optional[int] = None,
top_k: Optional[int] = None,
is_multiclass: Optional[bool] = None,
compute_on_step: bool = True,
dist_sync_on_step: bool = False,
process_group: Optional[Any] = None,
dist_sync_fn: Callable = None,
):
"""
This implementation refers to :class:`~torchmetrics.Precision`.
.. deprecated::
Use :class:`~torchmetrics.Precision`. Will be removed in v1.5.0.
"""
_ = num_classes, threshold, average, multilabel, mdmc_average, ignore_index, top_k, is_multiclass, \
compute_on_step, dist_sync_on_step, process_group, dist_sync_fn
class Recall(_Recall):
@deprecated_metrics(target=_Recall)
def __init__(
self,
num_classes: Optional[int] = None,
threshold: float = 0.5,
average: str = "micro",
multilabel: bool = False,
mdmc_average: Optional[str] = None,
ignore_index: Optional[int] = None,
top_k: Optional[int] = None,
is_multiclass: Optional[bool] = None,
compute_on_step: bool = True,
dist_sync_on_step: bool = False,
process_group: Optional[Any] = None,
dist_sync_fn: Callable = None,
):
"""
This implementation refers to :class:`~torchmetrics.Recall`.
.. deprecated::
Use :class:`~torchmetrics.Recall`. Will be removed in v1.5.0.
"""
|
py | b40c5a538b9f31e58c5cce1ec56f0716230fed67 | from remote_code_execution_engine import schemas
from fastapi.testclient import TestClient
def test_WHEN_submission_works_THEN_return(client: TestClient,
execution: schemas.Execution,
mocker,
mock_send_task_no_error_submission: callable):
"""
Function for testing the submission post call when the call is done correctly
"""
mocker.patch(
'remote_code_execution_engine.api.api_v1.endpoints.evaluations.celery_client.send_task',
mock_send_task_no_error_submission
)
res = client.post('/api/v1/evaluations/', json=execution)
assert res.status_code == 200
def test_WHEN_submission_fails_THEN_raise(client: TestClient,
execution: schemas.Execution,
mocker,
mock_send_task_raise: callable):
"""
Function for testing the submission post call when the celery worker cannot process the execution
"""
mocker.patch(
'remote_code_execution_engine.api.api_v1.endpoints.evaluations.celery_client.send_task',
mock_send_task_raise
)
res = client.post('/api/v1/evaluations/', json=execution)
assert res.status_code == 500
assert "could not process the code execution" in res.text
def test_WHEN_test_code_is_not_properly_formatted_THEN_raise(client: TestClient,
execution: schemas.Execution,
mocker,
mock_send_task_no_error: callable):
"""
Function for testing the submission post call when the test code is not properly formatted
"""
mocker.patch(
'remote_code_execution_engine.api.api_v1.endpoints.evaluations.celery_client.send_task',
mock_send_task_no_error
)
res = client.post('/api/v1/evaluations/', json=execution)
assert res.status_code == 400
|
py | b40c5aa35ddce9ca8553f9d3f870396c46c29c64 | from anthill.framework.utils.translation import translate_lazy as _
from anthill.platform.conf.settings import *
import os
# Build paths inside the application like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'f!+1fl_+1r6ccwt)asua4yum&1i(!$b617&gibfng5hq#8aq)9'
DEBUG = False
ADMINS = (
('Lysenko Vladimir', '[email protected]'),
)
# Database uri example:
SQLALCHEMY_DATABASE_URI = 'postgres://anthill_message@/anthill_message'
LOCATION = 'http://localhost:9609'
BROKER = 'amqp://guest:guest@localhost:5672'
# ROUTES_CONF = 'message.routes'
# APPLICATION_CLASS = 'message.apps.AnthillApplication'
APPLICATION_NAME = 'message'
APPLICATION_VERBOSE_NAME = _('Message')
APPLICATION_DESCRIPTION = _('Implements messaging system')
APPLICATION_ICON_CLASS = 'icon-envelop5'
APPLICATION_COLOR = 'green'
# SERVICE_CLASS = 'message.services.Service'
TEMPLATE_PATH = os.path.join(BASE_DIR, 'ui', 'templates')
LOCALE_PATH = os.path.join(BASE_DIR, 'locale')
CACHES["default"]["LOCATION"] = "redis://localhost:6379/19"
CACHES["default"]["KEY_PREFIX"] = "message.anthill"
EMAIL_SUBJECT_PREFIX = '[Anthill: message] '
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'anthill.framework.utils.log.RequireDebugFalse',
},
'require_debug_true': {
'()': 'anthill.framework.utils.log.RequireDebugTrue',
},
},
'formatters': {
'anthill.server': {
'()': 'anthill.framework.utils.log.ServerFormatter',
'fmt': '%(color)s[%(levelname)1.1s %(asctime)s %(module)s:%(lineno)d]%(end_color)s %(message)s',
'datefmt': '%Y-%m-%d %H:%M:%S',
'color': False,
}
},
'handlers': {
'console': {
'level': 'DEBUG',
'filters': ['require_debug_true'],
'class': 'logging.StreamHandler',
'formatter': 'anthill.server',
},
'anthill.server': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': os.path.join(LOGGING_ROOT_DIR, 'message.log'),
'formatter': 'anthill.server',
'maxBytes': 100 * 1024 * 1024, # 100 MiB
'backupCount': 10
},
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'anthill.framework.utils.log.AdminEmailHandler'
}
},
'loggers': {
'anthill': {
'handlers': ['console', 'mail_admins'],
'level': 'INFO',
},
'anthill.application': {
'handlers': ['anthill.server'],
'level': 'INFO',
'propagate': False
},
'tornado.access': {
'handlers': ['anthill.server'],
'level': 'INFO',
'propagate': False
},
'tornado.application': {
'handlers': ['anthill.server'],
'level': 'INFO',
'propagate': False
},
'tornado.general': {
'handlers': ['anthill.server'],
'level': 'INFO',
'propagate': False
},
'celery': {
'handlers': ['anthill.server'],
'level': 'INFO',
'propagate': False
},
'celery.worker': {
'handlers': ['anthill.server'],
'level': 'INFO',
'propagate': False
},
'celery.task': {
'handlers': ['anthill.server'],
'level': 'INFO',
'propagate': False
},
'celery.redirected': {
'handlers': ['anthill.server'],
'level': 'INFO',
'propagate': False
},
'asyncio': {
'handlers': ['anthill.server'],
'level': 'INFO',
'propagate': False
},
}
}
#########
# GEOIP #
#########
GEOIP_PATH = os.path.join(BASE_DIR, '../')
#########
# HTTPS #
#########
# HTTPS = {
# 'key_file': os.path.join(BASE_DIR, '../server.key'),
# 'crt_file': os.path.join(BASE_DIR, '../server.crt'),
# }
HTTPS = None
############
# GRAPHENE #
############
GRAPHENE = {
'SCHEMA': 'message.api.v1.public.schema',
'MIDDLEWARE': ()
}
|
py | b40c5b0bc3b0050509eabe0b4bf217c45a5e9241 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.electricity import electricity
def test_electricity():
"""Test module electricity.py by downloading
electricity.csv and testing shape of
extracted data has 158 rows and 8 columns
"""
test_path = tempfile.mkdtemp()
x_train, metadata = electricity(test_path)
try:
assert x_train.shape == (158, 8)
except:
shutil.rmtree(test_path)
raise
|
py | b40c5beca410f99b9c4c2c7a4f836169bd63d829 | # -*-coding: utf-8
"""
FileName: face_recognition.py
Author: kalentee
E-mail: [email protected]
Date: 2019-5-16 11:41
Workspace: Visual Studio Code, Ubuntu18.04TLS, Anaconda5.1, Tensorflow1.13.1
Last modified: 2019-5-16 11:41
"""
import face_model
import face_image, face_preprocess
import argparse
import cv2
import os
import numpy as np
database = None
def get_args():
parser = argparse.ArgumentParser(description="argument of face recognition.")
parser.add_argument('pattern', choices=['camera', 'image', 'video'], help="the mode this program works in. camera: use the camera to detect, image: detect a single image, video: detect a video file")
parser.add_argument("--save-path", default="../datasets/output/", type=str, help="save path of the result")
parser.add_argument('--database-path', default='../datasets/database/', help='database path', type=str)
parser.add_argument('--image-size', default='112,112', help='crop image size')
parser.add_argument('--model', default='../models/model-r50-am-lfw/model,0000', help='path to load model.')
parser.add_argument('--ga-model', default='', help='path to load model.')
parser.add_argument('--gpu', default=0, type=int, help='gpu id')
parser.add_argument('--det', default=0, type=int, help='mtcnn option, 1 means using R+O, 0 means detect from the beginning')
parser.add_argument('--flip', default=0, type=int, help='whether do lr flip aug')
parser.add_argument('--threshold', default=1.24, type=float, help='ver dist threshold')
sgroup = parser.add_argument_group("select operation")
sgroup.add_argument('--save','-s', action='store_true', help="whether to save the result")
sgroup.add_argument('--npy','-n', action='store_true', help="choose to load npy file or image file")
igroup = parser.add_argument_group("operation of image detect")
igroup.add_argument("--image-path", default="../dataset/test_image/test.jpg", type=str, help="the image path")
vgroup = parser.add_argument_group("operation of video detect")
vgroup.add_argument("--video-path", default="../dataset/test_video/test.mp4", type=str, help="the video path")
ngroup = parser.add_argument_group("operation of npy file")
ngroup.add_argument("--npy-datas", default="../datasets/npy/datas.npy", type=str, help="path of datas.npy")
ngroup.add_argument("--npy-labels", default="../datasets/npy/labels.npy", type=str, help="path of labels.npy")
args = parser.parse_args()
return args
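# Example invocations (illustrative; model and dataset paths depend on the local setup):
#   python face_recognition.py camera --threshold 1.24 --save
#   python face_recognition.py image --image-path ../datasets/test_image/test.jpg
#   python face_recognition.py video --video-path ../datasets/test_video/test.mp4 --npy \
#       --npy-datas ../datasets/npy/datas.npy --npy-labels ../datasets/npy/labels.npy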
# Recognize faces against the database
def recognition(imgs, threshold):
global database
labels = database[1]
datas = database[0]
name_list = []
for img in imgs:
dist_this = []
for data in datas:
dist = np.sum(np.square(img - data))
dist_this.append(dist)
#print(dist)
# Take the smallest distance
min_dist = min(dist_this)
# Find the index in the database of the smallest distance
number = dist_this.index(min_dist)
print(labels[number], min_dist)
if min_dist < threshold:
name = labels[number]
else:
name = 'Unknown'
name_list.append(name)
return name_list
# Draw face bounding boxes and labels
def draw_face_box(img, boxes_name=[], boxes=[]):
if len(boxes) != 0 and len(boxes_name) != 0:
for name ,box in zip(boxes_name, boxes):
cv2.rectangle(img, (box[0],box[1]), (box[2],box[3]), (0, 255, 0), 2, 8, 0)
cv2.putText(img, name, (box[0],box[1]), cv2.FONT_HERSHEY_COMPLEX_SMALL, 0.8, (0, 0, 255), thickness=2)
return img
# Load the face database from image files
def load_database(img_path, model):
file_list = os.listdir(img_path)
database_datas = []
database_labels = []
# Only jpg, jpeg and png images are accepted (adjustable); files directly under img_path are handled here, subdirectories are loaded through the recursive call below
for name in file_list:
path = os.path.join(img_path, name)
# Recursively call load_database to load faces from subdirectories
if os.path.isdir(path):
print(path, "is a dir")
data = load_database(path, model)
if len(data) != 0:
database_datas.extend(data[0])
database_labels.extend(data[1])
continue
# Discard files that are not jpg, jpeg or png images (adjust as needed)
if not (os.path.splitext(path)[1] in ['.jpg', '.jpeg', '.png']):
print(path, "is not a picture")
continue
img = cv2.imread(path)
result = model.get_input(img)
if result:
img, _ = result
img = model.get_feature(img)
# Database images are assumed to contain exactly one face, so img[0] drops the extra dimension
database_datas.append(img[0])
# Split the path twice to extract the parent folder name, which is used as the person label
database_labels.append(os.path.split(os.path.split(path)[0])[1])
return database_datas, database_labels
# Load the face database from npy files
def load_database_by_npy(args):
# Wrapping with list() keeps the result as an iterable for later use; a generator (yield) would not work here
datas = list(np.load(args.npy_datas))
labels = list(np.load(args.npy_labels))
return datas, labels
# Detect using cv2.VideoCapture
# Originally written only for camera detection, later generalized to video files and renamed to detect
def detect(args, model, camera=False):
if camera:
_open = 0
elif args.video_path and not camera:
if not os.path.exists(args.video_path):
raise ValueError("Video path is not exist")
_open = args.video_path
else:
raise ValueError("Parameters are not exist: args.video_path/camera only one is true!")
print("open cap")
cap = cv2.VideoCapture(_open)
# Set up the video writer if saving is requested
writer = None
if args.save:
fps = cap.get(cv2.CAP_PROP_FPS)
size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
writer = cv2.VideoWriter(os.path.join(args.save_path, 'output_r50.avi'), cv2.VideoWriter_fourcc(*'XVID'), fps, size)
while cap.isOpened():
# Read a frame
_, frame = cap.read()
# print(np.shape(frame))
# Extract faces and bounding boxes; a result variable is used instead of unpacking to avoid an unpack error when no face is detected
frame = detect_frame(args, frame, model)
#cv2.imshow("Video", frame)
if writer:
frame = cv2.flip(frame, 0)
writer.write(frame)
#if cv2.waitKey(1) & 0xFF == ord('q'):
# break
if args.save:
writer.release()
cap.release()
print("release cap")
cv2.destroyAllWindows()
print("destory all windows")
# Shared detection routine operating on a numpy frame
def detect_frame(args, frame, model):
# Receive into result instead of unpacking directly to avoid an unpack error when the return value is None
result = model.get_input(frame)
if result != None:
imgs, (bboxes, _) = result
# Extract face embeddings used to decide whether two faces belong to the same person
imgs = model.get_feature(imgs)
# Get the labels of the detected faces
name_list = recognition(imgs, args.threshold)
# Cast bounding box coordinates to integers before drawing
print("this fram is: ", name_list)
bboxes = bboxes[:, 0:4].astype(int)
frame = draw_face_box(frame, name_list, bboxes)
return frame
# Detect a single image and display/save it
def detect_image(args, model):
if not os.path.exists(args.image_path):
raise ValueError("Image path is not exist")
img = cv2.imread(args.image_path)
print("load image")
img = detect_frame(args, img, model)
#cv2.imshow("image", img)
if args.save:
cv2.imwrite(os.path.join(args.save_path, 'output.jpg'), img)
#if cv2.waitKey(500) & 0xFF == ord('q'):pass
cv2.destroyAllWindows()
print("destroy all windows")
# Entry function that selects the working mode
def main(args):
# database is only used by recognition(), which is called from inside the detection functions rather than directly from main, so it is kept as a module-level global
global database
model = face_model.FaceModel(args)
print("load model")
if args.npy:
database = load_database_by_npy(args)
print("load npy database")
else:
database = load_database(args.database_path, model)
print("load image database")
# Select the working mode
if args.pattern == 'camera':
detect(args, model, camera=True)
elif args.pattern == 'video':
detect(args, model)
elif args.pattern == 'image':
detect_image(args, model)
else:
print("Error partten!")
del database
exit(0)
del database
if __name__ == '__main__':
args = get_args()
main(args)
|
py | b40c5c09921477fd919423c3079a669ab2c24131 | """
Multiple concurrent queries
+++++++++++++++++++++++++++
Send a bunch of different SNMP GET requests to different peers all at once,
wait for responses asynchronously:
* with SNMPv1, community 'public' and
with SNMPv2c, community 'public' and
* over IPv4/UDP and
over IPv6/UDP
* to an Agent at demo.snmplabs.com:161 and
to an Agent at [::1]:161
* for instances of SNMPv2-MIB::sysDescr.0 and
SNMPv2-MIB::sysLocation.0 MIB objects
* Enable MIB lookup feature
"""#
from pysnmp.hlapi.v1arch.asyncore import *
# List of targets in the following format:
# ((authData, transportTarget, varNames), ...)
TARGETS = (
# 1-st target (SNMPv1 over IPv4/UDP)
(CommunityData('public', mpModel=0),
UdpTransportTarget(('demo.snmplabs.com', 161)),
(ObjectType(ObjectIdentity('SNMPv2-MIB', 'sysDescr', 0)),
ObjectType(ObjectIdentity('SNMPv2-MIB', 'sysLocation', 0)))),
# 2-nd target (SNMPv2c over IPv4/UDP)
(CommunityData('public'),
UdpTransportTarget(('demo.snmplabs.com', 161)),
(ObjectType(ObjectIdentity('SNMPv2-MIB', 'sysDescr', 0)),
ObjectType(ObjectIdentity('SNMPv2-MIB', 'sysLocation', 0)))),
# 3-rd target (SNMPv2c over IPv6/UDP) - same community,
# different transport address and MIB objects.
(CommunityData('public'),
Udp6TransportTarget(('::1', 161)),
(ObjectType(ObjectIdentity('SNMPv2-MIB', 'sysContact', 0)),
ObjectType(ObjectIdentity('SNMPv2-MIB', 'sysName', 0)))),
# N-th target
# ...
)
def cbFun(errorIndication, errorStatus, errorIndex, varBinds, **context):
if errorIndication:
print(errorIndication)
elif errorStatus:
print('%s at %s' % (errorStatus.prettyPrint(),
errorIndex and varBinds[int(errorIndex) - 1][0] or '?'))
else:
for varBind in varBinds:
print(' = '.join([x.prettyPrint() for x in varBind]))
snmpDispatcher = SnmpDispatcher()
# Submit a bunch of initial GET requests
for authData, transportTarget, varBinds in TARGETS:
getCmd(snmpDispatcher, authData, transportTarget, *varBinds,
cbFun=cbFun, lookupMib=True)
snmpDispatcher.transportDispatcher.runDispatcher()
|
py | b40c5c1bea060fa5fd2ca6c9851a8b5314b565ac | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os.path
from docutils import nodes
from docutils.parsers import rst
from docutils.parsers.rst import directives
from docutils import statemachine
from dulwich import repo
from sphinx.util import logging
from sphinx.util.nodes import nested_parse_with_titles
import reno
from reno import config
from reno import defaults
from reno import formatter
from reno import loader
LOG = logging.getLogger(__name__)
class ReleaseNotesDirective(rst.Directive):
has_content = True
# FIXME(dhellmann): We should be able to build this information
# from the configuration options so we don't have to edit it
# manually when we add new options.
option_spec = {
'branch': directives.unchanged,
'reporoot': directives.unchanged,
'relnotessubdir': directives.unchanged,
'notesdir': directives.unchanged,
'version': directives.unchanged,
'collapse-pre-releases': directives.flag,
'earliest-version': directives.unchanged,
'stop-at-branch-base': directives.flag,
'ignore-notes': directives.unchanged,
'unreleased-version-title': directives.unchanged,
}
def _find_reporoot(self, reporoot_opt, relnotessubdir_opt):
"""Find root directory of project."""
reporoot = os.path.abspath(reporoot_opt)
# When building on RTD.org the root directory may not be
# the current directory, so look for it.
try:
return repo.Repo.discover(reporoot).path
except Exception:
pass
for root in ('.', '..', '../..'):
if os.path.exists(os.path.join(root, relnotessubdir_opt)):
return root
raise Exception(
'Could not discover root directory; tried: %s' % ', '.join([
os.path.abspath(root) for root in ('.', '..', '../..')
])
)
def run(self):
title = ' '.join(self.content)
branch = self.options.get('branch')
relnotessubdir = self.options.get(
'relnotessubdir', defaults.RELEASE_NOTES_SUBDIR,
)
reporoot = self._find_reporoot(
self.options.get('reporoot', '.'), relnotessubdir,
)
ignore_notes = [
name.strip()
for name in self.options.get('ignore-notes', '').split(',')
]
conf = config.Config(reporoot, relnotessubdir)
opt_overrides = {}
if 'notesdir' in self.options:
opt_overrides['notesdir'] = self.options.get('notesdir')
version_opt = self.options.get('version')
# FIXME(dhellmann): Force these flags True for now and figure
# out how Sphinx passes a "false" flag later.
# 'collapse-pre-releases' in self.options
opt_overrides['collapse_pre_releases'] = True
# Only stop at the branch base if we have not been told
# explicitly which versions to include.
opt_overrides['stop_at_branch_base'] = (version_opt is None)
if 'earliest-version' in self.options:
opt_overrides['earliest_version'] = self.options.get(
'earliest-version')
if 'unreleased-version-title' in self.options:
opt_overrides['unreleased_version_title'] = self.options.get(
'unreleased-version-title')
if branch:
opt_overrides['branch'] = branch
if ignore_notes:
opt_overrides['ignore_notes'] = ignore_notes
conf.override(**opt_overrides)
notesdir = os.path.join(relnotessubdir, conf.notesdir)
LOG.info('scanning %s for %s release notes' % (
os.path.join(conf.reporoot, notesdir),
branch or 'current branch'))
with loader.Loader(conf) as ldr:
if version_opt is not None:
versions = [
v.strip()
for v in version_opt.split(',')
]
else:
versions = ldr.versions
LOG.info('got versions %s' % (versions,))
text = formatter.format_report(
ldr,
conf,
versions,
title=title,
branch=branch,
)
source_name = '<%s %s>' % (__name__, branch or 'current branch')
result = statemachine.ViewList()
for line_num, line in enumerate(text.splitlines(), 1):
LOG.debug('%4d: %s', line_num, line)
result.append(line, source_name, line_num)
node = nodes.section()
node.document = self.state.document
nested_parse_with_titles(self.state, result, node)
return node.children
def setup(app):
app.add_directive('release-notes', ReleaseNotesDirective)
metadata_dict = {
'version': reno.__version__,
'parallel_read_safe': True
}
return metadata_dict
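# Illustrative usage (in a Sphinx .rst page, assuming this extension is enabled in conf.py):
#
#   .. release-notes:: Release Notes
#      :branch: origin/stable/queens
#      :earliest-version: 1.0.0
#
# The directive content ("Release Notes" here) is joined into the report title; the options
# mirror option_spec above. The branch name is a placeholder.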
|
py | b40c5c9d81d125f2dee1888fff67e054326be348 | #!/usr/bin/env python
# encoding: utf-8
#
# Copyright © 2020, SAS Institute Inc., Cary, NC, USA. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#Description: Display ESP results for OpenVino inferencing Tiny Yolo V2 Model
#Require ESP installed on the same machine where runs and ENV variables set
#Leverage OpenCV to display results
#See command line parameters for additional information.
import os, platform, sys
import datetime, time, signal
import numpy as np
import cv2
import hashlib
from collections import deque
from argparse import ArgumentParser, SUPPRESS
## check ESP environment variables
if "DFESP_HOME" in os.environ:
dfesphome = os.environ["DFESP_HOME"]
else:
print("Error: Environment variable DFESP_HOME not set. Abort!")
sys.exit(1)
if platform.system() == 'Linux':
sys.path.append(dfesphome + '/lib')
else:
sys.path.append(dfesphome + '/bin')
import ezpubsub as ezps
from collections import deque
deltaTimeQueue = deque()
prevTime = -1
args = None
cv2_win_name = "SAS Tiny YOLO V2 Viewer"
videoWriter = None
loop = 0
maxloop = 3000
stayInLoop = True
currentImg = None
toberesized = False
def build_argparser():
parser = ArgumentParser(add_help=False)
args = parser.add_argument_group('Options')
args.add_argument('-C', '--oneColor', nargs='?', const=True, default=False, type=bool, required=False,
help='label background using one color')
args.add_argument('-w', '--width', default=None, type=int, required=False, help='Output width of image. Scale will be preserved')
#args.add_argument('-h', '--height', default=480, type=int, required=False, help='output height of image')
args.add_argument('-t', '--probThres', default=0.25, type=float, required=False,
help='probability threshold to filter')
args.add_argument('-i', '--ipAddr', default='localhost', type=str, required=False,
help='Ip Address default: localhost')
args.add_argument('-p', '--port', default='30003', type=str, required=False, help='Pub/Sub Port default: 30003')
args.add_argument('-e', '--espProj', default='yoloV2OpenVINO', type=str, required=False,
help='Project name default: yoloV2OpenVINO')
args.add_argument('-q', '--cq', default='cq', type=str, required=False,
help='Continuous Query name default: cq')
args.add_argument('-s', '--sw', default='w_score', type=str, required=False,
help='Score windows name default: w_score')
args.add_argument('-f', '--frame', default='image_in', type=str, required=False,
help='Image field name default: image_in')
args.add_argument('-a', '--autosize', nargs='?', const=True, default=False, type=bool, required=False,
help='Set CV window in autosize mode. Default False')
args.add_argument('--fullscreen', nargs='?', const=True, default=False, type=bool, required=False,
help='Set CV window in fullsize (override autosize mode). Default False')
args.add_argument('--showfps', nargs='?', const=True, default=False, type=bool, required=False,
help='Overlay FPS and frame id on the output. Default False')
args.add_argument('--flip', nargs='?', const=True, default=False, type=bool, required=False,
help='Flip camera (Mirror mode). Default False')
args.add_argument('-v', '--video_out', default=None,
type=str, required=False, help='Output Video path')
args.add_argument('--noshow', nargs='?', const=True, default=False, type=bool, required=False,
help='Hide the OpenCV output window. Useful to record video from a remote server')
args.add_argument('-h', '--help', action='help', default=SUPPRESS,
help='Show this help message and exit.')
return parser
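# Example invocations (illustrative; the script name, host, port and ESP project name are
# placeholders that depend on the local deployment):
#   python tiny_yolov2_viewer.py -i 10.0.2.15 -p 30003 -e yoloV2OpenVINO --showfps
#   python tiny_yolov2_viewer.py --noshow -v /tmp/output_r50.avi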
def videoOutput(frame_in, exitfunc = False):
global videoWriter
global loop
if loop > maxloop or exitfunc:
videoWriter.release()
videoWriter = None
loop = 0
return
if videoWriter is None:
print("Video writer initialization.")
loop += 1
fourcc = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')
videoWriter = cv2.VideoWriter()
framerate = 30 / 4
width = frame_in.shape[1] # for detection model
height = frame_in.shape[0]
name = args.video_out #output_video + extension
success = videoWriter.open(name, fourcc,
framerate, (width, height), True)
if frame_in is not None:
videoWriter.write(frame_in)
def highlightImage(row, objectId, fps):
global currentImg
tableau10 = [(31, 119, 180), (255, 127, 14),
(127, 127, 127), (188, 189, 34),
(148, 103, 189), (140, 86, 75),
(227, 119, 194), (44, 160, 44),
(214, 39, 40), (23, 190, 207)]
color_palette = tableau10
n_colors = len(color_palette)
if args.frame in row.keys():
imageBlob = row[args.frame]
if imageBlob is None:
currentImg = None
return
else:
currentImg = None
return
#Each received row contains only the data of a single detected object
#If multiple objects are detected, object_id is incremented for each one and resets to 0 when a new frame is analyzed.
#This code stores the image each time an objectId == 0 is received and keeps drawing bounding boxes on the same image
#until all detected objects have been received.
if objectId == 0:
nparr = np.frombuffer(imageBlob, dtype=np.uint8)
currentImg = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
if args.width is not None:
image_h, image_w, _ = currentImg.shape
height = image_h * (args.width / image_w)
currentImg = cv2.resize(currentImg, (int(args.width), int(height)), cv2.INTER_LINEAR)
if args.flip:
#Flip horizzontaly Mirror effect
currentImg = cv2.flip(currentImg, 1)
if 'nObjects' in row.keys():
numberOfObjects = row['nObjects']
if numberOfObjects == 0:
return
else:
return
image_h, image_w, _ = currentImg.shape
font_face = cv2.FONT_HERSHEY_SIMPLEX
font_scale = 0.6
thickness = 1
if objectId == 0:
## put current timestamp
text = datetime.datetime.now().strftime("%m/%d/%Y %H:%M:%S")
#font_scale = 0.6
size = cv2.getTextSize(text, font_face, font_scale, thickness)
text_width = int(size[0][0])
text_height = int(size[0][1])
cv2.putText(currentImg, text, (image_w - text_width - 2, image_h - text_height), font_face, font_scale, (0, 0, 0), thickness+1, cv2.LINE_AA)
cv2.putText(currentImg, text, (image_w - text_width - 2, image_h - text_height), font_face, font_scale, (240, 240, 240), thickness, cv2.LINE_AA)
## put FPS
text = "FPS=%.2f FrameId=%d" % (fps, row['id'])
#font_scale = 1
size = cv2.getTextSize(text, font_face, font_scale, thickness)
text_width = int(size[0][0])
text_height = int(size[0][1])
cv2.putText(currentImg, text, (+5, image_h - text_height), font_face, font_scale, (0, 0, 0), thickness+1, cv2.LINE_AA)
cv2.putText(currentImg, text, (+5, image_h - text_height), font_face, font_scale, (240, 240, 240), thickness, cv2.LINE_AA)
obj = row['classes']
prob = float(row['scores'])
probability = "(" + str(round(prob * 100, 1)) + "%)"
x = float(row['x_box'])
y = float(row['y_box'])
width = float(row['w_box'])
height = float(row['h_box'])
if prob < args.probThres:
return
if args.oneColor:
color_idx = 0
else:
color_idx = int(hashlib.sha1(obj.encode()).hexdigest(), 16) % n_colors
box_color = (color_palette[color_idx][2], color_palette[color_idx][1], color_palette[color_idx][0]) #(b,g,r)
x_min = int(image_w * (x - width / 2))
y_min = int(image_h * (y - height/ 2))
x_max = int(image_w * (x + width / 2))
y_max = int(image_h * (y + height/ 2))
if args.flip:
# flip coordinates
x_min_f = image_w - x_max
x_max = image_w - x_min
x_min = x_min_f
## draw bounding box
cv2.rectangle(currentImg, (x_min, y_min), (x_max, y_max), box_color, 1)
## draw object label
text = obj.strip() + " " + probability
if sum(box_color)/3 < 140:
text_color = (255, 255, 255) #(b,g,r)
else:
text_color = (16, 16, 16) #(b,g,r)
size = cv2.getTextSize(text, font_face, font_scale, thickness)
text_width = int(size[0][0])
text_height = int(size[0][1])
line_height = size[1]
margin = 2
text_x = x_min + margin
text_y = y_min - line_height - margin
# draw a filled rectangle around text
cv2.rectangle(currentImg, (text_x - margin, text_y + line_height + margin),
(text_x + text_width + margin, text_y - text_height - margin), box_color, -1)
cv2.putText(currentImg, text, (text_x, text_y), font_face, font_scale, text_color, thickness, cv2.LINE_AA)
def subCallbackCbFunc(row):
global prevTime
global deltaTimeQueue
global stayInLoop
global toberesized
frameId = row['id']
objectId = row['object_id']
## print out timing log
curDateTime = datetime.datetime.now()
curTime = time.perf_counter()
if prevTime != -1:
deltaTime = (curTime - prevTime) * 1000
deltaTimeQueue.appendleft(deltaTime)
if len(deltaTimeQueue) > 100:
deltaTimeQueue.pop()
avgDeltaTime = sum(deltaTimeQueue)/len(deltaTimeQueue)
fps = 1000 / avgDeltaTime
print("FrameId: %d\t Current Time: %s\tDelta Time: %.2fms\tAvg Delta Time: %.2fms" % (frameId, str(curDateTime),
deltaTime, avgDeltaTime))
else:
deltaTime = 0
avgDeltaTime = 0
fps = 0
print("FrameId: %d\t Current Time: %s" % (frameId, str(curTime)))
prevTime = curTime
if (currentImg is not None) and (objectId == 0):
if args.video_out is not None:
videoOutput(currentImg)
if not (display is None or len(display) == 0):
if not args.noshow:
cv2.imshow(cv2_win_name, currentImg)
k = cv2.waitKey(1) & 0xFF
if k == 27: # Esc key to stop
if args.video_out is not None:
videoOutput(None, True)
stayInLoop = False
highlightImage(row, objectId, fps)
#fix small window size issue in case of cv2.WINDOW_NORMAL
if toberesized:
image_h, image_w, _ = currentImg.shape
cv2.resizeWindow(cv2_win_name, image_w, image_h)
toberesized = False
return
def subCallbackErr(err):
global stayInLoop
print("Error:" + str(err))
stayInLoop = False
def main():
global toberesized
if args.fullscreen:
cv2.namedWindow(cv2_win_name, cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO)
cv2.setWindowProperty(cv2_win_name, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
elif args.autosize:
cv2.namedWindow(cv2_win_name, cv2.WINDOW_AUTOSIZE)
else:
cv2.namedWindow(cv2_win_name, cv2.WINDOW_NORMAL)
toberesized=True
try:
sub = ezps.Subscriber(url, on_event=subCallbackCbFunc, on_error=subCallbackErr)
while stayInLoop:
time.sleep(0.02)
except KeyboardInterrupt:
if args.video_out is not None:
videoOutput(None, True)
except SystemExit:
if args.video_out is not None:
videoOutput(None, True)
finally:
raise SystemExit
if __name__ == '__main__':
if "DISPLAY" in os.environ:
display = os.environ["DISPLAY"]
print("Note: Images will be displayed at " + display)
elif platform.system() == "Windows":
print("Note: Images will be displayed at main display")
display = "Windows"
else:
print("Warning: Environment variable DISPLAY not set. No images will be shown.")
display = None
args = build_argparser().parse_args()
url = "dfESP://" + args.ipAddr + ":" + args.port + "/" + args.espProj + "/" + args.cq + "/" + args.sw
print("Connecting to:" + url)
main()
|
py | b40c5ca8994fd13a23c8e865983ed5827f5a43d0 | from collections import Counter
class Solution(object):
def findShortestSubArray(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
first = dict()
last = dict()
c = Counter()
m = 0
possible_values = []
for i, v in enumerate(nums):
first.setdefault(v, i)
last[v] = i
c[v] += 1
if c[v] == m:
possible_values.append(v)
elif c[v] > m:
possible_values = [v]
m = c[v]
return min(last[x] - first[x] + 1 for x in possible_values)
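# Illustrative self-check (not part of the original submission). The degree of [1, 2, 2, 3, 1]
# is 2 (both 1 and 2 occur twice); the shortest subarray with the same degree is [2, 2].
if __name__ == '__main__':
    assert Solution().findShortestSubArray([1, 2, 2, 3, 1]) == 2
    assert Solution().findShortestSubArray([1, 2, 2, 3, 1, 4, 2]) == 6
    print('ok')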
|
py | b40c5cf2c006b38e6e7e900bee3faeb2da1b95ca | """Dump chemRxiv data in JSONL format."""
import logging
import os
import sys
from datetime import datetime
import pkg_resources
from .utils.chemrxiv import ChemrxivAPI, download_full, parse_dump
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logger = logging.getLogger(__name__)
today = datetime.today().strftime("%Y-%m-%d")
save_folder = pkg_resources.resource_filename("paperscraper", "server_dumps")
save_path = os.path.join(save_folder, f"chemrxiv_{today}.jsonl")
def chemrxiv(save_path: str = save_path) -> None:
"""Fetches all papers from biorxiv until current date, stores them in jsonl
format in save_path.
Args:
save_path (str, optional): Path where the dump is stored.
Defaults to save_path.
"""
# create API client
api = ChemrxivAPI()
# Download the data
download_full(save_folder, api)
# Convert to JSONL format.
parse_dump(save_folder, save_path)
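# Illustrative usage (requires network access to the chemRxiv API):
#
#   chemrxiv()                                   # dump to the default dated path under server_dumps
#   chemrxiv(save_path='/tmp/chemrxiv.jsonl')    # or pick an explicit output file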
|
py | b40c5d57d39d79680db43f7e309cc78de64cfe5e | #!/usr/bin/python
################################################################################
# 267ca6fe-5cc5-11e4-af55-00155d01fe08
#
# Justin Dierking
# [email protected]
# [email protected]
#
# 10/24/2014 Original Construction
################################################################################
class Finding:
def __init__(self):
self.output = []
self.is_compliant = False
self.uuid = "267ca6fe-5cc5-11e4-af55-00155d01fe08"
def check(self, cli):
# Initialize Compliance
self.is_compliant = False
# Get Registry DWORD
dword = cli.get_reg_dword(r'HKCU:\Software\Policies\Microsoft\Office\15.0\powerpoint\security', 'NoTBPromptUnsignedAddin')
# Output Lines
self.output = [r'HKCU:\Software\Policies\Microsoft\Office\15.0\powerpoint\security', ('NoTBPromptUnsignedAddin=' + str(dword))]
if dword == 1:
self.is_compliant = True
return self.is_compliant
def fix(self, cli):
cli.powershell(r"New-Item -path 'HKCU:\Software\Policies\Microsoft\Office\15.0'")
cli.powershell(r"New-Item -path 'HKCU:\Software\Policies\Microsoft\Office\15.0\powerpoint'")
cli.powershell(r"New-Item -path 'HKCU:\Software\Policies\Microsoft\Office\15.0\powerpoint\security'")
cli.powershell(r"Set-ItemProperty -path 'HKCU:\Software\Policies\Microsoft\Office\15.0\powerpoint\security' -name 'NoTBPromptUnsignedAddin' -value 1 -Type DWord")
|
py | b40c5e776c5dae913e34723e73149a2f9750de20 | """Custom signals for the Lifecycle Management plugin."""
from django.apps import apps as global_apps
from django.db.models.signals import pre_delete
from django.dispatch import receiver
from nautobot.extras.choices import RelationshipTypeChoices
from nautobot.extras.models import Relationship, RelationshipAssociation
def post_migrate_create_relationships(sender, apps=global_apps, **kwargs): # pylint: disable=unused-argument
"""Callback function for post_migrate() -- create Relationship records."""
# pylint: disable=invalid-name
SoftwareLCM = sender.get_model("SoftwareLCM")
ContentType = apps.get_model("contenttypes", "ContentType")
_Device = apps.get_model("dcim", "Device")
InventoryItem = apps.get_model("dcim", "InventoryItem")
_Relationship = apps.get_model("extras", "Relationship")
contract_lcm = sender.get_model("ContractLCM")
CVELCM = sender.get_model("CVELCM")
for relationship_dict in [
{
"name": "Software on Device",
"slug": "device_soft",
"type": RelationshipTypeChoices.TYPE_ONE_TO_MANY,
"source_type": ContentType.objects.get_for_model(SoftwareLCM),
"source_label": "Running on Devices",
"destination_type": ContentType.objects.get_for_model(_Device),
"destination_label": "Software Version",
},
{
"name": "Software on InventoryItem",
"slug": "inventory_item_soft",
"type": RelationshipTypeChoices.TYPE_ONE_TO_MANY,
"source_type": ContentType.objects.get_for_model(SoftwareLCM),
"source_label": "Running on Inventory Items",
"destination_type": ContentType.objects.get_for_model(InventoryItem),
"destination_label": "Software Version",
},
{
"name": "Contract to dcim.Device",
"slug": "contractlcm-to-device",
"type": RelationshipTypeChoices.TYPE_MANY_TO_MANY,
"source_type": ContentType.objects.get_for_model(contract_lcm),
"source_label": "Devices",
"destination_type": ContentType.objects.get_for_model(_Device),
"destination_label": "Contracts",
},
{
"name": "Contract to dcim.InventoryItem",
"slug": "contractlcm-to-inventoryitem",
"type": RelationshipTypeChoices.TYPE_ONE_TO_MANY,
"source_type": ContentType.objects.get_for_model(contract_lcm),
"source_label": "Inventory Items",
"destination_type": ContentType.objects.get_for_model(InventoryItem),
"destination_label": "Contract",
},
{
"name": "Software to CVE",
"slug": "soft_cve",
"type": RelationshipTypeChoices.TYPE_MANY_TO_MANY,
"source_type": ContentType.objects.get_for_model(SoftwareLCM),
"source_label": "Corresponding CVEs",
"destination_type": ContentType.objects.get_for_model(CVELCM),
"destination_label": "Affected Softwares",
},
]:
_Relationship.objects.get_or_create(name=relationship_dict["name"], defaults=relationship_dict)
@receiver(pre_delete, sender="nautobot_device_lifecycle_mgmt.SoftwareLCM")
def delete_softwarelcm_relationships(sender, instance, **kwargs): # pylint: disable=unused-argument
"""Delete all SoftwareLCM relationships to Device and InventoryItem objects."""
soft_relationships = Relationship.objects.filter(slug__in=("device_soft", "inventory_item_soft"))
RelationshipAssociation.objects.filter(relationship__in=soft_relationships, source_id=instance.pk).delete()
@receiver(pre_delete, sender="dcim.Device")
def delete_device_software_relationship(sender, instance, **kwargs): # pylint: disable=unused-argument
"""Delete Device relationship to SoftwareLCM object."""
soft_relationships = Relationship.objects.filter(slug__in=("device_soft", "inventory_item_soft"))
RelationshipAssociation.objects.filter(relationship__in=soft_relationships, destination_id=instance.pk).delete()
@receiver(pre_delete, sender="dcim.InventoryItem")
def delete_inventory_item_software_relationship(sender, instance, **kwargs): # pylint: disable=unused-argument
"""Delete InventoryItem relationship to SoftwareLCM object."""
soft_relationships = Relationship.objects.filter(slug__in=("device_soft", "inventory_item_soft"))
RelationshipAssociation.objects.filter(relationship__in=soft_relationships, destination_id=instance.pk).delete()
@receiver(pre_delete, sender="nautobot_device_lifecycle_mgmt.SoftwareLCM")
def delete_software_to_cve_relationships(sender, instance, **kwargs): # pylint: disable=unused-argument
"""Delete all SoftwareLCM relationships to CVELCM objects."""
soft_relationships = Relationship.objects.filter(slug__in=("cve_soft"))
RelationshipAssociation.objects.filter(relationship__in=soft_relationships, source_id=instance.pk).delete()
@receiver(pre_delete, sender="nautobot_device_lifecycle_mgmt.CVELCM")
def delete_cve_to_software_relationships(sender, instance, **kwargs): # pylint: disable=unused-argument
"""Delete all CVELCM relationships to SoftwareLCM objects."""
soft_relationships = Relationship.objects.filter(slug__in=("cve_soft"))
RelationshipAssociation.objects.filter(relationship__in=soft_relationships, source_id=instance.pk).delete()
|
py | b40c608a635e4a2f2e9a89e4332aae89eb772d19 | """
ASGI config for poll project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'poll.settings')
application = get_asgi_application()
|
py | b40c616af6e9d40eb2a0710c268002351a494db5 | #!/usr/bin/env python
'''
Digit recognition from video.
Run digits.py before, to train and save the SVM.
Usage:
digits_video.py [{camera_id|video_file}]
'''
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2 as cv
# built-in modules
import os
import sys
# local modules
import video
from common import mosaic
from digits import *
def main():
try:
src = sys.argv[1]
except:
src = 0
cap = video.create_capture(src)
classifier_fn = 'digits_svm.dat'
if not os.path.exists(classifier_fn):
print('"%s" not found, run digits.py first' % classifier_fn)
return
model = cv.ml.SVM_load(classifier_fn)
while True:
_ret, frame = cap.read()
gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
bin = cv.adaptiveThreshold(gray, 255, cv.ADAPTIVE_THRESH_MEAN_C, cv.THRESH_BINARY_INV, 31, 10)
bin = cv.medianBlur(bin, 3)
contours, heirs = cv.findContours( bin.copy(), cv.RETR_CCOMP, cv.CHAIN_APPROX_SIMPLE)
try:
heirs = heirs[0]
except:
heirs = []
for cnt, heir in zip(contours, heirs):
_, _, _, outer_i = heir
if outer_i >= 0:
continue
x, y, w, h = cv.boundingRect(cnt)
if not (16 <= h <= 64 and w <= 1.2*h):
continue
pad = max(h-w, 0)
x, w = x - (pad // 2), w + pad
cv.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0))
bin_roi = bin[y:,x:][:h,:w]
m = bin_roi != 0
if not 0.1 < m.mean() < 0.4:
continue
'''
gray_roi = gray[y:,x:][:h,:w]
v_in, v_out = gray_roi[m], gray_roi[~m]
if v_out.std() > 10.0:
continue
s = "%f, %f" % (abs(v_in.mean() - v_out.mean()), v_out.std())
cv.putText(frame, s, (x, y), cv.FONT_HERSHEY_PLAIN, 1.0, (200, 0, 0), thickness = 1)
'''
s = 1.5*float(h)/SZ
m = cv.moments(bin_roi)
c1 = np.float32([m['m10'], m['m01']]) / m['m00']
c0 = np.float32([SZ/2, SZ/2])
t = c1 - s*c0
A = np.zeros((2, 3), np.float32)
A[:,:2] = np.eye(2)*s
A[:,2] = t
bin_norm = cv.warpAffine(bin_roi, A, (SZ, SZ), flags=cv.WARP_INVERSE_MAP | cv.INTER_LINEAR)
bin_norm = deskew(bin_norm)
if x+w+SZ < frame.shape[1] and y+SZ < frame.shape[0]:
frame[y:,x+w:][:SZ, :SZ] = bin_norm[...,np.newaxis]
sample = preprocess_hog([bin_norm])
digit = model.predict(sample)[1].ravel()
cv.putText(frame, '%d'%digit, (x, y), cv.FONT_HERSHEY_PLAIN, 1.0, (200, 0, 0), thickness = 1)
cv.imshow('frame', frame)
cv.imshow('bin', bin)
ch = cv.waitKey(1)
if ch == 27:
break
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
|
py | b40c6277a85189550bc533434862b4eb458b2ce7 | # (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
import copy
import re
import time
import mock
import pytest
from six import iteritems
from . import common
from datadog_checks.checks import AgentCheck
from datadog_checks.openstack.openstack import (
OpenStackCheck,
OpenStackProjectScope,
OpenStackUnscoped,
KeystoneCatalog,
IncompleteConfig,
IncompleteAuthScope,
IncompleteIdentity
)
instance = common.MOCK_CONFIG["instances"][0]
instance['tags'] = ['optional:tag1']
init_config = common.MOCK_CONFIG['init_config']
openstack_check = OpenStackCheck('openstack', init_config, {}, instances=[instance])
@pytest.fixture
def aggregator():
from datadog_checks.stubs import aggregator
aggregator.reset()
return aggregator
class MockHTTPResponse(object):
def __init__(self, response_dict, headers):
self.response_dict = response_dict
self.headers = headers
def json(self):
return self.response_dict
MOCK_HTTP_RESPONSE = MockHTTPResponse(
response_dict=common.EXAMPLE_AUTH_RESPONSE, headers={
"X-Subject-Token": "fake_token"})
MOCK_HTTP_PROJECTS_RESPONSE = MockHTTPResponse(response_dict=common.EXAMPLE_PROJECTS_RESPONSE, headers={})
def _test_bad_auth_scope(scope):
with pytest.raises(IncompleteAuthScope):
OpenStackProjectScope.get_auth_scope(scope)
def _test_bad_user(user):
with pytest.raises(IncompleteIdentity):
OpenStackProjectScope.get_user_identity(user)
def test_get_auth_scope():
for scope in common.BAD_AUTH_SCOPES:
_test_bad_auth_scope(scope)
for scope in common.GOOD_UNSCOPED_AUTH_SCOPES:
auth_scope = OpenStackProjectScope.get_auth_scope(scope)
assert auth_scope is None
auth_scope = OpenStackUnscoped.get_auth_scope(scope)
assert auth_scope is None
for scope in common.GOOD_AUTH_SCOPES:
auth_scope = OpenStackProjectScope.get_auth_scope(scope)
# Should pass through unchanged
assert auth_scope == scope.get('auth_scope')
def test_get_user_identity():
for user in common.BAD_USERS:
_test_bad_user(user)
for user in common.GOOD_USERS:
parsed_user = OpenStackProjectScope.get_user_identity(user)
assert parsed_user == {'methods': ['password'], 'password': user}
def test_from_config():
init_config = {'keystone_server_url': 'http://10.0.2.15:5000', 'nova_api_version': 'v2'}
bad_instance_config = {}
good_instance_config = {'user': common.GOOD_USERS[0]['user'],
'auth_scope': common.GOOD_AUTH_SCOPES[0]['auth_scope']}
with pytest.raises(IncompleteConfig):
OpenStackProjectScope.from_config(init_config, bad_instance_config)
with mock.patch(
'datadog_checks.openstack.openstack.OpenStackProjectScope.request_auth_token',
return_value=MOCK_HTTP_RESPONSE
):
append_config = good_instance_config.copy()
append_config['append_tenant_id'] = True
scope = OpenStackProjectScope.from_config(init_config, append_config)
assert isinstance(scope, OpenStackProjectScope)
assert scope.auth_token == 'fake_token'
assert scope.tenant_id == 'test_project_id'
# Test that append flag worked
assert scope.service_catalog.nova_endpoint == 'http://10.0.2.15:8773/test_project_id'
def test_unscoped_from_config():
init_config = {'keystone_server_url': 'http://10.0.2.15:5000', 'nova_api_version': 'v2'}
good_instance_config = {'user': common.GOOD_USERS[0]['user'],
'auth_scope': common.GOOD_UNSCOPED_AUTH_SCOPES[0]['auth_scope']}
mock_http_response = copy.deepcopy(common.EXAMPLE_AUTH_RESPONSE)
mock_http_response['token'].pop('catalog')
mock_http_response['token'].pop('project')
mock_response = MockHTTPResponse(response_dict=mock_http_response, headers={'X-Subject-Token': 'fake_token'})
with mock.patch(
'datadog_checks.openstack.openstack.OpenStackUnscoped.request_auth_token',
return_value=mock_response
):
with mock.patch(
'datadog_checks.openstack.openstack.OpenStackUnscoped.request_project_list',
return_value=MOCK_HTTP_PROJECTS_RESPONSE
):
with mock.patch(
'datadog_checks.openstack.openstack.OpenStackUnscoped.get_token_for_project',
return_value=MOCK_HTTP_RESPONSE
):
append_config = good_instance_config.copy()
append_config['append_tenant_id'] = True
scope = OpenStackUnscoped.from_config(init_config, append_config)
assert isinstance(scope, OpenStackUnscoped)
assert scope.auth_token == 'fake_token'
assert len(scope.project_scope_map) == 1
for _, scope in iteritems(scope.project_scope_map):
assert isinstance(scope, OpenStackProjectScope)
assert scope.auth_token == 'fake_token'
assert scope.tenant_id == '263fd9'
def test_get_nova_endpoint():
assert KeystoneCatalog.get_nova_endpoint(
common.EXAMPLE_AUTH_RESPONSE) == u'http://10.0.2.15:8774/v2.1/0850707581fe4d738221a72db0182876'
assert KeystoneCatalog.get_nova_endpoint(
common.EXAMPLE_AUTH_RESPONSE,
nova_api_version='v2') == u'http://10.0.2.15:8773/'
def test_get_neutron_endpoint():
assert KeystoneCatalog.get_neutron_endpoint(common.EXAMPLE_AUTH_RESPONSE) == u'http://10.0.2.15:9292'
def test_from_auth_response():
catalog = KeystoneCatalog.from_auth_response(common.EXAMPLE_AUTH_RESPONSE, 'v2.1')
assert isinstance(catalog, KeystoneCatalog)
assert catalog.neutron_endpoint == u'http://10.0.2.15:9292'
assert catalog.nova_endpoint == u'http://10.0.2.15:8774/v2.1/0850707581fe4d738221a72db0182876'
def test_ensure_auth_scope(aggregator):
instance = common.MOCK_CONFIG["instances"][0]
instance['tags'] = ['optional:tag1']
with pytest.raises(KeyError):
openstack_check.get_scope_for_instance(instance)
with mock.patch(
'datadog_checks.openstack.openstack.OpenStackProjectScope.request_auth_token',
return_value=MOCK_HTTP_RESPONSE
):
scope = openstack_check.ensure_auth_scope(instance)
assert openstack_check.get_scope_for_instance(instance) == scope
openstack_check._send_api_service_checks(scope, ['optional:tag1'])
aggregator.assert_service_check(
OpenStackCheck.IDENTITY_API_SC, status=AgentCheck.OK, tags=[
'optional:tag1', 'server:http://10.0.2.15:5000'])
# URLs are nonexistent, so return CRITICAL
aggregator.assert_service_check(OpenStackCheck.COMPUTE_API_SC, status=AgentCheck.CRITICAL)
aggregator.assert_service_check(OpenStackCheck.NETWORK_API_SC, status=AgentCheck.CRITICAL)
openstack_check._current_scope = scope
openstack_check.delete_current_scope()
with pytest.raises(KeyError):
openstack_check.get_scope_for_instance(instance)
def test_parse_uptime_string():
    uptime_parsed = openstack_check._parse_uptime_string(
        u' 16:53:48 up 1 day, 21:34, 3 users, load average: 0.04, 0.14, 0.19\n')
    assert uptime_parsed.get('loads') == [0.04, 0.14, 0.19]


def test_cache_utils():
    openstack_check.CACHE_TTL['aggregates'] = 1
    expected_aggregates = {'hyp_1': ['aggregate:staging', 'availability_zone:test']}

    with mock.patch(
        'datadog_checks.openstack.OpenStackCheck.get_all_aggregate_hypervisors',
        return_value=expected_aggregates
    ):
        assert openstack_check._get_and_set_aggregate_list() == expected_aggregates
        time.sleep(1.5)
        assert openstack_check._is_expired('aggregates')


@mock.patch('datadog_checks.openstack.OpenStackCheck.get_all_servers', return_value=common.ALL_SERVER_DETAILS)
def test_server_exclusion(*args):
    """
    Exclude servers using regular expressions.
    """
    openstackCheck = OpenStackCheck("test", {
        'keystone_server_url': 'http://10.0.2.15:5000',
        'ssl_verify': False,
        'exclude_server_ids': common.EXCLUDED_SERVER_IDS
    }, {}, instances=common.MOCK_CONFIG)

    # Retrieve servers
    openstackCheck.server_details_by_id = copy.deepcopy(common.ALL_SERVER_DETAILS)
    i_key = "test_instance"
    server_ids = openstackCheck.get_servers_managed_by_hypervisor(i_key, False, False)

    # Assert
    # .. 1 out of 4 server ids filtered
    assert len(server_ids) == 1

    # Ensure the server IDs filtered are the ones expected
    for server_id in server_ids:
        assert server_id in common.FILTERED_SERVER_ID


@mock.patch('datadog_checks.openstack.OpenStackCheck.get_all_network_ids', return_value=common.ALL_IDS)
def test_network_exclusion(*args):
    """
    Exclude networks using regular expressions.
    """
    with mock.patch('datadog_checks.openstack.OpenStackCheck.get_stats_for_single_network') \
            as mock_get_stats_single_network:
        openstack_check.exclude_network_id_rules = set([re.compile(rule) for rule in common.EXCLUDED_NETWORK_IDS])

        # Retrieve network stats
        openstack_check.get_network_stats([])

        # Assert
        # .. 1 out of 4 networks filtered in
        assert mock_get_stats_single_network.call_count == 1
        assert mock_get_stats_single_network.call_args[0][0] == common.FILTERED_NETWORK_ID

        # cleanup
        openstack_check.exclude_network_id_rules = set([])


@mock.patch(
    'datadog_checks.openstack.OpenStackCheck._make_request_with_auth_fallback',
    return_value=common.MOCK_NOVA_SERVERS)
@mock.patch('datadog_checks.openstack.OpenStackCheck.get_nova_endpoint',
            return_value="http://10.0.2.15:8774/v2.1/0850707581fe4d738221a72db0182876")
@mock.patch('datadog_checks.openstack.OpenStackCheck.get_auth_token', return_value="test_auth_token")
@mock.patch('datadog_checks.openstack.OpenStackCheck.get_project_name_from_id', return_value="tenant-1")
def test_cache_between_runs(self, *args):
    """
    Ensure the cache contains the expected VMs between check runs.
    """
    openstackCheck = OpenStackCheck("test", {
        'keystone_server_url': 'http://10.0.2.15:5000',
        'ssl_verify': False,
        'exclude_server_ids': common.EXCLUDED_SERVER_IDS
    }, {}, instances=common.MOCK_CONFIG)

    # Start off with a list of servers
    openstackCheck.server_details_by_id = copy.deepcopy(common.ALL_SERVER_DETAILS)
    i_key = "test_instance"

    # Update the cached list of servers based on what the endpoint returns
    cached_servers = openstackCheck.get_all_servers(i_key, False)

    assert 'server-1' not in cached_servers
    assert 'server_newly_added' in cached_servers
|
py | b40c62f2d8b3c8f162c17b07ef43ddf951d994b0 | # coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
from jdcloud_sdk.core.jdcloudrequest import JDCloudRequest


class StartLiveForwardTaskRequest(JDCloudRequest):
    """
    Start a live stream pull-and-forward (relay) task.
    """

    def __init__(self, parameters, header=None, version="v1"):
        super(StartLiveForwardTaskRequest, self).__init__(
            '/LiveForwardTask:start', 'GET', header, version)
        self.parameters = parameters


class StartLiveForwardTaskParameters(object):

    def __init__(self, taskIds):
        """
        :param taskIds: task IDs; separate multiple IDs with commas
        """
        self.taskIds = taskIds
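

# A minimal usage sketch (added for illustration): the task IDs below are made
# up, and the step that actually sends the request through the JD Cloud SDK
# client is omitted because it lives outside this generated module.
if __name__ == "__main__":
    example_parameters = StartLiveForwardTaskParameters(taskIds="task-1,task-2")
    example_request = StartLiveForwardTaskRequest(example_parameters)
    print(example_request.parameters.taskIds)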
|
py | b40c630090db1f49a22f0b5a20d33f761e794857 | import os
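# Directory that contains this module; callers can join file names onto it to
# locate the example data files shipped alongside the package.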
EXAMPLE_DATA_DIR = os.path.dirname(__file__)
|
py | b40c633069559f2ef71205de0901d71503a394fd | # Copyright 2020, Futurewei Technologies
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# * "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import yaml
import argparse

from pyspark import SparkContext
from pyspark.sql import HiveContext

from din_model.pipeline.util import load_config, load_df, save_pickle_file


def generate_tf_statistics(df, tf_statis_path):
    tfrecords_statistics = {}
    tfrecords_statistics['distinct_records_count'] = df.count()
    save_pickle_file(tfrecords_statistics, tf_statis_path)


def save_tfrecords(hive_context, trainready_table,
                   tfrecords_hdfs_path, tf_statis_path):
    command = """select uckey_index, media_index, media_category_index,
    net_type_index, gender_index, age_index, region_id_index,
    interval_starting_time, keyword_indexes as keywords,
    keyword_indexes_click_counts as click_counts,
    keyword_indexes_show_counts as show_counts from {}""".format(trainready_table)
    df = hive_context.sql(command)
    generate_tf_statistics(df, tf_statis_path)
    df.write.format("tfrecords").option("recordType",
                                        "Example").mode('overwrite').save(tfrecords_hdfs_path)


if __name__ == "__main__":
    sc, hive_context, cfg = load_config(description="generate tf records")
    cfgp = cfg['pipeline']
    trainready_table = cfgp['main_trainready']['trainready_output_table']
    tfrecords_hdfs_path = cfgp['tfrecords']['tfrecords_hdfs_path']
    tf_statis_path = cfgp['tfrecords']['tfrecords_statistics_path']

    # save selected columns of train ready table as tfrecords.
    save_tfrecords(hive_context, trainready_table,
                   tfrecords_hdfs_path, tf_statis_path)

    sc.stop()
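
# For reference, the configuration keys read above imply a YAML layout roughly
# like the one below (a sketch only; the real values come from the file handed
# to load_config):
#
#     pipeline:
#       main_trainready:
#         trainready_output_table: <hive table holding the train-ready rows>
#       tfrecords:
#         tfrecords_hdfs_path: <hdfs path where the tfrecords are written>
#         tfrecords_statistics_path: <path for the statistics pickle file>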
|
py | b40c643a9df7a78cd1fbe63e823b376df25c0006 | # Comment one
fname='Grace_kid.py'
fmt="# Comment one%cfname='Grace_kid.py'%cfmt=%c%s%c%cdef FT(): fd=open(fname, 'w+');fd.write(fmt %% (10,10,34,fmt,34,10,10))%cFT()"
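# The two lines below make this a self-replicating (quine-style) script: FT()
# rebuilds the original source text from fmt and writes it to the file named by
# fname, with ASCII codes 10 and 34 standing in for newline and double-quote.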
def FT(): fd=open(fname, 'w+');fd.write(fmt % (10,10,34,fmt,34,10,10))
FT() |