max_stars_repo_path (stringlengths 4-245) | max_stars_repo_name (stringlengths 7-115) | max_stars_count (int64 101-368k) | id (stringlengths 2-8) | content (stringlengths 6-1.03M)
---|---|---|---|---|
aix360/algorithms/lbbe.py | PurplePean/AIX360 | 609 | 11103357 | import abc
import sys
# Ensure compatibility with Python 2/3
if sys.version_info >= (3, 4):
ABC = abc.ABC
else:
ABC = abc.ABCMeta(str('ABC'), (), {})
class LocalBBExplainer(ABC):
"""
LocalBBExplainer is the base class for local post-hoc black-box explainers (LBBE).
    Such explainers are model agnostic and generally require access only to the model's predict function.
    Examples include LIME [#1]_, SHAP [#2]_, etc.
References:
.. [#1] “Why Should I Trust You?” Explaining the Predictions of Any Classifier, ACM SIGKDD 2016.
<NAME>, <NAME>, <NAME>. https://arxiv.org/abs/1602.04938.
.. [#2] A Unified Approach to Interpreting Model Predictions, NIPS 2017.
Lundberg, <NAME> and Lee, Su-In. https://arxiv.org/abs/1705.07874
"""
def __init__(self, *argv, **kwargs):
"""
Initialize a LocalBBExplainer object.
ToDo: check common steps that need to be distilled here.
"""
@abc.abstractmethod
def set_params(self, *argv, **kwargs):
"""
Set parameters for the explainer.
"""
raise NotImplementedError
@abc.abstractmethod
def explain_instance(self, *argv, **kwargs):
"""
Explain an input instance x.
"""
raise NotImplementedError
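# --- Illustration only (not part of AIX360): a minimal sketch of how a concrete
# --- explainer might subclass LocalBBExplainer. The predict_fn argument and the
# --- dictionary returned by explain_instance are assumptions made for this sketch.
class _SketchLocalExplainer(LocalBBExplainer):
    """Toy explainer that wraps a black-box predict function."""
    def __init__(self, predict_fn, *argv, **kwargs):
        super(_SketchLocalExplainer, self).__init__(*argv, **kwargs)
        self._predict_fn = predict_fn
        self._params = {}

    def set_params(self, *argv, **kwargs):
        # Store arbitrary keyword parameters for later use.
        self._params.update(kwargs)

    def explain_instance(self, x, *argv, **kwargs):
        # Return the raw prediction as a trivial local "explanation".
        return {"prediction": self._predict_fn(x), "params": dict(self._params)}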
|
extra_tests/snippets/stdlib_re.py | mainsail-org/RustPython | 11,058 | 11103368 |
import re
haystack = "Hello world"
needle = 'ello'
mo = re.search(needle, haystack)
print(mo)
# Does not work on python 3.6:
assert isinstance(mo, re.Match)
assert mo.start() == 1
assert mo.end() == 5
assert re.escape('python.exe') == 'python\\.exe'
p = re.compile('ab')
s = p.sub('x', 'abcabca')
# print(s)
assert s == 'xcxca'
idpattern = r'([_a-z][_a-z0-9]*)'
mo = re.search(idpattern, '7382 _boe0+2')
assert mo.group(0) == '_boe0'
# test op range
assert re.compile('[a-z]').match('a').span() == (0, 1)
assert re.compile('[a-z]').fullmatch('z').span() == (0, 1)
# test op charset
assert re.compile('[_a-z0-9]*').match('_09az').group() == '_09az'
# test op bigcharset
assert re.compile('[你好a-z]*').match('a好z你?').group() == 'a好z你'
assert re.compile('[你好a-z]+').search('1232321 a好z你 !!?').group() == 'a好z你'
# test op repeat one
assert re.compile('a*').match('aaa').span() == (0, 3)
assert re.compile('abcd*').match('abcdddd').group() == 'abcdddd'
assert re.compile('abcd*').match('abc').group() == 'abc'
assert re.compile('abcd*e').match('abce').group() == 'abce'
assert re.compile('abcd*e+').match('abcddeee').group() == 'abcddeee'
assert re.compile('abcd+').match('abcddd').group() == 'abcddd'
# test op mark
assert re.compile('(a)b').match('ab').group(0, 1) == ('ab', 'a')
assert re.compile('a(b)(cd)').match('abcd').group(0, 1, 2) == ('abcd', 'b', 'cd')
# test op repeat
assert re.compile('(ab)+').match('abab')
assert re.compile('(a)(b)(cd)*').match('abcdcdcd').group(0, 1, 2, 3) == ('abcdcdcd', 'a', 'b', 'cd')
assert re.compile('ab()+cd').match('abcd').group() == 'abcd'
assert re.compile('(a)+').match('aaa').groups() == ('a',)
assert re.compile('(a+)').match('aaa').groups() == ('aaa',)
# test Match object method
assert re.compile('(a)(bc)').match('abc')[1] == 'a'
assert re.compile('a(b)(?P<a>c)d').match('abcd').groupdict() == {'a': 'c'}
# test op branch
assert re.compile(r'((?=\d|\.\d)(?P<int>\d*)|a)').match('123.2132').group() == '123'
assert re.sub(r'^\s*', 'X', 'test') == 'Xtest'
assert re.match(r'\babc\b', 'abc').group() == 'abc'
urlpattern = re.compile('//([^/#?]*)(.*)', re.DOTALL)
url = '//www.example.org:80/foo/bar/baz.html'
assert urlpattern.match(url).group(1) == 'www.example.org:80' |
wagtailmenus/apps.py | pierremanceaux/wagtailmenus | 329 | 11103374 | from django.apps import AppConfig
class WagtailMenusConfig(AppConfig):
name = 'wagtailmenus'
verbose_name = 'WagtailMenus'
|
examples/issues/so_recursion.py | tgolsson/appJar | 666 | 11103432 | import sys
sys.path.append("../../")
from appJar import gui
def loginButton(button):
if button == "Cancel":
app.stop()
else:
usr = app.getEntry("Username")
pwd = app.getEntry("Password")
login(usr,pwd)
def login(usr,pwd):
if usr == "1" and pwd == "1":
app.hide()
print ("Success go to next gui")
app.showSubWindow('Custom IRC')
else:
addLoginErrorMessage()
def addLoginErrorMessage():
app.opengui("Custon IRC Login")
app.addLabel("ErrorLabel", "Wrong username or password.")
def chatGUI(usr):
app = chatGUI("Custom IRC")
##app.addLabelOptionBox("Select Server", ["127.0.0.1"], 0, 0, 1)
##app.addListBox("chatBox",1, 0, 3, 2)
##app.addEntry("chatInput", 3, 0, 3)
app.go()
app = gui("Custom IRC Login")
app.addLabelEntry("Username")
app.addLabelSecretEntry("Password")
app.addButtons(["Submit", "Cancel"], loginButton)
#app.startSubWindow("Custom IRC")
#app.addLabel('aaa')
#app.stopSubWindow()
app.go()
|
scripts/automation/trex_control_plane/interactive/trex/wireless/examples/trex_path.py | timgates42/trex-core | 956 | 11103434 | import sys, os
cur_dir = os.path.dirname(__file__)
sys.path.insert(0, os.path.join(cur_dir, os.pardir))
sys.path.insert(0, os.path.join(cur_dir, os.pardir, os.pardir))
sys.path.insert(0, os.path.join(cur_dir, os.pardir, os.pardir, os.pardir)) |
bmtk/tests/builder/auxi/test_node_params.py | aaberbach/bmtk | 216 | 11103507 | <reponame>aaberbach/bmtk
import pytest
import numpy as np
from bmtk.builder.auxi.node_params import positions_columinar, positions_cuboid, positions_list
def test_positions_columinar():
np.random.seed(1)
points = positions_columinar(N=10, center=[0.0, 0.0, 0.0], height=10.0, min_radius=1.0, max_radius=2.0)
assert(points.shape == (10, 3))
# check height restriction
ys = points[:, 1]
assert(np.min(ys) >= -5.0)
assert(np.max(ys) <= 5.0)
# check radius restrictions
xz_plane = points[:,[0,2]]
dists = np.linalg.norm(xz_plane, axis=1)
assert(np.min(dists) >= 1.0)
assert(np.max(dists) <= 2.0)
def test_positions_cuboid():
np.random.seed(1)
points = positions_cuboid(N=10, center=[0.0, 0.0, 0.0], height=5.0, xside_length=2.0, yside_length=2.0,
min_dist=1.0)
assert(points.shape == (10, 3))
def test_positions_list():
assert(positions_list().shape == (2, 3))
if __name__ == '__main__':
# test_positions_columinar()
# test_positions_cuboid()
test_positions_list()
|
tests/test_service_catalog/test_views/test_customer/test_instance/test_instance_request_operation.py | Sispheor/squest | 112 | 11103542 | <gh_stars>100-1000
from django.urls import reverse
from service_catalog.models import Request
from service_catalog.models.instance import InstanceState
from tests.test_service_catalog.base_test_request import BaseTestRequest
class TestCustomerInstanceRequestOperation(BaseTestRequest):
def setUp(self):
super(TestCustomerInstanceRequestOperation, self).setUp()
self.test_instance.state = InstanceState.AVAILABLE
self.test_instance.save()
def test_can_create_operation_request(self):
# get number of request before submitting
current_request_number = Request.objects.all().count()
expected_request_number = current_request_number + 1
args = {
'instance_id': self.test_instance.id,
'operation_id': self.update_operation_test.id
}
data = {'text_variable': 'my_var'}
url = reverse('service_catalog:instance_request_new_operation', kwargs=args)
response = self.client.post(url, data=data)
self.assertEqual(302, response.status_code)
self.test_instance.refresh_from_db()
self.assertEqual(self.test_instance.state, InstanceState.AVAILABLE)
self.assertEqual(expected_request_number, Request.objects.all().count())
def test_cannot_request_non_valid_operation(self):
# operation belong to another service
args = {
'instance_id': self.test_instance.id,
'operation_id': self.update_operation_test_2.id
}
data = {'text_variable': 'my_var'}
url = reverse('service_catalog:instance_request_new_operation', kwargs=args)
response = self.client.post(url, data=data)
self.assertEqual(403, response.status_code)
def test_cannot_request_non_available_instance(self):
for state in [InstanceState.PENDING, InstanceState.PROVISIONING, InstanceState.DELETING, InstanceState.DELETED]:
self.test_instance.state = state
self.test_instance.save()
args = {
'instance_id': self.test_instance.id,
'operation_id': self.update_operation_test.id
}
data = {'text_variable': 'my_var'}
url = reverse('service_catalog:instance_request_new_operation', kwargs=args)
response = self.client.post(url, data=data)
self.assertEqual(403, response.status_code)
|
kuryr_kubernetes/handlers/health.py | digitalsimboja/kuryr-kubernetes | 155 | 11103638 | <reponame>digitalsimboja/kuryr-kubernetes
# Copyright 2018 <NAME>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class HealthRegister(object):
instance = None
def __init__(self):
self.registry = []
def register(self, elem):
self.registry.append(elem)
@classmethod
def get_instance(cls):
if not HealthRegister.instance:
HealthRegister.instance = cls()
return HealthRegister.instance
class HealthHandler(object):
"""Base class for health handlers."""
def __init__(self):
super(HealthHandler, self).__init__()
self._alive = True
self._ready = True
self._manager = HealthRegister.get_instance()
self._manager.register(self)
self._last_exception = None
def set_liveness(self, alive, exc=None):
if exc:
self._last_exception = exc
self._alive = alive
def set_readiness(self, ready):
self._ready = ready
def is_alive(self):
return self._alive
def is_ready(self, *args):
return self._ready
def get_last_exception(self):
return self._last_exception
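# --- Illustration only (not part of kuryr-kubernetes): a minimal usage sketch of
# --- the HealthRegister singleton; _DemoHandler is a hypothetical subclass used
# --- solely to show that handlers register themselves on construction.
if __name__ == '__main__':
    class _DemoHandler(HealthHandler):
        pass

    handler = _DemoHandler()  # registers itself with the shared HealthRegister
    handler.set_liveness(False, exc=RuntimeError("probe failed"))
    registry = HealthRegister.get_instance().registry
    assert handler in registry
    assert not handler.is_alive()
    assert handler.is_ready()
    assert isinstance(handler.get_last_exception(), RuntimeError)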
|
ee/connectors/db/loaders/clickhouse_loader.py | nogamenofun98/openreplay | 3,614 | 11103646 | <reponame>nogamenofun98/openreplay
def insert_to_clickhouse(db, df, table: str):
df.to_sql(table, db.engine, if_exists='append', index=False)
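# Illustration only: a minimal usage sketch. The `db` object exposing a SQLAlchemy
# `engine` attribute is an assumption taken from the signature above, and the
# table name is hypothetical.
#
#     import pandas as pd
#     df = pd.DataFrame({"session_id": [1, 2], "event_type": ["click", "scroll"]})
#     insert_to_clickhouse(db, df, "events_buffer")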
|
src/dispatch/report/enums.py | roor0/dispatch | 3,417 | 11103669 | from dispatch.enums import DispatchEnum
class ReportTypes(DispatchEnum):
tactical_report = "Tactical Report"
executive_report = "Executive Report"
|
moviemon/__init__.py | toxygen/moviemon | 207 | 11103680 | <reponame>toxygen/moviemon
from .moviemon import main
|
naomi/__main__.py | kowo-zahl/Naomi | 194 | 11103701 | # -*- coding: utf-8 -*-
import sys
import logging
import argparse
from . import application
from . import app_utils
from . import coloredformatting as cf
from . import i18n
from . import paths
from . import profile
logo = cf.naomidefaults.logo
sto = cf.naomidefaults.sto
USE_STANDARD_MIC = application.USE_STANDARD_MIC
USE_TEXT_MIC = application.USE_TEXT_MIC
USE_BATCH_MIC = application.USE_BATCH_MIC
def main(args=None):
logger = logging.getLogger(__name__)
language = profile.get_profile_var(['language'])
if(not language):
language = 'en-US'
logger.warn(
' '.join([
'language not specified in profile,',
'using default ({})'.format(language)
])
)
translations = i18n.parse_translations(paths.data('locale'))
translator = i18n.GettextMixin(translations)
_ = translator.gettext
parser = argparse.ArgumentParser(description='Naomi Voice Control Center')
parser.add_argument(
'--debug',
action='store_true',
help='Show debug messages'
)
parser.add_argument(
'--passive-listen',
action='store_true',
help='Check for keyword and command in same input'
)
parser.add_argument(
'--repopulate',
action='store_true',
help='Rebuild configuration profile'
)
parser.add_argument(
'--save-passive-audio',
action='store_true',
help='Save passive recordings and transcripts for training'
)
parser.add_argument(
'--save-active-audio',
action='store_true',
help='Save active recordings and transcripts for training'
)
parser.add_argument(
'--save-noise',
action='store_true',
help='Save noise recordings for training'
)
parser.add_argument(
'--save-audio',
action='store_true',
help=' '.join([
'Save passive, active and noise audio recordings',
'and transcripts for training'
])
)
parser.add_argument(
'--listen-while-talking',
action='store_true',
help=' '.join([
'Continue to listen while talking. This allows you to interrupt',
'Naomi, but may also lead to Naomi attempting to respond to its',
'own voice.'
])
)
# Plugin Repository Management
pr_man = parser.add_mutually_exclusive_group(required=False)
pr_man.add_argument(
'--list-available-plugins',
nargs='*',
dest='list_available',
action='append',
help='List available plugins (by category) and exit'
)
pr_man.add_argument(
'--install',
nargs=1,
dest='plugins_to_install',
action='append',
help='Install plugin and exit'
)
pr_man.add_argument(
'--update',
nargs="?",
dest='plugins_to_update',
action='append',
help='Update specific plugin or all plugins and exit'
)
pr_man.add_argument(
'--remove',
nargs=1,
dest='plugins_to_remove',
action='append',
help='Remove (uninstall) plugins and exit'
)
pr_man.add_argument(
'--disable',
nargs=1,
dest='plugins_to_disable',
action='append',
help='Disable plugins and exit'
)
pr_man.add_argument(
'--enable',
nargs=1,
dest='plugins_to_enable',
action='append',
help='Enable plugins and exit'
)
list_info = parser.add_mutually_exclusive_group(required=False)
list_info.add_argument(
'--list-active-plugins',
action='store_true',
help='List active plugins and exit'
)
list_info.add_argument(
'--list-audio-devices',
action='store_true',
help='List audio devices and exit'
)
# input options
mic_mode = parser.add_mutually_exclusive_group(required=False)
mic_mode.add_argument(
'--local',
action='store_true',
help='Use text input/output instead of verbal interface'
)
mic_mode.add_argument(
'--batch',
dest='batch_file',
metavar="FILE",
type=argparse.FileType('r'),
help=' '.join([
'Batch mode using a text file with references to audio files'
])
)
mic_mode.add_argument(
'--print-transcript',
action='store_true',
help='Prints a transcription of things Naomi says and thinks it hears'
)
p_args = parser.parse_args(args)
print(logo)
print(" ___ ___ ___ ___ ")
print(" /\__\ /\ \ /\ \ /\__\ ___ ")
print(" /::| | /::\ \ /::\ \ /::| | /\ \ ")
print(" /:|:| | /:/\:\ \ /:/\:\ \ /:|:| | \:\ \ ")
print(" /:/|:| |__ /::\~\:\ \ /:/ \:\ \ /:/|:|__|__ /::\__\ ")
print(" /:/ |:| /\__\ /:/\:\ \:\__\ /:/__/ \:\__\ /:/ |::::\__\ /:/\/__/ ")
print(" \/__|:|/:/ / \/__\:\/:/ / \:\ \ /:/ / \/__/~~/:/ / __/:/ / ")
print(" |:/:/ / \::/ / \:\ /:/ / /:/ / /\/:/ / ")
print(" |::/ / /:/ / \:\/:/ / /:/ / \::/__/ ")
print(" /:/ / /:/ / \::/ / /:/ / \:\__\ ")
print(" \/__/ \/__/ \/__/ \/__/ \/__/ ")
print(sto)
# Set up logging
logging.basicConfig(
level=logging.DEBUG if p_args.debug else logging.ERROR
)
# Select Mic
used_mic = USE_STANDARD_MIC
if p_args.local:
# Use Local text mic
used_mic = USE_TEXT_MIC
elif p_args.batch_file is not None:
# Use batched mode mic, pass a file too
used_mic = USE_BATCH_MIC
# listen
# AaronC 2019-05-29
# This keeps an argument in a static location
# so we don't have to keep passing it from library
# to library. We need to know if the user wants to
# re-run populate.py when we examine the settings
# variable while instantiating plugin objects
# in plugin.GenericPlugin.__init__()
profile.set_arg("repopulate", p_args.repopulate)
if(p_args.listen_while_talking):
profile.set_arg("listen_while_talking", 'Yes')
else:
profile.set_arg(
"listen_while_talking",
app_utils.is_positive(
profile.get(
["listen_while_talking"],
'false'
)
)
)
# Run Naomi
app = application.Naomi(
use_mic=used_mic,
batch_file=p_args.batch_file,
repopulate=p_args.repopulate,
print_transcript=p_args.print_transcript,
passive_listen=p_args.passive_listen,
save_audio=p_args.save_audio,
save_passive_audio=p_args.save_passive_audio,
save_active_audio=p_args.save_active_audio,
save_noise=p_args.save_noise
)
if p_args.list_audio_devices:
app.list_audio_devices()
sys.exit(0)
if p_args.list_active_plugins:
print(_("Active Plugins:"))
active_plugins = app.npe.list_active_plugins()
len_name = max(len(active_plugins[info].name) for info in active_plugins)
len_version = max(len(active_plugins[info].version) for info in active_plugins)
for name in sorted(active_plugins):
info = active_plugins[name]
print(
"{} {} - {}".format(
info.name.ljust(len_name),
("(v%s)" % info.version).ljust(len_version),
info.description
)
)
sys.exit(0)
if p_args.list_available:
print(_("Available Plugins:"))
print_plugins = app.npe.list_available_plugins(p_args.list_available)
if(len(print_plugins) == 0):
print(_("Sorry, no plugins matched"))
else:
for name in sorted(print_plugins):
print(print_plugins[name])
sys.exit(0)
if p_args.plugins_to_install:
print(app.npe.install_plugins(p_args.plugins_to_install))
sys.exit(0)
if p_args.plugins_to_update:
print(app.npe.update_plugins(p_args.plugins_to_update))
sys.exit(0)
if p_args.plugins_to_remove:
print(app.npe.remove_plugins(p_args.plugins_to_remove))
sys.exit(0)
if p_args.plugins_to_enable:
print(app.npe.enable_plugins(p_args.plugins_to_enable))
sys.exit(0)
if p_args.plugins_to_disable:
print(app.npe.disable_plugins(p_args.plugins_to_disable))
sys.exit(0)
app.run()
if __name__ == '__main__':
main()
|
api/chalicelib/core/errors.py | nogamenofun98/openreplay | 3,614 | 11103708 | import json
from chalicelib.core import sourcemaps, sessions
from chalicelib.utils import pg_client, helper, dev
from chalicelib.utils.TimeUTC import TimeUTC
from chalicelib.utils.metrics_helper import __get_step_size
def get(error_id, family=False):
if family:
return get_batch([error_id])
with pg_client.PostgresClient() as cur:
query = cur.mogrify(
"SELECT * FROM events.errors AS e INNER JOIN public.errors AS re USING(error_id) WHERE error_id = %(error_id)s;",
{"error_id": error_id})
cur.execute(query=query)
result = cur.fetchone()
if result is not None:
result["stacktrace_parsed_at"] = TimeUTC.datetime_to_timestamp(result["stacktrace_parsed_at"])
return helper.dict_to_camel_case(result)
def get_batch(error_ids):
if len(error_ids) == 0:
return []
with pg_client.PostgresClient() as cur:
query = cur.mogrify(
"""
WITH RECURSIVE error_family AS (
SELECT *
FROM public.errors
WHERE error_id IN %(error_ids)s
UNION
SELECT child_errors.*
FROM public.errors AS child_errors
INNER JOIN error_family ON error_family.error_id = child_errors.parent_error_id OR error_family.parent_error_id = child_errors.error_id
)
SELECT *
FROM error_family;""",
{"error_ids": tuple(error_ids)})
cur.execute(query=query)
errors = cur.fetchall()
for e in errors:
e["stacktrace_parsed_at"] = TimeUTC.datetime_to_timestamp(e["stacktrace_parsed_at"])
return helper.list_to_camel_case(errors)
def __flatten_sort_key_count_version(data, merge_nested=False):
if data is None:
return []
return sorted(
[
{
"name": f'{o["name"]}@{v["version"]}',
"count": v["count"]
} for o in data for v in o["partition"]
],
key=lambda o: o["count"], reverse=True) if merge_nested else \
[
{
"name": o["name"],
"count": o["count"],
} for o in data
]
def __process_tags(row):
return [
{"name": "browser", "partitions": __flatten_sort_key_count_version(data=row.get("browsers_partition"))},
{"name": "browser.ver",
"partitions": __flatten_sort_key_count_version(data=row.pop("browsers_partition"), merge_nested=True)},
{"name": "OS", "partitions": __flatten_sort_key_count_version(data=row.get("os_partition"))},
{"name": "OS.ver",
"partitions": __flatten_sort_key_count_version(data=row.pop("os_partition"), merge_nested=True)},
{"name": "device.family", "partitions": __flatten_sort_key_count_version(data=row.get("device_partition"))},
{"name": "device",
"partitions": __flatten_sort_key_count_version(data=row.pop("device_partition"), merge_nested=True)},
{"name": "country", "partitions": row.pop("country_partition")}
]
def get_details(project_id, error_id, user_id, **data):
pg_sub_query24 = __get_basic_constraints(time_constraint=False, chart=True, step_size_name="step_size24")
pg_sub_query24.append("error_id = %(error_id)s")
pg_sub_query30 = __get_basic_constraints(time_constraint=False, chart=True, step_size_name="step_size30")
pg_sub_query30.append("error_id = %(error_id)s")
pg_basic_query = __get_basic_constraints(time_constraint=False)
pg_basic_query.append("error_id = %(error_id)s")
with pg_client.PostgresClient() as cur:
data["startDate24"] = TimeUTC.now(-1)
data["endDate24"] = TimeUTC.now()
data["startDate30"] = TimeUTC.now(-30)
data["endDate30"] = TimeUTC.now()
density24 = int(data.get("density24", 24))
step_size24 = __get_step_size(data["startDate24"], data["endDate24"], density24, factor=1)
density30 = int(data.get("density30", 30))
step_size30 = __get_step_size(data["startDate30"], data["endDate30"], density30, factor=1)
params = {
"startDate24": data['startDate24'],
"endDate24": data['endDate24'],
"startDate30": data['startDate30'],
"endDate30": data['endDate30'],
"project_id": project_id,
"userId": user_id,
"step_size24": step_size24,
"step_size30": step_size30,
"error_id": error_id}
main_pg_query = f"""\
SELECT error_id,
name,
message,
users,
sessions,
last_occurrence,
first_occurrence,
last_session_id,
browsers_partition,
os_partition,
device_partition,
country_partition,
chart24,
chart30
FROM (SELECT error_id,
name,
message,
COUNT(DISTINCT user_uuid) AS users,
COUNT(DISTINCT session_id) AS sessions
FROM public.errors
INNER JOIN events.errors AS s_errors USING (error_id)
INNER JOIN public.sessions USING (session_id)
WHERE error_id = %(error_id)s
GROUP BY error_id, name, message) AS details
INNER JOIN (SELECT error_id,
MAX(timestamp) AS last_occurrence,
MIN(timestamp) AS first_occurrence
FROM events.errors
WHERE error_id = %(error_id)s
GROUP BY error_id) AS time_details USING (error_id)
INNER JOIN (SELECT error_id,
session_id AS last_session_id,
user_os,
user_os_version,
user_browser,
user_browser_version,
user_device,
user_device_type,
user_uuid
FROM events.errors INNER JOIN public.sessions USING (session_id)
WHERE error_id = %(error_id)s
ORDER BY errors.timestamp DESC
LIMIT 1) AS last_session_details USING (error_id)
INNER JOIN (SELECT jsonb_agg(browser_details) AS browsers_partition
FROM (SELECT *
FROM (SELECT user_browser AS name,
COUNT(session_id) AS count
FROM events.errors
INNER JOIN sessions USING (session_id)
WHERE {" AND ".join(pg_basic_query)}
GROUP BY user_browser
ORDER BY count DESC) AS count_per_browser_query
INNER JOIN LATERAL (SELECT JSONB_AGG(version_details) AS partition
FROM (SELECT user_browser_version AS version,
COUNT(session_id) AS count
FROM events.errors INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_basic_query)}
AND sessions.user_browser = count_per_browser_query.name
GROUP BY user_browser_version
ORDER BY count DESC) AS version_details
) AS browser_version_details ON (TRUE)) AS browser_details) AS browser_details ON (TRUE)
INNER JOIN (SELECT jsonb_agg(os_details) AS os_partition
FROM (SELECT *
FROM (SELECT user_os AS name,
COUNT(session_id) AS count
FROM events.errors INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_basic_query)}
GROUP BY user_os
ORDER BY count DESC) AS count_per_os_details
INNER JOIN LATERAL (SELECT jsonb_agg(count_per_version_details) AS partition
FROM (SELECT COALESCE(user_os_version,'unknown') AS version, COUNT(session_id) AS count
FROM events.errors INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_basic_query)}
AND sessions.user_os = count_per_os_details.name
GROUP BY user_os_version
ORDER BY count DESC) AS count_per_version_details
GROUP BY count_per_os_details.name ) AS os_version_details
ON (TRUE)) AS os_details) AS os_details ON (TRUE)
INNER JOIN (SELECT jsonb_agg(device_details) AS device_partition
FROM (SELECT *
FROM (SELECT user_device_type AS name,
COUNT(session_id) AS count
FROM events.errors INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_basic_query)}
GROUP BY user_device_type
ORDER BY count DESC) AS count_per_device_details
INNER JOIN LATERAL (SELECT jsonb_agg(count_per_device_v_details) AS partition
FROM (SELECT CASE
WHEN user_device = '' OR user_device ISNULL
THEN 'unknown'
ELSE user_device END AS version,
COUNT(session_id) AS count
FROM events.errors INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_basic_query)}
AND sessions.user_device_type = count_per_device_details.name
GROUP BY user_device
ORDER BY count DESC) AS count_per_device_v_details
GROUP BY count_per_device_details.name ) AS device_version_details
ON (TRUE)) AS device_details) AS device_details ON (TRUE)
INNER JOIN (SELECT jsonb_agg(count_per_country_details) AS country_partition
FROM (SELECT user_country AS name,
COUNT(session_id) AS count
FROM events.errors INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_basic_query)}
GROUP BY user_country
ORDER BY count DESC) AS count_per_country_details) AS country_details ON (TRUE)
INNER JOIN (SELECT jsonb_agg(chart_details) AS chart24
FROM (SELECT generated_timestamp AS timestamp,
COUNT(session_id) AS count
FROM generate_series(%(startDate24)s, %(endDate24)s, %(step_size24)s) AS generated_timestamp
LEFT JOIN LATERAL (SELECT DISTINCT session_id
FROM events.errors
INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query24)}
) AS chart_details ON (TRUE)
GROUP BY generated_timestamp
ORDER BY generated_timestamp) AS chart_details) AS chart_details24 ON (TRUE)
INNER JOIN (SELECT jsonb_agg(chart_details) AS chart30
FROM (SELECT generated_timestamp AS timestamp,
COUNT(session_id) AS count
FROM generate_series(%(startDate30)s, %(endDate30)s, %(step_size30)s) AS generated_timestamp
LEFT JOIN LATERAL (SELECT DISTINCT session_id
FROM events.errors INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query30)}) AS chart_details
ON (TRUE)
GROUP BY timestamp
ORDER BY timestamp) AS chart_details) AS chart_details30 ON (TRUE);
"""
# print("--------------------")
# print(cur.mogrify(main_pg_query, params))
# print("--------------------")
cur.execute(cur.mogrify(main_pg_query, params))
row = cur.fetchone()
if row is None:
return {"errors": ["error not found"]}
row["tags"] = __process_tags(row)
query = cur.mogrify(
f"""SELECT error_id, status, session_id, start_ts,
parent_error_id,session_id, user_anonymous_id,
user_id, user_uuid, user_browser, user_browser_version,
user_os, user_os_version, user_device, payload,
COALESCE((SELECT TRUE
FROM public.user_favorite_errors AS fe
WHERE pe.error_id = fe.error_id
AND fe.user_id = %(user_id)s), FALSE) AS favorite,
True AS viewed
FROM public.errors AS pe
INNER JOIN events.errors AS ee USING (error_id)
INNER JOIN public.sessions USING (session_id)
WHERE pe.project_id = %(project_id)s
AND error_id = %(error_id)s
ORDER BY start_ts DESC
LIMIT 1;""",
{"project_id": project_id, "error_id": error_id, "user_id": user_id})
cur.execute(query=query)
status = cur.fetchone()
if status is not None:
row["stack"] = format_first_stack_frame(status).pop("stack")
row["status"] = status.pop("status")
row["parent_error_id"] = status.pop("parent_error_id")
row["favorite"] = status.pop("favorite")
row["viewed"] = status.pop("viewed")
row["last_hydrated_session"] = status
else:
row["stack"] = []
row["last_hydrated_session"] = None
row["status"] = "untracked"
row["parent_error_id"] = None
row["favorite"] = False
row["viewed"] = False
return {"data": helper.dict_to_camel_case(row)}
def get_details_chart(project_id, error_id, user_id, **data):
pg_sub_query = __get_basic_constraints()
pg_sub_query.append("error_id = %(error_id)s")
pg_sub_query_chart = __get_basic_constraints(time_constraint=False, chart=True)
pg_sub_query_chart.append("error_id = %(error_id)s")
with pg_client.PostgresClient() as cur:
if data.get("startDate") is None:
data["startDate"] = TimeUTC.now(-7)
else:
data["startDate"] = int(data["startDate"])
if data.get("endDate") is None:
data["endDate"] = TimeUTC.now()
else:
data["endDate"] = int(data["endDate"])
density = int(data.get("density", 7))
step_size = __get_step_size(data["startDate"], data["endDate"], density, factor=1)
params = {
"startDate": data['startDate'],
"endDate": data['endDate'],
"project_id": project_id,
"userId": user_id,
"step_size": step_size,
"error_id": error_id}
main_pg_query = f"""\
SELECT %(error_id)s AS error_id,
browsers_partition,
os_partition,
device_partition,
country_partition,
chart
FROM (SELECT jsonb_agg(browser_details) AS browsers_partition
FROM (SELECT *
FROM (SELECT user_browser AS name,
COUNT(session_id) AS count
FROM events.errors INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query)}
GROUP BY user_browser
ORDER BY count DESC) AS count_per_browser_query
INNER JOIN LATERAL (SELECT jsonb_agg(count_per_version_details) AS partition
FROM (SELECT user_browser_version AS version,
COUNT(session_id) AS count
FROM events.errors INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query)}
AND user_browser = count_per_browser_query.name
GROUP BY user_browser_version
ORDER BY count DESC) AS count_per_version_details) AS browesr_version_details
ON (TRUE)) AS browser_details) AS browser_details
INNER JOIN (SELECT jsonb_agg(os_details) AS os_partition
FROM (SELECT *
FROM (SELECT user_os AS name,
COUNT(session_id) AS count
FROM events.errors INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query)}
GROUP BY user_os
ORDER BY count DESC) AS count_per_os_details
INNER JOIN LATERAL (SELECT jsonb_agg(count_per_version_query) AS partition
FROM (SELECT COALESCE(user_os_version, 'unknown') AS version,
COUNT(session_id) AS count
FROM events.errors INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query)}
AND user_os = count_per_os_details.name
GROUP BY user_os_version
ORDER BY count DESC) AS count_per_version_query
) AS os_version_query ON (TRUE)) AS os_details) AS os_details ON (TRUE)
INNER JOIN (SELECT jsonb_agg(device_details) AS device_partition
FROM (SELECT *
FROM (SELECT user_device_type AS name,
COUNT(session_id) AS count
FROM events.errors INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query)}
GROUP BY user_device_type
ORDER BY count DESC) AS count_per_device_details
INNER JOIN LATERAL (SELECT jsonb_agg(count_per_device_details) AS partition
FROM (SELECT CASE
WHEN user_device = '' OR user_device ISNULL
THEN 'unknown'
ELSE user_device END AS version,
COUNT(session_id) AS count
FROM events.errors INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query)}
AND user_device_type = count_per_device_details.name
GROUP BY user_device_type, user_device
ORDER BY count DESC) AS count_per_device_details
) AS device_version_details ON (TRUE)) AS device_details) AS device_details ON (TRUE)
INNER JOIN (SELECT jsonb_agg(count_per_country_details) AS country_partition
FROM (SELECT user_country AS name,
COUNT(session_id) AS count
FROM events.errors INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query)}
GROUP BY user_country
ORDER BY count DESC) AS count_per_country_details) AS country_details ON (TRUE)
INNER JOIN (SELECT jsonb_agg(chart_details) AS chart
FROM (SELECT generated_timestamp AS timestamp,
COUNT(session_id) AS count
FROM generate_series(%(startDate)s, %(endDate)s, %(step_size)s) AS generated_timestamp
LEFT JOIN LATERAL (SELECT DISTINCT session_id
FROM events.errors
INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query_chart)}
) AS chart_details ON (TRUE)
GROUP BY generated_timestamp
ORDER BY generated_timestamp) AS chart_details) AS chart_details ON (TRUE);"""
cur.execute(cur.mogrify(main_pg_query, params))
row = cur.fetchone()
if row is None:
return {"errors": ["error not found"]}
row["tags"] = __process_tags(row)
return {"data": helper.dict_to_camel_case(row)}
def __get_basic_constraints(platform=None, time_constraint=True, startTime_arg_name="startDate",
endTime_arg_name="endDate", chart=False, step_size_name="step_size",
project_key="project_id"):
ch_sub_query = [f"{project_key} =%(project_id)s"]
if time_constraint:
ch_sub_query += [f"timestamp >= %({startTime_arg_name})s",
f"timestamp < %({endTime_arg_name})s"]
if chart:
ch_sub_query += [f"timestamp >= generated_timestamp",
f"timestamp < generated_timestamp + %({step_size_name})s"]
if platform == 'mobile':
ch_sub_query.append("user_device_type = 'mobile'")
elif platform == 'desktop':
ch_sub_query.append("user_device_type = 'desktop'")
return ch_sub_query
def __get_sort_key(key):
return {
"datetime": "max_datetime",
"lastOccurrence": "max_datetime",
"firstOccurrence": "min_datetime"
}.get(key, 'max_datetime')
@dev.timed
def search(data, project_id, user_id, flows=False, status="ALL", favorite_only=False):
status = status.upper()
if status.lower() not in ['all', 'unresolved', 'resolved', 'ignored']:
return {"errors": ["invalid error status"]}
pg_sub_query = __get_basic_constraints(data.get('platform'), project_key="sessions.project_id")
pg_sub_query += ["sessions.start_ts>=%(startDate)s", "sessions.start_ts<%(endDate)s", "source ='js_exception'",
"pe.project_id=%(project_id)s"]
pg_sub_query_chart = __get_basic_constraints(data.get('platform'), time_constraint=False, chart=True)
pg_sub_query_chart.append("source ='js_exception'")
pg_sub_query_chart.append("errors.error_id =details.error_id")
statuses = []
error_ids = None
if data.get("startDate") is None:
data["startDate"] = TimeUTC.now(-30)
if data.get("endDate") is None:
data["endDate"] = TimeUTC.now(1)
if len(data.get("events", [])) > 0 or len(data.get("filters", [])) > 0 or status != "ALL" or favorite_only:
statuses = sessions.search2_pg(data=data, project_id=project_id, user_id=user_id, errors_only=True,
error_status=status, favorite_only=favorite_only)
if len(statuses) == 0:
return {"data": {
'total': 0,
'errors': []
}}
error_ids = [e["error_id"] for e in statuses]
with pg_client.PostgresClient() as cur:
if data.get("startDate") is None:
data["startDate"] = TimeUTC.now(-7)
if data.get("endDate") is None:
data["endDate"] = TimeUTC.now()
density = data.get("density", 7)
step_size = __get_step_size(data["startDate"], data["endDate"], density, factor=1)
sort = __get_sort_key('datetime')
if data.get("sort") is not None:
sort = __get_sort_key(data["sort"])
order = "DESC"
if data.get("order") is not None:
order = data["order"]
params = {
"startDate": data['startDate'],
"endDate": data['endDate'],
"project_id": project_id,
"userId": user_id,
"step_size": step_size}
if error_ids is not None:
params["error_ids"] = tuple(error_ids)
pg_sub_query.append("error_id IN %(error_ids)s")
main_pg_query = f"""\
SELECT error_id,
name,
message,
users,
sessions,
last_occurrence,
first_occurrence,
chart
FROM (SELECT error_id,
name,
message,
COUNT(DISTINCT user_uuid) AS users,
COUNT(DISTINCT session_id) AS sessions,
MAX(timestamp) AS max_datetime,
MIN(timestamp) AS min_datetime
FROM events.errors
INNER JOIN public.errors AS pe USING (error_id)
INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query)}
GROUP BY error_id, name, message
ORDER BY {sort} {order}) AS details
INNER JOIN LATERAL (SELECT MAX(timestamp) AS last_occurrence,
MIN(timestamp) AS first_occurrence
FROM events.errors
WHERE errors.error_id = details.error_id) AS time_details ON (TRUE)
INNER JOIN LATERAL (SELECT jsonb_agg(chart_details) AS chart
FROM (SELECT generated_timestamp AS timestamp,
COUNT(session_id) AS count
FROM generate_series(%(startDate)s, %(endDate)s, %(step_size)s) AS generated_timestamp
LEFT JOIN LATERAL (SELECT DISTINCT session_id
FROM events.errors INNER JOIN public.errors AS m_errors USING (error_id)
WHERE {" AND ".join(pg_sub_query_chart)}
) AS sessions ON (TRUE)
GROUP BY timestamp
ORDER BY timestamp) AS chart_details) AS chart_details ON (TRUE);"""
# print("--------------------")
# print(cur.mogrify(main_pg_query, params))
cur.execute(cur.mogrify(main_pg_query, params))
total = cur.rowcount
if flows:
return {"data": {"count": total}}
row = cur.fetchone()
rows = []
limit = 200
while row is not None and len(rows) < limit:
rows.append(row)
row = cur.fetchone()
if total == 0:
rows = []
else:
if len(statuses) == 0:
query = cur.mogrify(
"""SELECT error_id, status, parent_error_id, payload,
COALESCE((SELECT TRUE
FROM public.user_favorite_errors AS fe
WHERE errors.error_id = fe.error_id
AND fe.user_id = %(user_id)s LIMIT 1), FALSE) AS favorite,
COALESCE((SELECT TRUE
FROM public.user_viewed_errors AS ve
WHERE errors.error_id = ve.error_id
AND ve.user_id = %(user_id)s LIMIT 1), FALSE) AS viewed
FROM public.errors
WHERE project_id = %(project_id)s AND error_id IN %(error_ids)s;""",
{"project_id": project_id, "error_ids": tuple([r["error_id"] for r in rows]),
"user_id": user_id})
cur.execute(query=query)
statuses = cur.fetchall()
statuses = {
s["error_id"]: s for s in statuses
}
for r in rows:
if r["error_id"] in statuses:
r["status"] = statuses[r["error_id"]]["status"]
r["parent_error_id"] = statuses[r["error_id"]]["parent_error_id"]
r["favorite"] = statuses[r["error_id"]]["favorite"]
r["viewed"] = statuses[r["error_id"]]["viewed"]
r["stack"] = format_first_stack_frame(statuses[r["error_id"]])["stack"]
else:
r["status"] = "untracked"
r["parent_error_id"] = None
r["favorite"] = False
r["viewed"] = False
r["stack"] = None
offset = len(rows)
rows = [r for r in rows if r["stack"] is None
or (len(r["stack"]) == 0 or len(r["stack"]) > 1
or len(r["stack"]) > 0
and (r["message"].lower() != "script error." or len(r["stack"][0]["absPath"]) > 0))]
offset -= len(rows)
return {
"data": {
'total': total - offset,
'errors': helper.list_to_camel_case(rows)
}
}
def __save_stacktrace(error_id, data):
with pg_client.PostgresClient() as cur:
query = cur.mogrify(
"""UPDATE public.errors
SET stacktrace=%(data)s::jsonb, stacktrace_parsed_at=timezone('utc'::text, now())
WHERE error_id = %(error_id)s;""",
{"error_id": error_id, "data": json.dumps(data)})
cur.execute(query=query)
def get_trace(project_id, error_id):
error = get(error_id=error_id)
if error is None:
return {"errors": ["error not found"]}
if error.get("source", "") != "js_exception":
return {"errors": ["this source of errors doesn't have a sourcemap"]}
if error.get("payload") is None:
return {"errors": ["null payload"]}
if error.get("stacktrace") is not None:
return {"sourcemapUploaded": True,
"trace": error.get("stacktrace"),
"preparsed": True}
trace, all_exists = sourcemaps.get_traces_group(project_id=project_id, payload=error["payload"])
if all_exists:
__save_stacktrace(error_id=error_id, data=trace)
return {"sourcemapUploaded": all_exists,
"trace": trace,
"preparsed": False}
def get_sessions(start_date, end_date, project_id, user_id, error_id):
extra_constraints = ["s.project_id = %(project_id)s",
"s.start_ts >= %(startDate)s",
"s.start_ts <= %(endDate)s",
"e.error_id = %(error_id)s"]
if start_date is None:
start_date = TimeUTC.now(-7)
if end_date is None:
end_date = TimeUTC.now()
params = {
"startDate": start_date,
"endDate": end_date,
"project_id": project_id,
"userId": user_id,
"error_id": error_id}
with pg_client.PostgresClient() as cur:
query = cur.mogrify(
f"""SELECT s.project_id,
s.session_id::text AS session_id,
s.user_uuid,
s.user_id,
s.user_agent,
s.user_os,
s.user_browser,
s.user_device,
s.user_country,
s.start_ts,
s.duration,
s.events_count,
s.pages_count,
s.errors_count,
s.issue_types,
COALESCE((SELECT TRUE
FROM public.user_favorite_sessions AS fs
WHERE s.session_id = fs.session_id
AND fs.user_id = %(userId)s LIMIT 1), FALSE) AS favorite,
COALESCE((SELECT TRUE
FROM public.user_viewed_sessions AS fs
WHERE s.session_id = fs.session_id
AND fs.user_id = %(userId)s LIMIT 1), FALSE) AS viewed
FROM public.sessions AS s INNER JOIN events.errors AS e USING (session_id)
WHERE {" AND ".join(extra_constraints)}
ORDER BY s.start_ts DESC;""",
params)
cur.execute(query=query)
sessions_list = []
total = cur.rowcount
row = cur.fetchone()
while row is not None and len(sessions_list) < 100:
sessions_list.append(row)
row = cur.fetchone()
return {
'total': total,
'sessions': helper.list_to_camel_case(sessions_list)
}
ACTION_STATE = {
"unsolve": 'unresolved',
"solve": 'resolved',
"ignore": 'ignored'
}
def change_state(project_id, user_id, error_id, action):
errors = get(error_id, family=True)
print(len(errors))
status = ACTION_STATE.get(action)
if errors is None or len(errors) == 0:
return {"errors": ["error not found"]}
if errors[0]["status"] == status:
return {"errors": [f"error is already {status}"]}
if errors[0]["status"] == ACTION_STATE["solve"] and status == ACTION_STATE["ignore"]:
return {"errors": [f"state transition not permitted {errors[0]['status']} -> {status}"]}
params = {
"userId": user_id,
"error_ids": tuple([e["errorId"] for e in errors]),
"status": status}
with pg_client.PostgresClient() as cur:
query = cur.mogrify(
"""UPDATE public.errors
SET status = %(status)s
WHERE error_id IN %(error_ids)s
RETURNING status""",
params)
cur.execute(query=query)
row = cur.fetchone()
if row is not None:
for e in errors:
e["status"] = row["status"]
return {"data": errors}
MAX_RANK = 2
def __status_rank(status):
return {
'unresolved': MAX_RANK - 2,
'ignored': MAX_RANK - 1,
'resolved': MAX_RANK
}.get(status)
def merge(error_ids):
error_ids = list(set(error_ids))
errors = get_batch(error_ids)
if len(error_ids) <= 1 or len(error_ids) > len(errors):
return {"errors": ["invalid list of ids"]}
error_ids = [e["errorId"] for e in errors]
parent_error_id = error_ids[0]
status = "unresolved"
for e in errors:
if __status_rank(status) < __status_rank(e["status"]):
status = e["status"]
if __status_rank(status) == MAX_RANK:
break
params = {
"error_ids": tuple(error_ids),
"parent_error_id": parent_error_id,
"status": status
}
with pg_client.PostgresClient() as cur:
query = cur.mogrify(
"""UPDATE public.errors
SET parent_error_id = %(parent_error_id)s, status = %(status)s
WHERE error_id IN %(error_ids)s OR parent_error_id IN %(error_ids)s;""",
params)
cur.execute(query=query)
# row = cur.fetchone()
return {"data": "success"}
def format_first_stack_frame(error):
error["stack"] = sourcemaps.format_payload(error.pop("payload"), truncate_to_first=True)
for s in error["stack"]:
for c in s.get("context", []):
for sci, sc in enumerate(c):
if isinstance(sc, str) and len(sc) > 1000:
c[sci] = sc[:1000]
# convert bytes to string:
if isinstance(s["filename"], bytes):
s["filename"] = s["filename"].decode("utf-8")
return error
def stats(project_id, user_id, startTimestamp=TimeUTC.now(delta_days=-7), endTimestamp=TimeUTC.now()):
with pg_client.PostgresClient() as cur:
query = cur.mogrify(
"""
SELECT COUNT(errors.*) AS unresolved_and_unviewed
FROM public.errors
INNER JOIN (SELECT root_error.error_id
FROM events.errors
INNER JOIN public.errors AS root_error USING (error_id)
WHERE project_id = %(project_id)s
AND timestamp >= %(startTimestamp)s
AND timestamp <= %(endTimestamp)s
AND source = 'js_exception') AS timed_errors USING (error_id)
LEFT JOIN (SELECT error_id FROM public.user_viewed_errors WHERE user_id = %(user_id)s) AS user_viewed
USING (error_id)
WHERE user_viewed.error_id ISNULL
AND errors.project_id = %(project_id)s
AND errors.status = 'unresolved'
AND errors.source = 'js_exception';""",
{"project_id": project_id, "user_id": user_id, "startTimestamp": startTimestamp,
"endTimestamp": endTimestamp})
cur.execute(query=query)
row = cur.fetchone()
return {
"data": helper.dict_to_camel_case(row)
}
|
tests/resources/test_worklog.py | Glushiator/jira | 1,639 | 11103709 | from tests.conftest import JiraTestCase
class WorklogTests(JiraTestCase):
def setUp(self):
JiraTestCase.setUp(self)
self.issue_1 = self.test_manager.project_b_issue1
self.issue_2 = self.test_manager.project_b_issue2
self.issue_3 = self.test_manager.project_b_issue3
def test_worklogs(self):
worklog = self.jira.add_worklog(self.issue_1, "2h")
worklogs = self.jira.worklogs(self.issue_1)
self.assertEqual(len(worklogs), 1)
worklog.delete()
def test_worklogs_with_issue_obj(self):
issue = self.jira.issue(self.issue_1)
worklog = self.jira.add_worklog(issue, "2h")
worklogs = self.jira.worklogs(issue)
self.assertEqual(len(worklogs), 1)
worklog.delete()
def test_worklog(self):
worklog = self.jira.add_worklog(self.issue_1, "1d 2h")
new_worklog = self.jira.worklog(self.issue_1, str(worklog))
self.assertEqual(new_worklog.author.name, self.test_manager.user_admin.name)
self.assertEqual(new_worklog.timeSpent, "1d 2h")
worklog.delete()
def test_worklog_with_issue_obj(self):
issue = self.jira.issue(self.issue_1)
worklog = self.jira.add_worklog(issue, "1d 2h")
new_worklog = self.jira.worklog(issue, str(worklog))
self.assertEqual(new_worklog.author.name, self.test_manager.user_admin.name)
self.assertEqual(new_worklog.timeSpent, "1d 2h")
worklog.delete()
def test_add_worklog(self):
worklog_count = len(self.jira.worklogs(self.issue_2))
worklog = self.jira.add_worklog(self.issue_2, "2h")
self.assertIsNotNone(worklog)
self.assertEqual(len(self.jira.worklogs(self.issue_2)), worklog_count + 1)
worklog.delete()
def test_add_worklog_with_issue_obj(self):
issue = self.jira.issue(self.issue_2)
worklog_count = len(self.jira.worklogs(issue))
worklog = self.jira.add_worklog(issue, "2h")
self.assertIsNotNone(worklog)
self.assertEqual(len(self.jira.worklogs(issue)), worklog_count + 1)
worklog.delete()
def test_update_and_delete_worklog(self):
worklog = self.jira.add_worklog(self.issue_3, "3h")
issue = self.jira.issue(self.issue_3, fields="worklog,timetracking")
worklog.update(comment="Updated!", timeSpent="2h")
self.assertEqual(worklog.comment, "Updated!")
# rem_estimate = issue.fields.timetracking.remainingEstimate
self.assertEqual(worklog.timeSpent, "2h")
issue = self.jira.issue(self.issue_3, fields="worklog,timetracking")
self.assertEqual(issue.fields.timetracking.remainingEstimate, "1h")
worklog.delete()
issue = self.jira.issue(self.issue_3, fields="worklog,timetracking")
self.assertEqual(issue.fields.timetracking.remainingEstimate, "3h")
|
buildroot/support/testing/tests/package/test_perl_html_parser.py | superm1/operating-system | 349 | 11103725 | <filename>buildroot/support/testing/tests/package/test_perl_html_parser.py
from tests.package.test_perl import TestPerlBase
class TestPerlHTMLParser(TestPerlBase):
"""
package:
HTML-Parser XS
direct dependencies:
HTML-Tagset
"""
config = TestPerlBase.config + \
"""
BR2_PACKAGE_PERL=y
BR2_PACKAGE_PERL_HTML_PARSER=y
"""
def test_run(self):
self.login()
self.module_test("HTML::Parser")
|
djangox/lib/python3.8/site-packages/allauth/socialaccount/providers/spotify/provider.py | DemarcusL/django_wiki_lab | 6,342 | 11103748 | from allauth.socialaccount import app_settings
from allauth.socialaccount.providers.base import ProviderAccount
from allauth.socialaccount.providers.oauth2.provider import OAuth2Provider
class SpotifyAccount(ProviderAccount):
def get_profile_url(self):
return self.account.extra_data.get("external_urls").get("spotify")
def get_avatar_url(self):
try:
return self.account.extra_data.get("images")[0].get("url")
except IndexError:
return None
def to_str(self):
dflt = super(SpotifyAccount, self).to_str()
return self.account.extra_data.get("display_name", dflt)
class SpotifyOAuth2Provider(OAuth2Provider):
id = "spotify"
name = "Spotify"
account_class = SpotifyAccount
def extract_uid(self, data):
return data["id"]
def extract_common_fields(self, data):
return dict(name=data.get("display_name"), email=data.get("email"))
def get_default_scope(self):
scope = []
if app_settings.QUERY_EMAIL:
scope.append("user-read-email")
return scope
provider_classes = [SpotifyOAuth2Provider]
|
sequana/freebayes_bcf_filter.py | khourhin/sequana | 138 | 11103752 | #
# This file is part of Sequana software
#
# Copyright (c) 2016-2021 - Sequana Development Team
#
# Distributed under the terms of the 3-clause BSD license.
# The full license is in the LICENSE file, distributed with this software.
#
# website: https://github.com/sequana/sequana
# documentation: http://sequana.readthedocs.io
#
##############################################################################
"""Analysis of VCF file generated by freebayes. This method need the version
0.10 of pysam.
"""
from pysam import VariantFile
from sequana.lazy import pandas as pd
from sequana.vcftools import strand_ratio, compute_strand_balance, compute_frequency
import colorlog
logger = colorlog.getLogger(__name__)
__all__ = ["Variant"]
class Variant(object):
""" Variant class to stock variant reader and dictionary that resume most
important informations.
"""
def __init__(self, record):
""".. rubric:: constructor
:param RecordVariant record: variant record
        :param dict resume: most important information about the variant
"""
self._record = record
self._resume = self._bcf_line_to_dict(record)
def __str__(self):
return str(self.record)
@property
def record(self):
return self._record
@property
def resume(self):
return self._resume
def _bcf_line_to_dict(self, bcf_line):
""" Convert a BCF line as a dictionnary with the most important
information to detect real variants.
"""
        # Compute all the important information
alt_freq = compute_frequency(bcf_line)
strand_bal = compute_strand_balance(bcf_line)
line_dict = {"chr": bcf_line.chrom, "position": str(bcf_line.pos),
"depth": bcf_line.info["DP"], "reference": bcf_line.ref,
"alternative": "; ".join(str(x) for x in bcf_line.alts),
"freebayes_score": bcf_line.qual,
"strand_balance": "; ".join(
"{0:.2f}".format(x) for x in strand_bal),
"frequency": "; ".join(
"{0:.2f}".format(x) for x in alt_freq)}
try:
# If bcf is annotated by snpEff
annotation = bcf_line.info["EFF"][0].split("|")
effect_type, effect_lvl = annotation[0].split("(")
try:
prot_effect, cds_effect = annotation[3].split("/")
except ValueError:
cds_effect = annotation[3]
prot_effect = ""
ann_dict = {"CDS_position": cds_effect[2:],
"effect_type": effect_type,
"codon_change": annotation[2],
"gene_name": annotation[5],
"mutation_type": annotation[1],
"prot_effect": prot_effect[2:],
"prot_size": annotation[4],
"effect_impact": effect_lvl}
line_dict = dict(line_dict, **ann_dict)
except KeyError:
pass
return line_dict
class BCF_freebayes(VariantFile):
""" BCF_freebayes class (Binary Variant Calling Format)
    This class is a wrapper around the VariantFile class from the pysam
    package. It is dedicated to VCF files generated by freebayes and compressed
    by bcftools. BCF files are faster to parse than VCF. A data frame with all
    variants is produced, which can be written as a CSV file. Variants can be
    filtered with a dictionary of filter parameters, and the filtered variants
    are written to a new VCF file.
Example:
::
from sequana import sequana_data, BCF_freebayes
bcf_filename = sequana_data("test.bcf", "testing")
# Read the data
b = BCF_freebayes(bcf_filename)
# Filter the data
filter_dict = {'freebayes_score': 200,
'frequency': 0.8,
'min_depth': 10,
'forward_depth':3,
'reverse_depth':3,
'strand_ratio': 0.2}
b.filter_bcf(filter_dict, "output.vcf")
"""
def __init__(self, input_filename, **kwargs):
"""
        :param str input_filename: a BCF file.
:param kwargs: any arguments accepted by VariantFile.
"""
try:
super().__init__(input_filename, **kwargs)
except OSError:
logger.error("OSError: {0} doesn't exist.".format(input_filename))
raise OSError
# initiate filters dictionary
self._filters = {'freebayes_score': 0,
'frequency': 0,
'min_depth': 0,
'forward_depth':0,
'reverse_depth':0,
'strand_ratio': 0}
@property
def filters(self):
""" Get or set the filters parameters to select variants of interest.
Setter take a dictionnary as parameter to update the attribute
:attr:`BCF_freebayes.filters`. Delete will reset different variable to
0.
::
bcf = BCF_freebayes("input.bcf")
bcf.filter = {"freebayes_score": 200,
"frequency": 0.8,
"min_depth": 10,
"forward_depth":3,
"reverse_depth":3,
"strand_ratio": 0.2}
"""
return self._filters
@filters.setter
def filters(self, d):
self._filters.update(d)
@filters.deleter
def filters(self):
self._filters = {"freebayes_score": 0,
"frequency": 0,
"min_depth": 0,
"forward_depth":0,
"reverse_depth":0,
"strand_ratio": 0}
def filter_bcf(self, filter_dict=None):
""" Filter variants in the BCF file and write them in a BCF file.
:param str output_filename: BCF output filename.
:param dict filter_dict: dictionary of filters. It updates the
attribute :attr:`BCF_freebayes.filters`
Return BCF_freebayes object with new BCF file.
"""
if filter_dict:
self.filters = filter_dict
variants = [Variant(v) for v in self if self._filter_line(v)]
# Rewind the iterator
self.reset()
return Filtered_freebayes(variants, self)
def _filter_line(self, bcf_line):
""" Filter variant with parameter set in :attr:`BCF_freebayes.filters`.
:param VariantRecord bcf_line
Return line if all filters are passed.
"""
# SRF="Number of reference observations on the forward strand
# SRR="Number of reference observations on the reverse strand
# SAF="Number of alternate observations on the forward strand
# SAR=Number of alternate observations on the reverse strand
if bcf_line.qual < self.filters["freebayes_score"]:
return False
if bcf_line.info["DP"] <= self.filters["min_depth"]:
return False
forward_depth = bcf_line.info["SRF"] + sum(bcf_line.info["SAF"])
if forward_depth <= self.filters["forward_depth"]:
return False
reverse_depth = bcf_line.info["SRR"] + sum(bcf_line.info["SAR"])
if reverse_depth <= self.filters["reverse_depth"]:
return False
alt_freq = compute_frequency(bcf_line)
if alt_freq[0] < self.filters["frequency"]:
return False
strand_bal = compute_strand_balance(bcf_line)
if strand_bal[0] < self.filters["strand_ratio"]:
return False
return True
class Filtered_freebayes(object):
""" Variants filtered with BCF_freebayes.
"""
_col_index = ['chr', 'position', 'reference', 'alternative', 'depth',
'frequency', 'strand_balance', 'freebayes_score',
'effect_type', 'mutation_type', 'effect_impact', 'gene_name',
'CDS_position', 'codon_change', 'prot_effect', 'prot_size']
def __init__(self, variants, bcf):
""".. rubric:: constructor
        :param list variants: list of variant records.
        :param BCF_freebayes bcf: parent BCF_freebayes object.
"""
self._variants = variants
self._bcf = bcf
self._df = self._bcf_to_df()
@property
def variants(self):
""" Get the variant list.
"""
return self._variants
@property
def df(self):
return self._df
def _bcf_to_df(self):
""" Create a data frame with the most important information contained
in the bcf file.
"""
dict_list = [v.resume for v in self.variants]
df = pd.DataFrame.from_records(dict_list)
try:
df = df[Filtered_freebayes._col_index]
except (ValueError, KeyError):
df = df[Filtered_freebayes._col_index[:len(df.columns)]]
return df
def to_csv(self, output_filename):
""" Write DataFrame in CSV format.
        :param str output_filename: output CSV filename.
"""
with open(output_filename, "w") as fp:
print("# sequana_variant_calling; {0}".format(self._bcf.filters),
file=fp)
if self.df.empty:
print(",".join(Filtered_freebayes._col_index), file=fp)
else:
self.df.to_csv(fp, index=False)
def to_vcf(self, output_filename):
""" Write BCF file in VCF format.
        :param str output_filename: output VCF filename.
"""
with open(output_filename, "w") as fp:
print(self._bcf.header, end="", file=fp)
for variant in self.variants:
print(variant, end="", file=fp)
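# Illustration only: an end-to-end sketch assuming "input.bcf" is a freebayes BCF
# compressed with bcftools; the filenames and thresholds below are illustrative,
# not defaults.
#
#     bcf = BCF_freebayes("input.bcf")
#     filtered = bcf.filter_bcf({"freebayes_score": 200, "frequency": 0.8,
#                                "min_depth": 10, "forward_depth": 3,
#                                "reverse_depth": 3, "strand_ratio": 0.2})
#     filtered.to_csv("filtered_variants.csv")
#     filtered.to_vcf("filtered_variants.vcf")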
|
xyjxyf/show_me_the_code.py | saurabh896/python-1 | 3,976 | 11103771 | # coding = utf-8
from tools import imager
from PIL import ImageFont
# Problem 0000: Add a red number to the top-right corner of your QQ (or Weibo) avatar.
def add_num(image_path):
im = imager.open_image(image_path)
if im is not None:
font = ImageFont.truetype('Arial.ttf', 20)
w, h = im.size
point = (w - 10, 0)
imager.draw_text(im, "8", point, font)
im.show()
# Problem 0001: Use Python to generate 200 activation codes (or coupons).
import uuid
def create_activation_code(num=200):
codes = []
for i in range(num):
code = str(uuid.uuid1())
code = code.replace('-', '')
codes.append(code)
return codes
# Problem 0002: Save the 200 activation codes (or coupons) generated in Problem 0001 to a MySQL relational database.
import pymysql
def save_activation_code_to_mysql():
conn = pymysql.connect(host='localhost', user='root', charset='UTF8')
cur = conn.cursor()
cur.execute("CREATE DATABASE IF NOT EXISTS code_mysql")
cur.execute("USE code_mysql")
cur.execute("CREATE TABLE IF NOT EXISTS codes (id INT, code VARCHAR(255))")
codes = create_activation_code(200)
for code in codes:
cur.execute("INSERT INTO codes (code) values(%s)", [code])
conn.commit()
cur.execute("SELETE * FROM codes")
data = cur.fetchall()
print("data:%s" % data)
cur.close()
conn.close()
# Problem 0003: Save the 200 activation codes (or coupons) generated in Problem 0001 to a Redis non-relational database.
import redis
def save_activation_code_to_redis():
re = redis.Redis(host='127.0.0.1', port=6379, db=0)
codes = create_activation_code(200)
for code in codes:
        re.lpush('codes', code)
    print("data:%s" % re.lrange('codes', 0, -1))
# Problem 0004: Count the number of words in any English plain-text file.
import re
def number_of_words(file_path=None):
num = 0
if file_path is None:
return num
file = open(file_path, 'r')
content = file.read()
content = " " + content
pattern = re.compile(u'\s+\w+')
match = pattern.findall(content)
num = len(match)
return num
# Problem 0005: You have a directory full of photos; resize them all so that none is larger than the iPhone 5 resolution.
def reset_images_size(dir_path=None):
if dir_path is None:
return
for root, dirs, files in os.walk(dir_path):
for path in files:
if path.startswith("."):
continue
file_path = os.path.join(root, path)
image = imager.open_image(file_path)
if image is not None:
new_image = imager.reset_image_size(image, 640, 1136)
imager.save(new_image, file_path)
# Problem 0006: you have a directory holding a month of diary entries, all txt files;
# to avoid word-segmentation issues, assume the content is English, and find the most important word of each entry.
# Approach: the word that appears most often is considered the most important one
import operator
def get_most_important_word(dir_path=None):
if dir_path is None:
return None
for root, dirs, files in os.walk(dir_path):
for path in files:
if not path.endswith("txt"):
continue
file_path = os.path.join(root, path)
content = open(file_path, 'r').read().lower()
words = content.split()
word_dic = {}
for word in words:
if word in word_dic.keys():
word_dic[word] += 1
else:
word_dic[word] = 1
if len(word_dic):
word_list = sorted(word_dic.items(), key=lambda x:x[1], reverse=True)
print("%s : %s -- %d" % (path, word_list[0][0], word_list[0][1]))
# Problem 0007: there is a directory containing programs you have written; count how many lines of code you wrote, including blank lines and comments, listed separately
import os
# Comment styles: //, #, /* */, """ """, ''' '''
def lines_of_codes(dir_path=None):
code_num = 0
note_num = 0
blank_line_num = 0
if dir_path is None:
return code_num, note_num, blank_line_num
for root, dirs, files in os.walk(dir_path):
mut_note = None
for path in files:
if path.startswith("."):
continue
file_path = os.path.join(root, path)
            for count, line in enumerate(open(file_path, "r")):
                # check whether the line is blank
if not line.split():
blank_line_num += 1
continue
                # check whether we are inside a multi-line comment
if mut_note is not None:
note_num += 1
match_note = re.match("\*/|\"\"\"|\'\'\'", line)
if match_note is not None:
mut_note = None
match_note = re.match("\/\*|\"\"\"|\'\'\'", line)
if match_note is not None:
mut_note = line[match_note.pos:(match_note.endpos - 1)]
continue
else:
match_note = re.match("\/\*|\"\"\"|\'\'\'", line)
if match_note is not None:
note_num += 1
mut_note = line[match_note.pos:(match_note.endpos - 1)]
continue
                # check for a single-line comment
match_note1 = re.match("\s*(#|//).*\n*", line)
if match_note1 is not None:
note_num += 1
continue
pass
code_num += count + 1
return code_num, note_num, blank_line_num
# Problem 0008: given an HTML file, extract the body text from it
# install the dependency with: pip install beautifulsoup4
from bs4 import BeautifulSoup
def get_html_context(url=None):
if url is None:
return None
content = request.urlopen(url).read().decode("utf-8")
soup = BeautifulSoup(content)
[script.extract() for script in soup.find_all('script')]
[style.extract() for style in soup.find_all('style')]
soup.prettify()
reg = re.compile("<[^>]*>")
ret_content = reg.sub('', soup.prettify())
# print(ret_content)
return ret_content
# Problem 0009: given an HTML file, find the links in it
# look for <a> tags, using HTMLParser
from urllib import request
from tools import dxhtmlparser
def get_html_links(url=None):
if url is None:
return None
content = request.urlopen(url).read().decode("utf-8")
dxparser = dxhtmlparser.DXHTMLParser('a', 'href', url)
dxparser.feed(content)
links = dxparser.getrets()
return links
# Problem 0010: use Python to generate a CAPTCHA image made of letters
def create_verification_code():
im, str = imager.verification_code()
im.show()
# Problem 0011: the sensitive-word text file filtered_words.txt contains the following words,
# 北京, 程序员, 公务员, 领导, 牛比, 牛逼, 你娘, 你妈, love, sex, jiangge
# when the user input contains a sensitive word, print Freedom; otherwise print Human Rights.
def find_sensitive_words(sensitive_file=None, input_string=None):
if sensitive_file is None or input_string is None:
return None
file = open(sensitive_file, "r")
sensitive_words = file.read().split()
is_sensitive = False
for sensitive in sensitive_words:
if sensitive in input_string:
is_sensitive = True
print("Freedom")
if not is_sensitive:
print("Human Rights")
# Problem 0012: the sensitive-word text file filtered_words.txt has the same contents as in Problem 0011;
# when the user input contains a sensitive word, replace it with asterisks *, e.g. the input 「北京是个好城市」 becomes 「**是个好城市」.
def replace_sensitive_words(sensitive_file=None, input_string=None):
if sensitive_file is None or input_string is None:
return None
file = open(sensitive_file, "r")
sensitive_words = file.read().split()
for sensitive in sensitive_words:
if sensitive in input_string:
replace_str = "*" * len(sensitive)
input_string = input_string.replace(sensitive, replace_str)
print(input_string)
# Problem 0013: write an image-crawling program in Python
from tools import geturlimgs
def get_url_imgs(url=None):
if url is None:
return None
tmp = geturlimgs.geturlimgs()
tmp.get_imgs(url, "/Users/xieyajie/Desktop/Python/ShowMeCode/xyjxyf/0013/")
# Problem 0014: the plain-text file student.txt holds student records; write them into a student.xls file
# Problem 0015: the plain-text file city.txt holds city records; write them into a city.xls file
import json
import xlwt
def dictxt_to_xls(file_path=None):
if file_path is None:
return
file = open(file_path, 'r')
if file is None:
return
content = json.loads(file.read())
list_data = sorted(content.items(), key=lambda d:d[0])
(path, name)=os.path.split(file.name)
file_name = name.split('.')[0]
wb = xlwt.Workbook()
ws = wb.add_sheet(file_name)
row = 0
for item in list_data:
col = 0
ws.write(row, col, item[0])
col += 1
value = item[1]
if type(value) == list:
for obj in value:
ws.write(row, col, obj)
col += 1
else:
ws.write(row, col, value)
row += 1
save_path = path + "/" + file_name + ".xls"
wb.save(save_path)
# Problem 0016: write the contents of the plain-text file numbers.txt into a numbers.xls file
def listtxt_to_xls(file_path=None):
if file_path is None:
return
file = open(file_path)
if file is None:
return
content = json.loads(file.read())
content.sort(key=lambda x:x[0])
(path, name)=os.path.split(file.name)
file_name = name.split('.')[0]
wb = xlwt.Workbook()
ws = wb.add_sheet(file_name)
for i in range(len(content)):
col = 0
list = content[i]
for value in list:
ws.write(i, col, content[i][col])
col += 1
save_path = path + "/" + file_name + ".xls"
wb.save(save_path)
# pip3 install xlrd
import xlrd
def read_xls(file_path=None):
if file_path is None:
return None
data_list = {}
wb = xlrd.open_workbook(file_path)
sheet_names = wb.sheet_names()
for name in sheet_names:
table = wb.sheet_by_name(name)
table_data = []
for i in range(table.nrows):
row_data = table.row_values(i)
table_data.append(row_data)
data_list[name] = table_data
return data_list
# Problem 0017: write the contents of the student.xls file from Problem 0014 into a student.xml file
# use the DOM API
from tools import stringer
from xml.dom.minidom import Document
def write_student_to_xml(dic=None, to_path=None):
if dic is None or to_path is None:
return None
doc = Document()
root_node = doc.createElement("root")
doc.appendChild(root_node)
stu_node = doc.createElement("students")
root_node.appendChild(stu_node)
note_node = doc.createComment("\n\t学生信息表\n\t\"id\" : [名字, 数学, 语文, 英文]\n\t")
stu_node.appendChild(note_node)
# data = json.dumps(dic, ensure_ascii=False, indent=1)
dic_node = doc.createTextNode(stringer.dict_to_json(dic, "\t\t"))
stu_node.appendChild(dic_node)
file = open(to_path, "w")
file.write(doc.toprettyxml())
# doc.writexml(file,' ',' ','\n','utf-8')
file.close()
# Problem 0018: write the contents of the city.xls file from Problem 0015 into a city.xml file
# use lxml
import codecs
from lxml import etree
def write_city_to_xml(dic=None, to_path=None):
if dic is None or to_path is None:
return None
root_node = etree.Element('root')
root_node.text = "\n\t"
city_node = etree.SubElement(root_node, 'citys')
comment_node = etree.Comment("\n城市信息\n")
comment_node.tail = "\n\t"
city_node.append(comment_node)
city_node.text = "\n\t" + stringer.dict_to_json(dic, "\t") + u'\n'
city_node.tail = "\n"
city_tree = etree.ElementTree(root_node)
city_tree.write(to_path, pretty_print=True, xml_declaration=True, encoding='utf-8')
# output = codecs.open(to_path, 'w', 'utf-8')
# output.write(etree.tounicode(city_tree.getroot()))
# output.close()
# Problem 0019: write the contents of the numbers.xls file from Problem 0016 into a numbers.xml file
def write_numbers_to_xml(list=None, to_path=None):
if list is None or to_path is None:
return None
root_node = etree.Element('root')
root_node.text = "\n\t"
number_node = etree.SubElement(root_node, 'numbers')
comment_node = etree.Comment("\n数字信息\n")
comment_node.tail = "\n\t"
number_node.append(comment_node)
number_node.text = "\n\t" + stringer.list_to_json(list, "\t") + u'\n'
number_node.tail = "\n"
number_tree = etree.ElementTree(root_node)
number_tree.write(to_path, pretty_print=True, xml_declaration=True, encoding='utf-8')
# Problem 0020: log in to the China Unicom online service hall, choose "Self Service" --> "Detailed Bill Query", pick the time range to query and click the "Query" button;
# at the bottom of the result page, click "Export" to generate a file like 2014年10月01日~2014年10月31日通话详单.xls.
# Write code to total up the call time for each month
import datetime
def statistics_month_time():
dic = {}
wb = xlrd.open_workbook("./0020/0020.xls")
sheet = wb.sheets()[0]
row_count = sheet.nrows
for i in range(1, sheet.nrows):
values = sheet.row_values(i)
ym_str = values[2][:6]
time_str = values[3]
if '时' in time_str:
time_str = re.sub('时', '.', time_str)
if '分' in time_str:
time_str = re.sub('分', '.', time_str)
if '秒' in time_str:
time_str = re.sub('秒', '', time_str)
tmp = time_str.split('.')
j = len(tmp) - 1
sum = int(tmp[j])
while j > -1:
sum = sum + (len(tmp) - 1 - j) * 60 * int(tmp[j])
j = j - 1
if ym_str in dic:
dic[ym_str] = dic[ym_str] + int(sum)
else:
dic[ym_str] = int(sum)
# i = i + 1
return dic
# Problem 0021: use Python to encrypt a password
from hashlib import sha256
from hmac import HMAC
def encrypt_password(password, salt=None):
if salt is None:
salt = os.urandom(8)
if isinstance(password, str):
password = password.encode('UTF-8')
ret = password
for i in range(10):
ret = HMAC(ret, salt, sha256).digest()
return salt + ret
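# Illustrative sketch: a matching verification helper for encrypt_password,
# assuming the stored value is the 8-byte salt followed by the final HMAC
# digest, exactly as produced by the function above.
from hmac import compare_digest
def check_password(stored, password):
    # re-derive with the stored salt and compare in constant time
    salt = stored[:8]
    return compare_digest(encrypt_password(password, salt), stored)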
# Problem 0022: the iPhone 6 and iPhone 6 Plus have long been on sale; check whether your code from Problem 0005 can be reused
# Problem 0023: use a Python web framework to build a web-based guestbook application
# Problem 0024: use a Python web framework to build a web-based TodoList application
# Problem 0025: with Python, shout at your computer and have it automatically open the browser's default website
# implemented in folder 0025
if __name__ == "__main__":
# 0000
# add_num(".0000/0000.jpg")
# 0001
# create_activation_code()
# 0002 ?????
# save_activation_code_to_mysql()
# 0003
# save_activation_code_to_redis()
# 0004
# number_of_words("./0004/0004.txt")
# 0005
# reset_images_size("./0005")
# 0006
# get_most_important_word("./0006")
# 0007
# code, note, blank_line = lines_of_codes("./0007")
# print("代码行数:%i\n注释行数:%i\n空行行数:%i" % (code, note, blank_line))
# 0008
# get_html_context("http://blog.bccn.net")
# 0009
# get_html_links("http://blog.bccn.net")
# 0010
# create_verification_code()
# 0011
# find_sensitive_words("./0011/0011.txt", "haha, 北京不错")
# 0012
# replace_sensitive_words("./0011/0011.txt", "haha, 北京不错")
# 0013
# get_url_imgs("http://www.ivsky.com/tupian/beijing_t1542/index_2.html")
# 0014
# dictxt_to_xls("./0014/student.txt")
# 0015
# dictxt_to_xls("./0015/city.txt")
# 0016
# listtxt_to_xls("./0016/numbers.txt")
# 0017
# student_dic = read_xls("./0017/student.xls")
# for key in student_dic:
# student = student_dic[key]
#
# dic = {}
# for list in student:
# dic[list[0]] = list[1:]
#
# write_student_to_xml(dic, "./0017/student.xml")
#
# break
# 0018
# city_dic = read_xls("./0018/city.xls")
# for key in city_dic:
# city = city_dic[key]
#
# dic = {}
# for list in city:
# dic[list[0]] = list[1:]
#
# write_city_to_xml(dic, "./0018/city.xml")
#
# break
# 0019
# number_dic = read_xls("./0019/numbers.xls")
# for key in number_dic:
# number = number_dic[key]
# write_numbers_to_xml(number, "./0019/numbers.xml")
#
# break
# 0020
# statistics_month_time()
# 0021
encrypt_password("<PASSWORD>")
# 0022
# 0023
# 0024
# 0025
|
veles/loader/saver.py | AkshayJainG/veles | 1,007 | 11103786 | <gh_stars>1000+
# -*- coding: utf-8 -*-
"""
.. invisible:
_ _ _____ _ _____ _____
| | | | ___| | | ___/ ___|
| | | | |__ | | | |__ \ `--.
| | | | __|| | | __| `--. \
\ \_/ / |___| |___| |___/\__/ /
\___/\____/\_____|____/\____/
Created on Jan 23, 2015
Defines classes to save and to load an arbitrary Loader's output for 1 epoch.
███████████████████████████████████████████████████████████████████████████████
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
███████████████████████████████████████████████████████████████████████████████
"""
import bz2
import gzip
from io import SEEK_END
import os
import numpy
from six import BytesIO
import snappy
from zope.interface import implementer
from veles import error
from veles.compat import from_none, lzma
from veles.config import root
from veles.loader.base import Loader, ILoader, CLASS_NAME, TRAIN
from veles.pickle2 import pickle, best_protocol
from veles.snapshotter import SnappyFile
from veles.units import Unit, IUnit
if not hasattr(gzip, "decompress"):
def decompress(data):
"""Decompress a gzip compressed string in one shot.
Return the decompressed string.
"""
with gzip.GzipFile(fileobj=gzip.io.BytesIO(data)) as f:
return f.read()
gzip.decompress = decompress
@implementer(IUnit)
class MinibatchesSaver(Unit):
"""Saves data from Loader to pickle file.
"""
CODECS = {
"raw": lambda f, _: f,
"snappy": lambda f, _: SnappyFile(f, "wb"),
"gz": lambda f, l: gzip.GzipFile(None, fileobj=f, compresslevel=l),
"bz2": lambda f, l: bz2.BZ2File(f, compresslevel=l),
"xz": lambda f, l: lzma.LZMAFile(f, preset=l)
}
def __init__(self, workflow, **kwargs):
super(MinibatchesSaver, self).__init__(workflow, **kwargs)
kwargs["view_group"] = kwargs.get("view_group", "SERVICE")
self.file_name = os.path.abspath(kwargs.get(
"file_name", os.path.join(root.common.dirs.cache,
"minibatches.dat")))
self.compression = kwargs.get("compression", "snappy")
self.compression_level = kwargs.get("compression_level", 9)
self.class_chunk_sizes = kwargs.get("class_chunk_sizes", (0, 0, 1))
self.offset_table = []
self.demand(
"minibatch_data", "minibatch_labels", "minibatch_class",
"class_lengths", "max_minibatch_size", "minibatch_size",
"shuffle_limit", "has_labels", "labels_mapping")
def init_unpickled(self):
super(MinibatchesSaver, self).init_unpickled()
self._file_ = None
@property
def file(self):
return self._file_
@property
def effective_class_chunk_sizes(self):
chunk_sizes = []
for ci, cs in enumerate(self.class_chunk_sizes):
if cs == 0:
cs = self.max_minibatch_size
elif cs > self.max_minibatch_size:
raise ValueError(
"%s's chunk size may not exceed max minibatch size = %d ("
"got %d)" % (CLASS_NAME[ci], self.max_minibatch_size, cs))
chunk_sizes.append(cs)
return tuple(chunk_sizes)
def initialize(self, **kwargs):
if self.shuffle_limit != 0:
raise error.VelesException(
"You must disable shuffling in your loader (set shuffle_limit "
"to 0)")
self._file_ = open(self.file_name, "wb")
pickle.dump(self.get_header_data(), self.file, protocol=best_protocol)
def get_header_data(self):
return self.compression, self.class_lengths, self.max_minibatch_size, \
self.effective_class_chunk_sizes, \
self.minibatch_data.shape, self.minibatch_data.dtype, \
self.minibatch_labels.shape if self.has_labels else None, \
self.minibatch_labels.dtype if self.has_labels else None, \
self.labels_mapping
def prepare_chunk_data(self):
self.minibatch_data.map_read()
self.minibatch_labels.map_read()
arr_data = numpy.zeros(
(self.effective_class_chunk_sizes[self.minibatch_class],) +
self.minibatch_data.shape[1:], dtype=self.minibatch_data.dtype)
if self.has_labels:
arr_labels = numpy.zeros(
(self.effective_class_chunk_sizes[self.minibatch_class],) +
self.minibatch_labels.shape[1:], self.minibatch_labels.dtype)
else:
arr_labels = None
return arr_data, arr_labels
def fill_chunk_data(self, prepared, interval):
prepared[0][:] = self.minibatch_data[interval[0]:interval[1]]
if self.has_labels:
prepared[1][:] = self.minibatch_labels[interval[0]:interval[1]]
def run(self):
prepared = self.prepare_chunk_data()
chunk_size = self.effective_class_chunk_sizes[self.minibatch_class]
chunks_number = int(numpy.ceil(self.max_minibatch_size / chunk_size))
for i in range(chunks_number):
self.offset_table.append(numpy.uint64(self.file.tell()))
file = MinibatchesSaver.CODECS[self.compression](
self.file, self.compression_level)
self.fill_chunk_data(
prepared, (i * chunk_size, (i + 1) * chunk_size))
pickle.dump(prepared, file, protocol=best_protocol)
file.flush()
def stop(self):
if self.file.closed:
return
pos = self.file.tell()
pickle.dump(self.offset_table, self.file, protocol=best_protocol)
self.debug("Offset table took %d bytes", self.file.tell() - pos)
self.file.close()
self.info("Wrote %s", self.file_name)
def decompress_snappy(data):
bio_in = BytesIO(data)
bio_out = BytesIO()
snappy.stream_decompress(bio_in, bio_out)
return bio_out.getvalue()
@implementer(ILoader)
class MinibatchesLoader(Loader):
CODECS = {
"raw": lambda b: b,
"snappy": decompress_snappy,
"gz": gzip.decompress,
"bz2": bz2.decompress,
"xz": lzma.decompress,
}
MAPPING = "minibatches_loader"
def __init__(self, workflow, **kwargs):
super(MinibatchesLoader, self).__init__(workflow, **kwargs)
self.file_name = kwargs["file_name"]
self._file_ = None
self.offset_table = []
self.chunk_numbers = None
self.mb_chunk_numbers = None
self.class_chunk_lengths = None
self.minibatch_data_shape = None
self.minibatch_data_dtype = None
self.minibatch_labels_shape = None
self.minibatch_labels_dtype = None
self.decompress = None
@property
def file(self):
return self._file_
def load_data(self):
self._file_ = open(self.file_name, "rb")
(codec, class_lengths, self.old_max_minibatch_size,
self.class_chunk_lengths,
self.minibatch_data_shape, self.minibatch_data_dtype,
self.minibatch_labels_shape, self.minibatch_labels_dtype,
self._labels_mapping) = \
pickle.load(self.file)
self.class_lengths[:] = class_lengths
self._has_labels = self.minibatch_labels_shape is not None
self._reversed_labels_mapping[:] = sorted(self.labels_mapping)
self.decompress = MinibatchesLoader.CODECS[codec]
self.chunk_numbers = []
for ci, cl in enumerate(self.class_lengths):
mb_chunks = int(numpy.ceil(self.old_max_minibatch_size /
self.class_chunk_lengths[ci]))
mb_count = int(numpy.ceil(cl / self.old_max_minibatch_size))
self.chunk_numbers.append(mb_chunks * mb_count)
class BytesMeasurer(object):
def __init__(self):
self.size = 0
def write(self, data):
self.size += len(data)
bm = BytesMeasurer()
fake_table = [numpy.uint64(i) for i in range(sum(self.chunk_numbers))]
pickle.dump(fake_table, bm, protocol=best_protocol)
self.file.seek(-bm.size, SEEK_END)
try:
self.offset_table = pickle.load(self.file)
except pickle.UnpicklingError as e:
self.error("Failed to read the offset table (table offset was %d)",
bm.size)
raise from_none(e)
for i, offset in enumerate(self.offset_table):
self.offset_table[i] = int(offset)
# Virtual end
self.offset_table.append(self.file.tell() - bm.size)
self.debug("Offsets: %s", self.offset_table)
if self.class_lengths[TRAIN] == 0:
assert self.normalization_type == "none", \
"You specified \"%s\" normalization but there are no train " \
"samples to analyze." % self.normalization_type
self.normalizer.analyze(self.minibatch_data.mem)
def create_minibatch_data(self):
self.minibatch_data.reset(numpy.zeros(
(self.max_minibatch_size,) + self.minibatch_data_shape[1:],
dtype=self.minibatch_data_dtype))
def fill_minibatch(self):
chunks_map = [
self.get_address(sample) + (i,) for i, sample in
enumerate(self.minibatch_indices.mem[:self.minibatch_size])]
chunks_map.sort()
prev_chunk_number = -1
chunk = None
for chunk_number, chunk_offset, index in chunks_map:
if prev_chunk_number != chunk_number:
prev_chunk_number = chunk_number
self.file.seek(self.offset_table[chunk_number])
buffer = self.file.read(self.offset_table[chunk_number + 1] -
self.offset_table[chunk_number])
chunk = pickle.loads(self.decompress(buffer))
mb_data, mb_labels = chunk
self.minibatch_data[index] = mb_data[chunk_offset]
if self.has_labels:
self.minibatch_labels[index] = mb_labels[chunk_offset]
def map_minibatch_labels(self):
# Already done in fill_minibatch()
pass
def get_address(self, index):
class_index, class_remainder = self.class_index_by_sample_index(index)
chunk_length = self.class_chunk_lengths[class_index]
chunk_number = sum(self.chunk_numbers[:class_index])
class_offset = self.class_lengths[class_index] - class_remainder
mb_chunks = int(numpy.ceil(self.old_max_minibatch_size / chunk_length))
mb_ind, mb_off = divmod(class_offset, self.old_max_minibatch_size)
chunk_number += mb_ind * mb_chunks
mb_ind, mb_off = divmod(mb_off, chunk_length)
return chunk_number, mb_off
|
sfaira/data/interactive/loader.py | theislab/sfaira | 110 | 11103792 | <reponame>theislab/sfaira
import anndata
from typing import Union
from sfaira.data import DatasetBase
class DatasetInteractive(DatasetBase):
def __init__(
self,
data: anndata.AnnData,
organism: str,
organ: str,
gene_symbol_col: Union[str, None] = 'index',
gene_ens_col: Union[str, None] = None,
obs_key_celltypes: Union[str, None] = None,
class_maps: dict = {},
dataset_id: str = "interactive_dataset",
data_path: Union[str, None] = ".",
meta_path: Union[str, None] = ".",
cache_path: Union[str, None] = ".",
):
"""
Load data set into sfaira data format.
:param data: Data set.
:param organism: Organism of data set.
:param organ: Organ of data set.
:param gene_symbol_col: Column name in .var which contains gene symbols. Set to "index" to use the index.
:param gene_ens_col: Column name in .var which contains ENSG symbols. Set to "index" to use the index.
:param obs_key_celltypes: .obs column name which contains cell type labels.
:param class_maps: Cell type class maps.
        :param dataset_id: Identifier of data set.
:param data_path:
:param meta_path:
:param cache_path:
"""
super().__init__(data_path=data_path, meta_path=meta_path, cache_path=cache_path)
self.id = dataset_id
self.author = "interactive_dataset"
self.doi_journal = "interactive_dataset"
self.download_url_data = "."
self.download_url_meta = "."
# self.age # not currently supported
# self.assay_sc # not currently supported
# self.assay_differentiation # not currently supported
# self.assay_type_differentiation # not currently supported
# self.cell_line # not currently supported
# self.dev_stage # not currently supported
# self.ethnicity # not currently supported
# self.healthy # not currently supported
# self.normalisation # not currently supported
self.organ = organ
self.organism = organism
# self.sample_source # not currently supported
# self.sex # not currently supported
# self.state_exact # not currently supported
# self.year # not currently supported
self.obs_key_cell_types_original = obs_key_celltypes
# self.obs_key_age # not currently supported
# self.obs_key_assay_sc # not currently supported
# self.obs_key_assay_differentiation # not currently supported
# self.obs_key_assay_type_differentiation # not currently supported
# self.obs_key_cell_line # not currently supported
# self.obs_key_dev_stage # not currently supported
# self.obs_key_ethnicity # not currently supported
# self.obs_key_healthy # not currently supported
# self.obs_key_organ # not currently supported
# self.obs_key_organism # not currently supported
# self.obs_key_sample_source # not currently supported
# self.obs_key_sex # not currently supported
# self.obs_key_state_exact # not currently supported
self.gene_id_symbols_var_key = gene_symbol_col
self.gene_id_ensembl_var_key = gene_ens_col
self.class_maps = class_maps
self.adata = data
def _load(self):
pass
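# Illustrative usage sketch (organism/organ values and shapes are placeholders,
# kept as a comment only):
#
#   import anndata, numpy as np
#   adata = anndata.AnnData(X=np.ones((10, 3)))
#   ds = DatasetInteractive(data=adata, organism="human", organ="lung",
#                           gene_symbol_col="index", obs_key_celltypes=None)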
|
tests/test_choices_display_option.py | mfogel/django-timezone-field | 263 | 11103796 | import pytest
import pytz
from django import forms
from django.db import models
from timezone_field import TimeZoneField, TimeZoneFormField
common_tz_names = tuple(tz for tz in pytz.common_timezones)
common_tz_objects = tuple(pytz.timezone(tz) for tz in pytz.common_timezones)
class ChoicesDisplayForm(forms.Form):
limited_tzs = [
'Asia/Tokyo',
'Asia/Dubai',
'America/Argentina/Buenos_Aires',
'Africa/Nairobi',
]
limited_choices = [(tz, tz) for tz in limited_tzs]
tz_none = TimeZoneFormField()
tz_standard = TimeZoneFormField(choices_display='STANDARD')
tz_with_gmt_offset = TimeZoneFormField(choices_display='WITH_GMT_OFFSET')
tz_limited_none = TimeZoneFormField(choices=limited_choices)
tz_limited_standard = TimeZoneFormField(choices=limited_choices, choices_display='STANDARD')
tz_limited_with_gmt_offset = TimeZoneFormField(
choices=limited_choices,
choices_display='WITH_GMT_OFFSET',
)
class ChoicesDisplayModel(models.Model):
limited_tzs = [
'Asia/Tokyo',
'Asia/Dubai',
'America/Argentina/Buenos_Aires',
'Africa/Nairobi',
]
limited_choices = [(tz, tz) for tz in limited_tzs]
tz_none = TimeZoneField()
tz_standard = TimeZoneField(choices_display='STANDARD')
tz_with_gmt_offset = TimeZoneField(choices_display='WITH_GMT_OFFSET')
tz_limited_none = TimeZoneField(choices=limited_choices)
tz_limited_standard = TimeZoneField(choices=limited_choices, choices_display='STANDARD')
tz_limited_with_gmt_offset = TimeZoneField(
choices=limited_choices,
choices_display='WITH_GMT_OFFSET',
)
class ChoicesDisplayModelForm(forms.ModelForm):
class Meta:
model = ChoicesDisplayModel
fields = '__all__'
def test_db_field_invalid_choices_display():
with pytest.raises(ValueError):
TimeZoneField(choices_display='invalid')
def test_form_field_invalid_choices_display():
with pytest.raises(ValueError):
TimeZoneFormField(choices_display='invalid')
def test_form_field_none():
form = ChoicesDisplayForm()
values, displays = zip(*form.fields['tz_none'].choices)
assert values == common_tz_names
assert displays[values.index('America/Los_Angeles')] == 'America/Los Angeles'
assert displays[values.index('Asia/Kolkata')] == 'Asia/Kolkata'
def test_form_field_standard():
form = ChoicesDisplayForm()
assert form.fields['tz_standard'].choices == form.fields['tz_none'].choices
def test_form_field_with_gmt_offset():
form = ChoicesDisplayForm()
values, displays = zip(*form.fields['tz_with_gmt_offset'].choices)
assert values != common_tz_names
assert sorted(values) == sorted(common_tz_names)
assert (
displays[values.index('America/Argentina/Buenos_Aires')]
== 'GMT-03:00 America/Argentina/Buenos Aires'
)
assert displays[values.index('Europe/Moscow')] == 'GMT+03:00 Europe/Moscow'
def test_form_field_limited_none():
form = ChoicesDisplayForm()
assert form.fields['tz_limited_none'].choices == [
('Asia/Tokyo', 'Asia/Tokyo'),
('Asia/Dubai', 'Asia/Dubai'),
('America/Argentina/Buenos_Aires', 'America/Argentina/Buenos_Aires'),
('Africa/Nairobi', 'Africa/Nairobi'),
]
def test_form_field_limited_standard():
form = ChoicesDisplayForm()
assert form.fields['tz_limited_standard'].choices == [
('Asia/Tokyo', 'Asia/Tokyo'),
('Asia/Dubai', 'Asia/Dubai'),
('America/Argentina/Buenos_Aires', 'America/Argentina/Buenos Aires'),
('Africa/Nairobi', 'Africa/Nairobi'),
]
def test_form_field_limited_with_gmt_offset():
form = ChoicesDisplayForm()
assert form.fields['tz_limited_with_gmt_offset'].choices == [
('America/Argentina/Buenos_Aires', 'GMT-03:00 America/Argentina/Buenos Aires'),
('Africa/Nairobi', 'GMT+03:00 Africa/Nairobi'),
('Asia/Dubai', 'GMT+04:00 Asia/Dubai'),
('Asia/Tokyo', 'GMT+09:00 Asia/Tokyo'),
]
def test_model_form_field_none():
form = ChoicesDisplayModelForm()
values, displays = zip(*form.fields['tz_none'].choices)
assert values == ('',) + common_tz_objects
assert displays[values.index(pytz.timezone('America/Los_Angeles'))] == 'America/Los Angeles'
assert displays[values.index(pytz.timezone('Asia/Kolkata'))] == 'Asia/Kolkata'
def test_model_form_field_standard():
form = ChoicesDisplayModelForm()
assert form.fields['tz_standard'].choices == form.fields['tz_none'].choices
def test_model_form_field_with_gmt_offset():
form = ChoicesDisplayModelForm()
values, displays = zip(*form.fields['tz_with_gmt_offset'].choices)
assert values != common_tz_objects
assert sorted(str(v) for v in values) == sorted([''] + [str(tz) for tz in common_tz_objects])
assert (
displays[values.index(pytz.timezone('America/Argentina/Buenos_Aires'))]
== 'GMT-03:00 America/Argentina/Buenos Aires'
)
assert displays[values.index(pytz.timezone('Europe/Moscow'))] == 'GMT+03:00 Europe/Moscow'
def test_model_form_field_limited_none():
form = ChoicesDisplayModelForm()
assert form.fields['tz_limited_none'].choices == [
('', '---------'),
(pytz.timezone('Asia/Tokyo'), 'Asia/Tokyo'),
(pytz.timezone('Asia/Dubai'), 'Asia/Dubai'),
(pytz.timezone('America/Argentina/Buenos_Aires'), 'America/Argentina/Buenos_Aires'),
(pytz.timezone('Africa/Nairobi'), 'Africa/Nairobi'),
]
def test_model_form_field_limited_standard():
form = ChoicesDisplayModelForm()
assert form.fields['tz_limited_standard'].choices == [
('', '---------'),
(pytz.timezone('Asia/Tokyo'), 'Asia/Tokyo'),
(pytz.timezone('Asia/Dubai'), 'Asia/Dubai'),
(pytz.timezone('America/Argentina/Buenos_Aires'), 'America/Argentina/Buenos Aires'),
(pytz.timezone('Africa/Nairobi'), 'Africa/Nairobi'),
]
def test_model_form_field_limited_with_gmt_offset():
form = ChoicesDisplayModelForm()
assert form.fields['tz_limited_with_gmt_offset'].choices == [
('', '---------'),
(
pytz.timezone('America/Argentina/Buenos_Aires'),
'GMT-03:00 America/Argentina/Buenos Aires',
),
(pytz.timezone('Africa/Nairobi'), 'GMT+03:00 Africa/Nairobi'),
(pytz.timezone('Asia/Dubai'), 'GMT+04:00 Asia/Dubai'),
(pytz.timezone('Asia/Tokyo'), 'GMT+09:00 Asia/Tokyo'),
]
|
enferno/public/views.py | rovak73/enferno | 373 | 11103808 | from flask import Flask, request, abort, Response, redirect, url_for, flash, Blueprint, send_from_directory
from flask.templating import render_template
from flask_security.decorators import roles_required, login_required
bp_public = Blueprint('public',__name__, static_folder='../static')
@bp_public.after_request
def add_header(response):
response.headers['Cache-Control'] = 'public, max-age=10800'
return response
@bp_public.route('/')
def index():
return render_template('index.html')
@bp_public.route('/robots.txt')
def static_from_root():
return send_from_directory(bp_public.static_folder, request.path[1:]) |
数值分析/t22.py | jasnzhuang/Personal-Homework | 463 | 11103820 | import scipy.optimize
def newton(f, fp, x0, delta=1e-4):
cnt = 0
x = x0
print('x0 = %.10lf' % x0)
while abs(f(x)) > delta or abs(x - x0) > delta:
s = f(x) / fp(x)
x0, x = x, x - s
lbd = 1
while abs(f(x)) >= abs(f(x0)):
x = x0 - lbd * s
lbd /= 2
cnt += 1
print('x%d = %.10lf, lambda = %.10lf' % (cnt, x, lbd))
return x
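# The loop above implements a damped Newton step: when the full step
# x - f(x)/f'(x) fails to reduce |f|, the step length lambda is halved until it
# does, which helps the iteration settle for starting points where plain Newton
# would overshoot (e.g. those used in gen22 below).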
def gen22():
def func1(x):
return x**3 - x - 1
def func1p(x):
return 3*x*x - 1
def func2(x):
return -x**3 + 5 * x
def func2p(x):
return -3*x*x + 5
print('t22 result'.center(30, '-'))
x = newton(func1, func1p, 0.6)
print('final result: x = %.10lf, f(x) = %.10lf' % (x, func1(x)))
print('scipy gives: %.10lf' % (scipy.optimize.fsolve(func1, 0.6)))
x = newton(func2, func2p, 1.35)
print('final result: x = %.10lf, f(x) = %.10lf' % (x, func2(x)))
print('scipy gives: %.10lf' % (scipy.optimize.fsolve(func2, 1.35)))
print('end t22'.center(30, '-')+'\n')
if __name__ == '__main__':
gen22()
|
experiment/__init__.py | Octavian-ai/mac-graph | 116 | 11103847 | <filename>experiment/__init__.py
from .args import get_args
from .estimator_worker_test import EstimatorWorkerTestCase
|
setup.py | sha256/python-var-dump | 147 | 11103848 | from distutils.core import setup
setup(
name='var_dump',
version='1.2',
packages=['var_dump'],
url='http://github.com/sha256/python-var-dump',
license='BSD',
author='<NAME>',
author_email='<EMAIL>',
description='var_dump for python',
download_url='http://github.com/sha256/python-var-dump/tarball',
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
|
backend/cloud_inquisitor/data/migrations/versions/cfb0ed4cced9_new_accounts_table.py | MrSecure/cloud-inquisitor | 462 | 11103863 | """New accounts table
Revision ID: cfb0ed4cced9
Revises: <PASSWORD>cdfbc<PASSWORD>
Create Date: 2018-07-10 13:26:01.588708
"""
from json import dumps as to_json
from alembic import op
import sqlalchemy as sa
from sqlalchemy import text, inspect
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = 'cfb0ed4cced9'
down_revision = '<KEY>'
select_ai = 'SELECT AUTO_INCREMENT FROM information_schema.TABLES WHERE TABLE_SCHEMA = :db AND TABLE_NAME = :table'
select_cfg_item = 'SELECT value FROM config_items WHERE namespace_prefix = :ns AND `key` = :key'
select_acct_types = 'SELECT account_type_id, account_type FROM account_types'
insert_acct_type = 'INSERT INTO account_types (account_type) VALUES (:name)'
insert_acct = (
'INSERT INTO accounts_new (account_id, account_name, account_type_id, contacts, enabled, required_roles)'
' VALUES(:id, :name, :type_id, :contacts, :enabled, :required_roles)'
)
insert_acct_prop = 'INSERT INTO account_properties (account_id, name, value) VALUES (:id, :name, :value)'
def upgrade():
create_new_tables()
migrate_data()
switch_tables()
def downgrade():
raise Exception('You cannot downgrade from this version')
def create_new_tables():
op.create_table('account_types',
sa.Column('account_type_id', mysql.INTEGER(unsigned=True), nullable=False, autoincrement=True),
sa.Column('account_type', sa.String(length=100), nullable=False),
sa.PrimaryKeyConstraint('account_type_id')
)
op.create_index(op.f('ix_account_types_account_type'), 'account_types', ['account_type'], unique=True)
op.create_table('accounts_new',
sa.Column('account_id', mysql.INTEGER(unsigned=True), nullable=False),
sa.Column('account_name', sa.String(length=256), nullable=False),
sa.Column('account_type_id', mysql.INTEGER(unsigned=True), nullable=False),
sa.Column('contacts', mysql.JSON(), nullable=False),
sa.Column('enabled', mysql.SMALLINT(unsigned=True), nullable=False),
sa.Column('required_roles', mysql.JSON(), nullable=True),
sa.ForeignKeyConstraint(
('account_type_id',),
['account_types.account_type_id'],
name='fk_account_account_type_id',
ondelete='CASCADE'
),
sa.PrimaryKeyConstraint('account_id')
)
op.create_index(op.f('ix_accounts_new_account_name'), 'accounts_new', ['account_name'], unique=True)
op.create_index(op.f('ix_accounts_new_account_type_id'), 'accounts_new', ['account_type_id'], unique=False)
op.create_table('account_properties',
sa.Column('property_id', mysql.INTEGER(unsigned=True), nullable=False, autoincrement=True),
sa.Column('account_id', mysql.INTEGER(unsigned=True), nullable=False),
sa.Column('name', sa.String(length=50), nullable=False),
sa.Column('value', mysql.JSON(), nullable=False),
sa.ForeignKeyConstraint(
('account_id',),
['accounts_new.account_id'],
name='fk_account_properties_account_id',
ondelete='CASCADE'
),
sa.PrimaryKeyConstraint('property_id', 'account_id')
)
op.create_index(op.f('ix_account_properties_account_id'), 'account_properties', ['account_id'], unique=False)
op.create_index(op.f('ix_account_properties_name'), 'account_properties', ['name'], unique=False)
def migrate_data():
conn = op.get_bind()
account_types = {x['account_type']: x['account_type_id'] for x in conn.execute(text(select_acct_types))}
try:
schema = inspect(conn.engine).default_schema_name
conn.execute('SET FOREIGN_KEY_CHECKS=0')
res = conn.execute(text(select_ai), {'db': schema, 'table': 'accounts'})
acct_auto_increment = next(res)['AUTO_INCREMENT']
for acct_type in ('AWS', 'DNS: AXFR', 'DNS: CloudFlare'):
if acct_type not in account_types:
conn.execute(text(insert_acct_type), {'name': acct_type})
account_types[acct_type] = get_insert_id(conn)
res = conn.execute('SELECT * FROM accounts')
for acct in res:
if acct['account_type'] == 'AWS':
conn.execute(
text(insert_acct),
{
'id': acct['account_id'],
'name': acct['account_name'],
'type_id': account_types['AWS'],
'contacts': acct['contacts'],
'enabled': acct['enabled'],
'required_roles': acct['required_roles']
}
)
conn.execute(
text(insert_acct_prop),
{
'id': acct['account_id'],
'name': 'account_number',
'value': to_json(acct['account_number'])
}
)
conn.execute(
text(insert_acct_prop),
{
'id': acct['account_id'],
'name': 'ad_group_base',
'value': to_json(acct['ad_group_base'] or '')
}
)
print('Migrated {} account {}'.format(acct['account_type'], acct['account_name']))
elif acct['account_type'] == 'DNS_AXFR':
conn.execute(
text(insert_acct),
{
'id': acct['account_id'],
'name': acct['account_name'],
'type_id': account_types['DNS: AXFR'],
'contacts': acct['contacts'],
'enabled': acct['enabled'],
'required_roles': acct['required_roles']
}
)
server = get_config_value(conn, 'collector_dns', 'axfr_server')
domains = get_config_value(conn, 'collector_dns', 'axfr_domains')
conn.execute(text(insert_acct_prop), {'id': acct['account_id'], 'name': 'server', 'value': [server]})
conn.execute(text(insert_acct_prop), {'id': acct['account_id'], 'name': 'domains', 'value': domains})
print('Migrated {} account {}'.format(acct['account_type'], acct['account_name']))
elif acct['account_type'] == 'DNS_CLOUDFLARE':
conn.execute(
text(insert_acct),
{
'id': acct['account_id'],
'name': acct['account_name'],
'type_id': account_types['DNS: CloudFlare'],
'contacts': acct['contacts'],
'enabled': acct['enabled'],
'required_roles': acct['required_roles']
}
)
api_key = get_config_value(conn, 'collector_dns', 'cloudflare_api_key')
email = get_config_value(conn, 'collector_dns', 'cloudflare_email')
endpoint = get_config_value(conn, 'collector_dns', 'cloudflare_endpoint')
conn.execute(text(insert_acct_prop), {'id': acct['account_id'], 'name': 'api_key', 'value': api_key})
conn.execute(text(insert_acct_prop), {'id': acct['account_id'], 'name': 'email', 'value': email})
conn.execute(text(insert_acct_prop), {'id': acct['account_id'], 'name': 'endpoint', 'value': endpoint})
print('Migrated {} account {}'.format(acct['account_type'], acct['account_name']))
else:
print('Invalid account type: {}'.format(acct['account_type']))
conn.execute(text('ALTER TABLE accounts_new AUTO_INCREMENT = :counter'), {'counter': acct_auto_increment})
finally:
conn.execute('SET FOREIGN_KEY_CHECKS=1')
def switch_tables():
conn = op.get_bind()
conn.execute('SET FOREIGN_KEY_CHECKS=0')
conn.execute('DROP TABLE accounts')
conn.execute('ALTER TABLE resources MODIFY `account_id` int(10) unsigned')
conn.execute('ALTER TABLE accounts_new RENAME accounts')
conn.execute('ALTER TABLE accounts RENAME INDEX `ix_accounts_new_account_name` TO `ix_accounts_account_name`')
conn.execute('ALTER TABLE accounts RENAME INDEX `ix_accounts_new_account_type_id` TO `ix_accounts_account_type_id`')
conn.execute('SET FOREIGN_KEY_CHECKS=1')
def get_insert_id(conn):
return next(conn.execute('SELECT LAST_INSERT_ID()'))[0]
def get_config_value(conn, ns, item):
return next(conn.execute(text(select_cfg_item), {'ns': ns, 'key': item}))['value']
|
src/GridCal/Engine/Devices/templates.py | mzy2240/GridCal | 284 | 11103892 | # This file is part of GridCal.
#
# GridCal is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GridCal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GridCal. If not, see <http://www.gnu.org/licenses/>.
import os
import pandas as pd
from GridCal.Engine.Devices.line import SequenceLineType
from GridCal.Engine.Devices.transformer import TransformerType
from GridCal.Engine.Devices.wire import Wire
def get_transformer_catalogue():
path = os.path.dirname(os.path.abspath(__file__))
fname = os.path.join(path, '..', '..', 'data', 'transformers.csv')
if os.path.exists(fname):
df = pd.read_csv(fname)
lst = list()
for i, item in df.iterrows():
tpe = TransformerType(hv_nominal_voltage=item['HV (kV)'],
lv_nominal_voltage=item['LV (kV)'],
nominal_power=item['Rate (MVA)'],
copper_losses=item['Copper losses (kW)'],
iron_losses=item['No load losses (kW)'],
no_load_current=item['No load current (%)'],
short_circuit_voltage=item['V short circuit (%)'],
gr_hv1=0.5,
gx_hv1=0.5,
name=item['Name'])
lst.append(tpe)
return lst
else:
return list()
def get_cables_catalogue():
path = os.path.dirname(os.path.abspath(__file__))
fname = os.path.join(path, '..', '..', 'data', 'cables.csv')
if os.path.exists(fname):
df = pd.read_csv(fname)
lst = list()
for i, item in df.iterrows():
"""
Name,
Rated voltage [kV],
Rated current [kA],
Nominal Frequency,
R [Ohm/km AC,20°C],
X [Ohm/km],
L [Ohm/km],
R0 (AC) [Ohm/km],
X0 [Ohm/km]
L0 [mH/km]
"""
tpe = SequenceLineType(name=item['Name'],
rating=item['Rated current [kA]'],
R=item['R [Ohm/km AC@20°C]'],
X=item['X [Ohm/km]'],
G=0.0,
B=0.0,
R0=item['R0 (AC) [Ohm/km]'],
X0=item['X0 [Ohm/km]'],
G0=0.0,
B0=0.0)
lst.append(tpe)
return lst
else:
return list()
def get_wires_catalogue():
path = os.path.dirname(os.path.abspath(__file__))
fname = os.path.join(path, '..', '..', 'data', 'wires.csv')
if os.path.exists(fname):
df = pd.read_csv(fname)
lst = list()
for i, item in df.iterrows():
'''
Size,Stranding,Material,Diameter [cm],GMR [m],R [Ohm/km],Rating [kA]
'''
name = str(item['Stranding']) + '_' + str(item['Material']) + '_' + str(item['Diameter [cm]'])
tpe = Wire(name=name,
gmr=item['GMR [m]'],
r=item['R [Ohm/km]'],
x=0.0,
max_current=item['Rating [kA]'])
lst.append(tpe)
return lst
else:
return list()
if __name__ == '__main__':
tr = get_transformer_catalogue()
cab = get_cables_catalogue()
wi = get_wires_catalogue()
print()
|
src/oci/database_management/models/cluster_cache_metric.py | Manny27nyc/oci-python-sdk | 249 | 11103912 | <reponame>Manny27nyc/oci-python-sdk<filename>src/oci/database_management/models/cluster_cache_metric.py
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class ClusterCacheMetric(object):
"""
The response containing the cluster cache metrics for the
Oracle Real Application Clusters (Oracle RAC) database.
"""
def __init__(self, **kwargs):
"""
Initializes a new ClusterCacheMetric object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param cluster_cache_metrics:
The value to assign to the cluster_cache_metrics property of this ClusterCacheMetric.
:type cluster_cache_metrics: list[oci.database_management.models.TimeSeriesMetricDefinition]
"""
self.swagger_types = {
'cluster_cache_metrics': 'list[TimeSeriesMetricDefinition]'
}
self.attribute_map = {
'cluster_cache_metrics': 'clusterCacheMetrics'
}
self._cluster_cache_metrics = None
@property
def cluster_cache_metrics(self):
"""
**[Required]** Gets the cluster_cache_metrics of this ClusterCacheMetric.
A list of cluster cache metrics for a specific database.
:return: The cluster_cache_metrics of this ClusterCacheMetric.
:rtype: list[oci.database_management.models.TimeSeriesMetricDefinition]
"""
return self._cluster_cache_metrics
@cluster_cache_metrics.setter
def cluster_cache_metrics(self, cluster_cache_metrics):
"""
Sets the cluster_cache_metrics of this ClusterCacheMetric.
A list of cluster cache metrics for a specific database.
:param cluster_cache_metrics: The cluster_cache_metrics of this ClusterCacheMetric.
:type: list[oci.database_management.models.TimeSeriesMetricDefinition]
"""
self._cluster_cache_metrics = cluster_cache_metrics
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
|
wip/extract_dependencies/createS3Bucket.py | nitish-raj/data-science-on-aws | 163 | 11103963 | #!/usr/bin/env python
# coding: utf-8
# # Create S3 Bucket
# In[ ]:
#from IPython import get_ipython
import boto3
import sagemaker
import pandas as pd
import numpy as np
import tensorflow
session = boto3.session.Session()
region = session.region_name
sagemaker_session = sagemaker.Session()
bucket = sagemaker_session.default_bucket()
s3 = boto3.Session().client(service_name="s3", region_name=region)
# In[ ]:
s = pd.Series([1, 3, 5, np.nan, 6, 8])
print(s)
# In[ ]:
print("Default bucket: {}".format(bucket))
# # Verify S3_BUCKET Bucket Creation
# In[ ]:
#get_ipython().run_cell_magic('bash', '', '\naws s3 ls s3://${bucket}/\n')
# In[ ]:
from botocore.client import ClientError
response = None
try:
response = s3.head_bucket(Bucket=bucket)
print(response)
setup_s3_bucket_passed = True
except ClientError as e:
print("[ERROR] Cannot find bucket {} in {} due to {}.".format(bucket, response, e))
# In[ ]:
#get_ipython().run_line_magic('store', '')
# # Release Resources
# In[ ]:
#get_ipython().run_cell_magic('html', '', '\n<p><b>Shutting down your kernel for this notebook to release resources.</b></p>\n<button class="sm-command-button" data-commandlinker-command="kernelmenu:shutdown" style="display:none;">Shutdown Kernel</button>\n \n<script>\ntry {\n els = document.getElementsByClassName("sm-command-button");\n els[0].click();\n}\ncatch(err) {\n // NoOp\n} \n</script>\n')
# In[ ]:
#git_ipython().run_cell_magic('javascript', '', '\ntry {\n Jupyter.notebook.save_checkpoint();\n Jupyter.notebook.session.delete();\n}\ncatch(err) {\n // NoOp\n}\n')
|
nabu/scripts/data.py | AzizCode92/nabu | 117 | 11103970 | '''@file data.py
does the data preparation'''
import os
from six.moves import configparser
import gzip
import tensorflow as tf
from nabu.processing.processors import processor_factory
from nabu.processing.tfwriters import tfwriter_factory
def main(expdir):
'''main function'''
#read the data conf file
parsed_cfg = configparser.ConfigParser()
parsed_cfg.read(os.path.join(expdir, 'database.conf'))
#loop over the sections in the data config
name = parsed_cfg.sections()[0]
#read the section
conf = dict(parsed_cfg.items(name))
#read the processor config
proc_cfg = configparser.ConfigParser()
proc_cfg.read(os.path.join(expdir, 'processor.cfg'))
#create a processor
processor = processor_factory.factory(
proc_cfg.get('processor', 'processor'))(proc_cfg)
#create a writer
writer = tfwriter_factory.factory(conf['type'])(conf['dir'])
#loop over the data files
for datafile in conf['datafiles'].split(' '):
if datafile[-3:] == '.gz':
open_fn = gzip.open
else:
open_fn = open
#loop over the lines in the datafile
for line in open_fn(datafile):
#split the name and the data line
splitline = line.strip().split(' ')
name = splitline[0]
dataline = ' '.join(splitline[1:])
#process the dataline
processed = processor(dataline)
#write the processed data to disk
if processed is not None:
writer.write(processed, name)
#write the metadata to file
processor.write_metadata(conf['dir'])
if __name__ == '__main__':
tf.app.flags.DEFINE_string('expdir', 'expdir', 'The experiments directory')
FLAGS = tf.app.flags.FLAGS
main(FLAGS.expdir)
|
src/deepsparse/lib.py | Willtor/deepsparse | 460 | 11103979 | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import os
try:
from deepsparse.cpu import cpu_details
except ImportError:
raise ImportError(
"Unable to import deepsparse python apis. "
"Please contact <EMAIL>"
)
CORES_PER_SOCKET, AVX_TYPE, VNNI = cpu_details()
def init_deepsparse_lib():
try:
nm_package_dir = os.path.dirname(os.path.abspath(__file__))
onnxruntime_neuralmagic_so_path = os.path.join(
nm_package_dir, AVX_TYPE, "deepsparse_engine.so"
)
spec = importlib.util.spec_from_file_location(
"deepsparse.{}.deepsparse_engine".format(AVX_TYPE),
onnxruntime_neuralmagic_so_path,
)
engine = importlib.util.module_from_spec(spec)
spec.loader.exec_module(engine)
return engine
except ImportError:
raise ImportError(
"Unable to import deepsparse engine binaries. "
"Please contact <EMAIL>"
)
|
plugins/lib/__init__.py | otherbeast/hackers-tool-kit | 1,103 | 11103985 | <reponame>otherbeast/hackers-tool-kit<filename>plugins/lib/__init__.py
__all__ = ["markup", "graphs", "hostchecker"]
|
mistral/tests/unit/test_command_dispatcher.py | soda-research/mistral | 205 | 11103987 | # Copyright 2013 - Mirantis, Inc.
# Copyright 2015 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mistral.engine import dispatcher
from mistral.tests.unit import base
from mistral.workflow import commands
def _print_commands(cmds):
print("commands:")
for cmd in cmds:
if isinstance(cmd, commands.RunTask):
print("%s, %s, %s" % (type(cmd), cmd.is_waiting(), cmd.unique_key))
else:
print("%s" % type(cmd))
class CommandDispatcherTest(base.BaseTest):
def test_rearrange_commands(self):
no_wait = commands.RunTask(None, None, None, None)
fail = commands.FailWorkflow(None, None, None, None)
succeed = commands.SucceedWorkflow(None, None, None, None)
wait1 = commands.RunTask(None, None, None, None)
wait1.wait = True
wait1.unique_key = 'wait1'
wait2 = commands.RunTask(None, None, None, None)
wait2.wait = True
wait2.unique_key = 'wait2'
wait3 = commands.RunTask(None, None, None, None)
wait3.wait = True
wait3.unique_key = 'wait3'
# 'set state' command is the first, others must be ignored.
initial = [fail, no_wait, wait1, wait3, wait2]
expected = [fail]
cmds = dispatcher._rearrange_commands(initial)
self.assertEqual(expected, cmds)
# 'set state' command is the last, tasks before it must be sorted.
initial = [no_wait, wait2, wait1, wait3, succeed]
expected = [no_wait, wait1, wait2, wait3, succeed]
cmds = dispatcher._rearrange_commands(initial)
self.assertEqual(expected, cmds)
# 'set state' command is in the middle, tasks before it must be sorted
# and the task after it must be ignored.
initial = [wait3, wait2, no_wait, succeed, wait1]
expected = [no_wait, wait2, wait3, succeed]
cmds = dispatcher._rearrange_commands(initial)
self.assertEqual(expected, cmds)
|
enaml/qt/docking/xbms.py | xtuzy/enaml | 1,080 | 11104009 | #------------------------------------------------------------------------------
# Copyright (c) 2013-2018, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
#------------------------------------------------------------------------------
from atom.api import Atom, Int, Bytes
from enaml.qt.QtCore import QSize
from enaml.qt.QtGui import QBitmap, QImage
class XBM(Atom):
""" A simple class representing an XMB image.
"""
#: The width of the xbm image.
width = Int()
#: The height of the xbm image.
height = Int()
#: The bytestring of image data.
data = Bytes()
def __init__(self, width, height, data):
""" Initialize an XBM image.
Parameters
----------
width : int
The width of the bitmap.
height : int
The height of the bitmap.
data : list
A list of 1s and 0s which represent the bitmap data.
The length must be equal to width * height.
"""
assert len(data) == (width * height)
bytes_list = []
for row in range(height):
val = 0
offset = row * width
for col in range(width):
d = col % 8
if col > 0 and d == 0:
bytes_list.append(val)
val = 0
v = data[offset + col]
val |= v << (7 - d)
bytes_list.append(val)
self.width = width
self.height = height
self.data = bytes(bytes_list)
def toBitmap(self):
size = QSize(self.width, self.height)
return QBitmap.fromData(size, self.data, QImage.Format_Mono)
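# Packing illustration: each row is filled MSB-first, one byte per 8 pixels,
# padded with zeros on the right. For instance, a 10-pixel row
#     1, 1, 0, 0, 0, 0, 1, 1, 1, 0
# becomes the two bytes 0b11000011 (0xC3) and 0b10000000 (0x80).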
CLOSE_BUTTON = XBM(8, 7, [
1, 1, 0, 0, 0, 0, 1, 1,
0, 1, 1, 0, 0, 1, 1, 0,
0, 0, 1, 1, 1, 1, 0, 0,
0, 0, 0, 1, 1, 0, 0, 0,
0, 0, 1, 1, 1, 1, 0, 0,
0, 1, 1, 0, 0, 1, 1, 0,
1, 1, 0, 0, 0, 0, 1, 1,
])
MAXIMIZE_BUTTON = XBM(8, 7, [
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
1, 0, 0, 0, 0, 0, 0, 1,
1, 0, 0, 0, 0, 0, 0, 1,
1, 0, 0, 0, 0, 0, 0, 1,
1, 0, 0, 0, 0, 0, 0, 1,
1, 1, 1, 1, 1, 1, 1, 1,
])
RESTORE_BUTTON = XBM(10, 9, [
0, 0, 1, 1, 1, 1, 1, 1, 1, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
1, 1, 1, 1, 1, 1, 1, 1, 0, 1,
1, 1, 1, 1, 1, 1, 1, 1, 0, 1,
1, 0, 0, 0, 0, 0, 0, 1, 0, 1,
1, 0, 0, 0, 0, 0, 0, 1, 0, 1,
1, 0, 0, 0, 0, 0, 0, 1, 0, 1,
1, 0, 0, 0, 0, 0, 0, 1, 0, 0,
1, 1, 1, 1, 1, 1, 1, 1, 0, 0,
])
LINKED_BUTTON = XBM(10, 9, [
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 1, 1, 1, 0, 0, 1, 1, 1, 1,
1, 0, 0, 0, 0, 0, 0, 0, 0, 1,
1, 0, 1, 1, 1, 1, 1, 1, 0, 1,
1, 0, 0, 0, 0, 0, 0, 0, 0, 1,
1, 1, 1, 1, 0, 0, 1, 1, 1, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
])
UNLINKED_BUTTON = XBM(10, 9, [
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 1, 1, 1, 0, 0, 1, 1, 1, 1,
1, 0, 0, 0, 0, 0, 0, 0, 0, 1,
1, 0, 0, 0, 0, 0, 0, 0, 0, 1,
1, 0, 0, 0, 0, 0, 0, 0, 0, 1,
1, 1, 1, 1, 0, 0, 1, 1, 1, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
])
PIN_BUTTON = XBM(9, 9, [
0, 0, 1, 1, 1, 1, 1, 0, 0,
0, 0, 1, 0, 0, 1, 1, 0, 0,
0, 0, 1, 0, 0, 1, 1, 0, 0,
0, 0, 1, 0, 0, 1, 1, 0, 0,
0, 0, 1, 0, 0, 1, 1, 0, 0,
0, 1, 1, 1, 1, 1, 1, 1, 0,
0, 0, 0, 0, 1, 0, 0, 0, 0,
0, 0, 0, 0, 1, 0, 0, 0, 0,
0, 0, 0, 0, 1, 0, 0, 0, 0,
])
UNPIN_BUTTON = XBM(9, 9, [
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 1, 0, 0, 0, 0, 0,
0, 0, 0, 1, 1, 1, 1, 1, 1,
0, 0, 0, 1, 0, 0, 0, 0, 1,
1, 1, 1, 1, 0, 0, 0, 0, 1,
0, 0, 0, 1, 1, 1, 1, 1, 1,
0, 0, 0, 1, 1, 1, 1, 1, 1,
0, 0, 0, 1, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
])
TAB_BUTTON = XBM(9, 9, [
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 1, 1, 1, 1, 1, 1, 1, 1,
0, 1, 1, 1, 1, 1, 1, 1, 0,
0, 0, 1, 1, 1, 1, 1, 0, 0,
0, 0, 0, 1, 1, 1, 0, 0, 0,
0, 0, 0, 0, 1, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
])
|
robel/utils/testing/mock_time.py | Del9fina/robel | 109 | 11104042 | # Copyright 2019 The ROBEL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities to mock time-related methods."""
from absl.testing.absltest import mock
class MockTime:
"""Class to mock the functionality of the time module."""
def __init__(self, initial_time: float = 0.0):
self._time = initial_time
def time(self) -> float:
return self._time
def sleep(self, duration: float):
self._time += duration
def patch_time(module_path: str, **kwargs):
return mock.patch(module_path, MockTime(**kwargs))
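# Illustrative usage sketch (the module path is a placeholder), kept as a comment:
#
#   with patch_time('my_module.time', initial_time=10.0):
#       my_module.time.sleep(0.5)   # advances the mocked clock
#       assert my_module.time.time() == 10.5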
|
tests/test_linter.py | Jean-Daniel/kapitan | 1,413 | 11104050 | #!/usr/bin/env python3
# Copyright 2019 The Kapitan Authors
# SPDX-FileCopyrightText: 2020 The Kapitan Authors <<EMAIL>>
#
# SPDX-License-Identifier: Apache-2.0
"linter tests"
import logging
import unittest
from kapitan.lint import start_lint
logging.basicConfig(level=logging.CRITICAL, format="%(message)s")
logger = logging.getLogger(__name__)
"""Helper class for creating args"""
class Object(object):
pass
class LinterTest(unittest.TestCase):
def test_lint(self):
args = Object()
args.fail_on_warning = False
args.skip_class_checks = False
args.skip_yamllint = False
args.inventory_path = "./tests/test_resources/inventory"
args.search_secrets = True
args.refs_path = "./tests/test_resources/secrets"
args.compiled_path = "./tests/test_resources/compiled"
num_issues_found = start_lint(args)
desired_output = 3
self.assertEqual(num_issues_found, desired_output)
|
python/veles/async_conn/tracer.py | pombredanne/veles | 918 | 11104056 | <reponame>pombredanne/veles<gh_stars>100-1000
# Copyright 2017 CodiLime
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
from veles.proto.node import PosFilter
from veles.proto import check
from veles.proto.exceptions import ObjectGoneError
from veles.util.future import done_future
class AsyncTracer:
def __init__(self, conn):
self.conn = conn
self.checks = []
self.nodes = {}
self.node_data = {}
def _get_node(self, id):
if id not in self.nodes:
self.nodes[id] = self.conn.get(id)
return self.nodes[id]
def _inject_node(self, node):
if node.id not in self.nodes:
self.nodes[node.id] = done_future(node)
def _get_from_node(self, id, func):
anode = self._get_node(id)
async def inner():
try:
node = await anode
except ObjectGoneError:
self.checks.append(check.CheckGone(
node=id,
))
raise
else:
return func(node)
loop = asyncio.get_event_loop()
return loop.create_task(inner())
def _get_parent(self, node):
self.checks.append(check.CheckParent(
node=node.id,
parent=node.parent
))
return node.parent
def get_parent(self, id):
return self._get_from_node(id, self._get_parent)
def _get_pos(self, node):
self.checks.append(check.CheckPos(
node=node.id,
pos_start=node.pos_start,
pos_end=node.pos_end,
))
return node.pos_start, node.pos_end
def get_pos(self, id):
return self._get_from_node(id, self._get_pos)
def _get_tags(self, node):
self.checks.append(check.CheckTags(
node=node.id,
tags=node.tags,
))
return node.tags
def get_tags(self, id):
return self._get_from_node(id, self._get_tags)
def _has_tag(self, node, tag):
res = tag in node.tags
self.checks.append(check.CheckTag(
node=node.id,
tag=tag,
present=res,
))
return res
def has_tag(self, id, tag):
return self._get_from_node(id, lambda node: self._has_tag(node, tag))
def _get_attr(self, node, key):
res = node.attr.get(key)
self.checks.append(check.CheckAttr(
node=node.id,
key=key,
data=res,
))
return res
def get_attr(self, id, key):
return self._get_from_node(id, lambda node: self._get_attr(node, key))
def _get_bindata_size(self, node, key):
res = node.bindata.get(key, 0)
self.checks.append(check.CheckBinDataSize(
node=node.id,
key=key,
size=res,
))
return res
def get_bindata_size(self, id, key):
return self._get_from_node(
id, lambda node: self._get_bindata_size(node, key))
def _get_trigger(self, node, key):
res = node.triggers.get(key)
self.checks.append(check.CheckTrigger(
node=node.id,
key=key,
state=res,
))
return res
def get_trigger(self, id, key):
return self._get_from_node(
id, lambda node: self._get_trigger(node, key))
async def _get_data(self, node, key, adata):
try:
res = await adata
except ObjectGoneError:
self.checks.append(check.CheckGone(
node=node,
))
raise
else:
self.checks.append(check.CheckData(
node=node,
key=key,
data=res,
))
return res
def get_data(self, node, key):
if (node, key) not in self.node_data:
adata = self.conn.get_data(node, key)
loop = asyncio.get_event_loop()
task = loop.create_task(self._get_data(node, key, adata))
self.node_data[node, key] = task
return self.node_data[node, key]
async def _get_bindata(self, node, key, start, end, adata):
try:
res = await adata
except ObjectGoneError:
self.checks.append(check.CheckGone(
node=node,
))
raise
else:
self.checks.append(check.CheckBinData(
node=node,
key=key,
start=start,
end=end,
data=res,
))
return res
def get_bindata(self, node, key, start=0, end=None):
adata = self.conn.get_bindata(node, key, start, end)
loop = asyncio.get_event_loop()
return loop.create_task(
self._get_bindata(node, key, start, end, adata))
async def _get_list(self, parent, tags, pos_filter, ares):
try:
res = await ares
except ObjectGoneError:
self.checks.append(check.CheckGone(
node=parent,
))
raise
else:
self.checks.append(check.CheckList(
parent=parent,
tags=tags,
pos_filter=pos_filter,
nodes={x.id for x in res},
))
for n in res:
self._inject_node(n)
return [x.id for x in res]
def get_list(self, parent, tags=set(), pos_filter=PosFilter()):
ares = self.conn.get_list(parent, tags, pos_filter)
loop = asyncio.get_event_loop()
return loop.create_task(self._get_list(parent, tags, pos_filter, ares))
def get_query(self, node, sig, params):
return self.conn.get_query(node, sig, params, self.checks)
|
pwndbg/commands/version.py | R2S4X/pwndbg | 287 | 11104077 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Displays gdb, python and pwndbg versions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import sys
import gdb
import pwndbg
import pwndbg.color
import pwndbg.commands
def _gdb_version():
return gdb.execute('show version', to_string=True).split('\n')[0]
def _py_version():
return sys.version.replace('\n', ' ')
@pwndbg.commands.Command
def version():
"""
Displays gdb, python and pwndbg versions.
"""
gdb_str = 'Gdb: %s' % _gdb_version()
py_str = 'Python: %s' % _py_version()
pwndbg_str = 'Pwndbg: %s' % pwndbg.__version__
print('\n'.join(map(pwndbg.color.light_red, (gdb_str, py_str, pwndbg_str))))
|
benchmarks/export.py | sethvargo/vaex | 337 | 11104088 | import tempfile
import os
import numpy as np
import vaex
df = None
def setup_df(N, M):
global df
x = [np.arange(N, dtype=np.float64) for _ in range(M)]
df = vaex.from_dict({
f'c{i}': x[i] for i in range(M)
})
def time_export_plain(N, M):
with tempfile.TemporaryDirectory() as tmpdir:
df.export_hdf5(os.path.join(tmpdir, 'bench.hdf5'))
time_export_plain.setup = setup_df
time_export_plain.params = [[1024**2, 1024**2*16], [1, 4, 16]]
time_export_plain.param_names = ['N', 'M']
def time_export_correlated(N, M):
names = df.get_column_names()
new_names = [f't{i}' for i in range(M)]
for i in range(M):
df[f't{i}'] = sum(df[c] for c in names)
dfc = df[new_names]
with tempfile.TemporaryDirectory() as tmpdir:
dfc.export_hdf5(os.path.join(tmpdir, 'bench.hdf5'))
time_export_correlated.setup = setup_df
time_export_correlated.params = [[1024**2, 1024**2*16], [1, 4, 16]]
time_export_correlated.param_names = ['N', 'M']
|
public-engines/image-classification-engine/tests/training/test_metrics_evaluator.py | tallandroid/incubator-marvin | 101 | 11104100 | #!/usr/bin/env python
# coding=utf-8
try:
import mock
except ImportError:
import unittest.mock as mock
import numpy as np
from marvin_image_classification_engine.training import MetricsEvaluator
@mock.patch('marvin_image_classification_engine.training.metrics_evaluator.sk_metrics.accuracy_score')
@mock.patch('marvin_image_classification_engine.training.metrics_evaluator.cv2.imread')
def test_execute(mocked_imread, mocked_score):
test_data = {
'train': ['t0'],
'val': ['t1']
}
mocked_params = {
'TEST_STEPS': 20
}
mocked_imread.return_value = np.array([[[0, 1, 2], [1,2, 3], [2,3, 4]], [[0, 1, 2], [1,2, 3], [2,3, 4]], [[0, 1, 2], [1,2, 3], [2,3, 4]]])
mocked_model = mock.MagicMock()
ac = MetricsEvaluator(model=mocked_model, dataset=test_data)
ac.execute(params=mocked_params)
mocked_imread.assert_called_once()
mocked_score.assert_called_once()
|
man/src/filter_demo_output.py | stdft112/depot_tools | 2,151 | 11104118 | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import os
import re
import sys
from xml.sax.saxutils import escape
from cStringIO import StringIO
if not os.path.exists('ansi2html'):
print 'You must run ./make_docs.sh once before running this script.'
sys.exit(1)
# This dependency is pulled in by make_docs.sh
# if it doesn't exist, run ./make_docs.sh first
sys.path.insert(0, 'ansi2html')
import ansi2html # pylint: disable=import-error, W0611
import ansi2html.converter # pylint: disable=import-error, W0611
def simpleXML(string):
BRIGHT = 1
DIM = 2
NORMAL = 22
RESET = 0
ESC_RE = re.compile('(\x1B\\[[^m]*?)m')
ret = StringIO()
boldstate = False
for tok in ESC_RE.split(string):
if not tok:
continue
if tok[0] == '\x1b':
codes = map(int, filter(bool, tok[2:].split(';')))
if not codes:
codes = [RESET]
for code in codes:
# only care about Bright
if code == BRIGHT and boldstate is False:
boldstate = True
ret.write('<emphasis role="strong">')
elif code in (DIM, NORMAL, RESET) and boldstate:
boldstate = False
ret.write('</emphasis>')
else:
ret.write(escape(tok))
if boldstate:
ret.write('</emphasis>')
return ret.getvalue()
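# Example (illustrative; not from the original file):
#   simpleXML('\x1b[1mbold\x1b[0m rest') -> '<emphasis role="strong">bold</emphasis> rest'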
def main():
ansi2html.converter.SCHEME['custom'] = (
"#000000", "#e42e16", "#19c518", "#e7e71c", "#492ee1",
"#d338d3", "#33d6e5", "#ffffff",
)
backend = sys.argv[1]
output = sys.stdin.read().rstrip()
callout_re = re.compile('\x1b\[(\d+)c\n')
callouts = collections.defaultdict(int)
for i, line in enumerate(output.splitlines(True)):
m = callout_re.match(line)
if m:
callouts[i + int(m.group(1)) - len(callouts)] += 1
output = callout_re.sub('', output)
w = sys.stdout.write
comment_marker = '###COMMENT###'
callout_counter = 1
if backend == 'xhtml11':
preamble = (
'</p></div><div class="listingblock"><div class="content"><pre><code>'
)
postamble = '</code></pre></div></div><p><div class="paragraph">'
c = ansi2html.Ansi2HTMLConverter(inline=True, scheme='custom')
in_code = False
body = c.convert(output, full=False)
for i, line in enumerate(body.splitlines()):
if line.startswith(comment_marker):
if in_code:
w(postamble)
in_code = False
w(line[len(comment_marker):])
else:
if not in_code:
w(preamble)
in_code = True
ext = ''
for _ in xrange(callouts[i]):
if not ext:
ext += '</span>'
ext += ' <b><%d></b>' % callout_counter
callout_counter += 1
if ext:
ext += '<span>'
w(line + ext + '\n')
if in_code:
w(postamble)
else:
preamble = '</simpara><literallayout class="monospaced">'
postamble = '</literallayout><simpara>'
in_code = False
body = simpleXML(output)
for i, line in enumerate(body.splitlines()):
if line.startswith(comment_marker):
if in_code:
w(postamble)
in_code = False
w(line[len(comment_marker):])
else:
if not in_code:
w(preamble)
in_code = True
ext = ''
for _ in xrange(callouts[i]):
ext += ' <emphasis role="strong">(%d)</emphasis>' % callout_counter
callout_counter += 1
w(line + ext + '\n')
if in_code:
w(postamble)
if __name__ == '__main__':
main()
|
emissary/controllers/log.py | LukeB42/Emissary | 193 | 11104143 | """
This file provides a generic logging class.
It could do with automatic file rotation and syslog support.
<NAME> 2015
MIT License.
"""
import logging, time
class Log(object):
def __init__(self, program, log_file=None, log_stdout=False):
self.program = program
self.log = None
self.debug = False
if log_file or log_stdout:
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s', '%d/%m/%Y %H:%M:%S'
)
self.log = logging.getLogger(program)
self.log.setLevel(logging.DEBUG)
if log_stdout:
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
self.log.addHandler(ch)
if log_file:
ch = logging.FileHandler(log_file, 'a')
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
self.log.addHandler(ch)
def __call__(self, data, level='info'):
if self.log:
if level == 'debug': level = 10
if level == 'info': level = 20
if level == 'warning': level = 30
if level == 'error': level = 40
if level == 'critical': level = 50
if (level > 15) or (self.debug):
self.log.log(level,data)
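# Minimal usage sketch (illustrative; not part of the original Emissary file).
# Messages at "info" level and above are emitted; "debug" lines only appear when log.debug is True.
if __name__ == "__main__":
    log = Log("example", log_stdout=True)
    log("Starting up.", "info")
    log("Hidden unless log.debug is set.", "debug")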
|
climlab/dynamics/adv_diff_numerics.py | nfeldl/climlab | 160 | 11104145 | r'''
The 1D advection-diffusion problem
----------------------------------
The equation to be solved is
.. math::
\frac{\partial}{\partial t} \psi(x,t) &= -\frac{1}{w(x)} \frac{\partial}{\partial x} \left[ w(x) ~ \mathcal{F}(x,t) \right] + \dot{\psi}\\
\mathcal{F} &= U(x) \psi(x) -K(x) ~ \frac{\partial \psi}{\partial x} + F(x)
for the following quantities:
- state variable :math:`\psi(x,t)`
- diffusivity :math:`K(x)` in units of :math:`x^2 ~ t^{-1}`
- advecting velocity :math:`U(x)` in units of :math:`x ~ t^{-1}`
- a prescribed flux :math:`F(x)` (including boundary conditions) in units of :math:`\psi ~ x ~ t^{-1}`
- a scalar source/sink :math:`\dot{\psi}(x)` in units of :math:`\psi ~ t^{-1}`
- weighting function :math:`w(x)` for the divergence operator on curvilinear grids.
The boundary condition is a flux condition at the end points:
.. math::
\begin{align} \label{eq:fluxcondition}
\mathcal{F}(x_0) &= F(x_0) & \mathcal{F}(x_J) &= F(x_J)
\end{align}
which requires that the advecting velocity :math:`u(x) = 0` at the end points :math:`x_0, x_J`
The solver is implemented on a 1D staggered grid, with J+1 flux points
and J scalar points located somewhere between the flux points.
The solver does **not** assume the gridpoints are evenly spaced in :math:`x`.
Routines are provided to compute the following:
- Advective, diffusive, and total fluxes (the terms of :math:`\mathcal{F}`)
- Tridiagonal matrix operator for the flux convergence
- The actual flux convergence, or instantaneous scalar tendency given a current value of :math:`\psi(x)`
- Future value of :math:`\psi(x)` for an implicit timestep
Some details of the solver formulas are laid out below for reference.
Spatial discretization
----------------------
We use a non-uniform staggered spatial grid with scalar :math:`\psi` evaluated at :math:`J` points,
and flux :math:`\mathcal{F}` evaluated at :math:`J+1` flux points.
The indexing will run from :math:`j=0` to :math:`j=J` for the flux points,
and :math:`i=0` to :math:`i=J-1` for the scalar points.
This notation is consistent with zero-indexed Python arrays.
We define the following arrays:
- :math:`\mathcal{X}_b[j]` is a length J+1 array defining the location of the flux points.
- :math:`\mathcal{X}[i]` is a length J array defining the location of the scalar points, where point :math:`\mathcal{X}[j]` is somewhere between :math:`\mathcal{X}_b[j]` and :math:`\mathcal{X}_b[j+1]` for all :math:`j<J`.
- :math:`\psi[i], \dot{\psi}[i]` are length J arrays defined on :math:`\mathcal{X}`.
- :math:`U[j], K[j], F[j]` are all arrays of length J+1 defined on :math:`\mathcal{X}_b`.
- The grid weights are similarly in arrays :math:`W_b[j], W[i]` respectively on :math:`\mathcal{X}_b`` and :math:`\mathcal{X}`.
Centered difference formulas for the flux
-----------------------------------------
We use centered differences in :math:`x` to discretize the spatial derivatives.
The diffusive component of the flux is thus
.. math::
\begin{align*}
\mathcal{F}_{diff}[j] &= - K[j] \frac{ \left( \psi[i] - \psi[i-1] \right) }{\left( \mathcal{X}[i] - \mathcal{X}[i-1] \right)} & j&=i=1,2,...,J-1
\end{align*}
The diffusive flux is assumed to be zero at the boundaries.
The advective term requires an additional approximation since the scalar :math:`\psi` is not defined at the flux points.
We use a linear interpolation to the flux points:
.. math::
\begin{align*}
\psi_b[j] &\equiv \psi[i-1] \left( \frac{\mathcal{X}[i] - \mathcal{X}_b[j]}{\mathcal{X}[i] - \mathcal{X}[i-1]} \right) + \psi[i] \left( \frac{ \mathcal{X}_b[j] - \mathcal{X}[i-1] }{\mathcal{X}[i] - \mathcal{X}[i-1]} \right) & j&=i=1,2,...,J-1
\end{align*}
Note that for an evenly spaced grid, this reduces to the simple average :math:`\frac{1}{2} \left( \psi[i-1] + \psi[i] \right)`.
With this interpolation, the advective flux is approximated by
.. math::
\begin{align*}
\mathcal{F}_{adv}[j] &= \frac{U[j] }{\mathcal{X}[i] - \mathcal{X}[i-1]} \left( \psi[i-1] (\mathcal{X}[i] - \mathcal{X}_b[j]) + \psi[i] (\mathcal{X}_b[j] - \mathcal{X}[i-1]) \right) & j&=i=1,2,...,J-1
\end{align*}
The total flux away from the boundaries (after some recombining terms) is thus:
.. math::
\mathcal{F}[j] = F[j] + \psi[i-1] \left( \frac{K[j] + U[j] (\mathcal{X}[i] - \mathcal{X}_b[j]) }{ \mathcal{X}[i] - \mathcal{X}[i-1] } \right) - \psi[i] \left( \frac{K[j] - U[j] (\mathcal{X}_b[j] - \mathcal{X}[i-1]) }{\mathcal{X}[i] - \mathcal{X}[i-1] } \right)
which is valid for j=i=1,2,...,J-1.
Centered difference formulas for the flux convergence
-----------------------------------------------------
Centered difference approximation of the flux convergence gives
.. math::
\begin{align*}
\frac{\partial }{\partial t} \psi[i] &= -\frac{ W_b[j+1] \mathcal{F}[j+1] - W_b[j] \mathcal{F}[j] }{W[i] ( \mathcal{X}_b[j+1] - \mathcal{X}_b[j] )} + \dot{\psi}[i] & i&=j=0,1,...,J-1
\end{align*}
The flux convergences are best expressed together in matrix form:
.. math::
\begin{equation}
\frac{\partial \boldsymbol{\psi}}{\partial t} = \boldsymbol{T} ~ \boldsymbol{\psi} + \boldsymbol{S}
\end{equation}
where :math:`\boldsymbol{\psi}` is the :math:`J\times1` column vector,
:math:`\boldsymbol{S}` is a :math:`J\times1` column vector
representing the prescribed flux convergence and source terms, whose elements are
.. math::
\begin{align}
S[i] &= \frac{-W_b[j+1] F[j+1] + W_b[j] F[j]}{W[i] ( \mathcal{X}_b[j+1] - \mathcal{X}_b[j] )} + \dot{\psi}[i] & i&=j=0,1,...,J-1
\end{align}
and :math:`\boldsymbol{T}` is a :math:`J\times J` tridiagonal matrix:
.. math::
\begin{equation}
\boldsymbol{T} ~ \boldsymbol{\psi} = \left[\begin{array}{ccccccc} T_{m0} & T_{u1} & 0 & ... & 0 & 0 & 0 \\T_{l0} & T_{m1} & T_{u2} & ... & 0 & 0 & 0 \\ 0 & T_{l1} & T_{m2} & ... & 0 & 0 & 0 \\... & ... & ... & ... & ... & ... & ... \\0 & 0 & 0 & ... & T_{m(J-3)} & T_{u(J-2)} & 0 \\0 & 0 & 0 & ... & T_{l(J-3)} & T_{m(J-2)} & T_{u(J-1)} \\0 & 0 & 0 & ... & 0 & T_{l(J-2)} & T_{m(J-1)}\end{array}\right] \left[\begin{array}{c} \psi_0 \\ \psi_1 \\ \psi_2 \\... \\ \psi_{J-3} \\ \psi_{J-2} \\ \psi_{J-1} \end{array}\right]
\end{equation}
with vectors :math:`T_l, T_m, T_u` representing respectively the lower, main, and upper diagonals of :math:`\boldsymbol{T}`.
We will treat all three vectors as length J;
the 0th element of :math:`T_u` is ignored while the (J-1)th element of :math:`T_l` is ignored
(this is consistent with the expected inputs for the Python module scipy.linalg.solve_banded).
The instantanous tendency is then easily computed by matrix multiplication.
The elements of the main diagonal of :math:`\boldsymbol{\psi}` can be computed from
.. math::
\begin{align} \label{eq:maindiag}
\begin{split}
T_m[i] &= -\left( \frac{ W_b[j+1] \big( K[j+1] + U[j+1] (\mathcal{X}[i+1] - \mathcal{X}_b[j+1]) \big) }{ W[i] ( \mathcal{X}_b[j+1] - \mathcal{X}_b[j] )(\mathcal{X}[i+1] - \mathcal{X}[i]) } \right) \\
& \qquad - \left( \frac{W_b[j] \big( K[j] - U[j] (\mathcal{X}_b[j] - \mathcal{X}[i-1]) \big) }{W[i] ( \mathcal{X}_b[j+1] - \mathcal{X}_b[j] )(\mathcal{X}[i] - \mathcal{X}[i-1]) } \right) \\
i &=j=0,2,...,J-1
\end{split}
\end{align}
which is valid at the boundaries so long as we set :math:`W_b[0] = W_b[J] = 0`.
The lower diagonal (including the right boundary condition) is computed from
.. math::
\begin{align} \label{eq:lowerdiag}
\begin{split}
T_l[i-1] &= \left( \frac{W_b[j]}{W[i] } \right) \left( \frac{ K[j] + U[j] (\mathcal{X}[i] - \mathcal{X}_b[j]) }{( \mathcal{X}_b[j+1] - \mathcal{X}_b[j] ) (\mathcal{X}[i] - \mathcal{X}[i-1] )} \right) \\
i &=j =1,2,...,J-2, J-1
\end{split}
\end{align}
Finally the upper diagonal (including the left boundary condition) is computed from
.. math::
\begin{align} \label{eq:upperdiag}
\begin{split}
T_u[i+1] &= \left( \frac{W_b[j+1]}{W[i]} \right) \left( \frac{K[j+1] - U[j+1] (\mathcal{X}_b[j+1] - \mathcal{X}[i]) }{( \mathcal{X}_b[j+1] - \mathcal{X}_b[j] )(\mathcal{X}[i+1] - \mathcal{X}[i] ) } \right) \\
i &= j=0,...,J-2
\end{split}
\end{align}
Implicit time discretization
----------------------------
The forward-time finite difference approximation to LHS of the flux-convergence equation is simply
.. math::
\begin{equation}
\frac{\partial \psi[i]}{\partial t} \approx \frac{\psi^{n+1}[i]- \psi^{n}[i]}{\Delta t}
\end{equation}
where the superscript :math:`n` indicates the time index.
We use the implicit-time method, in which the RHS is evaluated at the future time :math:`n+1`.
Applying this to the matrix equation above
and moving all the terms at time :math:`n+1` over to the LHS yields
.. math::
\begin{equation} \label{eq:implicit_tridiagonal}
\left( \boldsymbol{I} - \boldsymbol{T} \Delta t \right) \boldsymbol{\psi}^{n+1} = \boldsymbol{\psi}^{n} + \boldsymbol{S} \Delta t
\end{equation}
where :math:`\boldsymbol{I}` is the :math:`J\times J` identity matrix.
Solving for the future value :math:`\boldsymbol{\psi}^{n+1}` is then accomplished
by solving the :math:`J \times J` tridiagonal linear system using standard routines.
Analytical benchmark
--------------------
Here is an analytical case to be used for testing purposes to validate the numerical code.
This is implemented in the CLIMLAB test suite.
- :math:`K=K_0` is constant
- :math:`w(x) = 1` everywhere (Cartesian coordinates)
- :math:`F = 0` everywhere
- :math:`\psi(x,0) = \psi_0 \sin^2\left(\frac{\pi x}{L}\right)`
- :math:`u(x) = U_0 \sin\left(\frac{\pi x}{L}\right)`
for a domain with endpoints at :math:`x=0` and :math:`x=L`.
The analytical solution is
.. math::
\begin{align}
\mathcal{F} &= \psi_0 \sin\left(\frac{\pi x}{L}\right) \left[U_0 \sin^2\left(\frac{\pi x}{L}\right) - 2K \frac{\pi}{L} \cos\left(\frac{\pi x}{L}\right) \right] \\
\frac{\partial \psi}{\partial t} &= -\psi_0 \frac{\pi}{L} \left\{ 3 U_0 \sin^2\left(\frac{\pi x}{L}\right) \cos\left(\frac{\pi x}{L}\right) -2K\frac{\pi}{L} \left[\cos^2\left(\frac{\pi x}{L}\right) -\sin^2\left(\frac{\pi x}{L}\right) \right] \right\}
\end{align}
which satisfies the boundary condition :math:`\mathcal{F} = 0` at :math:`x=0` and :math:`x=L`.
Module function reference
-------------------------
All the functions in ``climlab.dynamics.adv_diff_numerics`` are vectorized
to handle multidimensional input. The key assumption is that
**advection-diffusion operates along the final dimension**.
Inputs should be reshaped appropriately (e.g. with ``numpy.moveaxis()``)
before calling these functions.
'''
from __future__ import division
from numpy import zeros, ones, zeros_like, ones_like, matmul, diag, diag_indices, diff, newaxis
from numpy.linalg import solve
from scipy.linalg import solve_banded
def diffusive_flux(X, Xb, K, field):
'''Return the diffusive flux on cell boundaries (length J+1)'''
flux = zeros_like(K)
flux[...,1:-1] += field[...,:-1]*K[...,1:-1]/diff(X,axis=-1)
flux[...,1:-1] -= field[...,1:]*K[...,1:-1]/diff(X,axis=-1)
return flux
def advective_flux(X, Xb, U, field):
'''Return the advective flux on cell boundaries (length J+1)'''
flux = zeros_like(U)
flux[...,1:-1] += field[...,:-1]*(U[...,1:-1]*(X[...,1:]-Xb[...,1:-1]))/diff(X,axis=-1)
flux[...,1:-1] -= field[...,1:]*(-U[...,1:-1]*(Xb[...,1:-1]-X[...,:-1]))/diff(X,axis=-1)
return flux
def total_flux(X, Xb, K, U, field, prescribed_flux=None):
'''Return the total (advective + diffusive + prescribed) flux
on cell boundaries (length J+1)'''
if prescribed_flux is None:
prescribed_flux = zeros_like(U)
return advective_flux(X, Xb, U, field) + diffusive_flux(X, Xb, K, field) + prescribed_flux
def advdiff_tridiag(X, Xb, K, U, W=None, Wb=None, use_banded_solver=False):
r'''Compute the tridiagonal matrix operator for the advective-diffusive
flux convergence.
Input arrays of length J+1:
Xb, Wb, K, U
Input arrays of length J:
X, W
The 0th and Jth (i.e. first and last) elements of Wb are ignored;
assuming boundary condition is a prescribed flux.
The return value depends on input flag ``use_banded_solver``
If ``use_banded_solver==True``, return a 3xJ array containing the elements of the tridiagonal.
This version is restricted to 1D input arrays,
but is suitable for use with the efficient banded solver.
If ``use_banded_solver=False`` (which it must be for multidimensional input),
return an array (...,J,J) with the full tridiagonal matrix.
'''
J = X.shape[-1]
if (W is None):
W = ones_like(X)
if (Wb is None):
Wb = ones_like(Xb)
# These are all length (J-1) in the last axis
lower_diagonal = (Wb[...,1:-1]/W[...,1:] *
(K[...,1:-1]+U[...,1:-1]*(X[...,1:]-Xb[...,1:-1])) /
((Xb[...,2:]-Xb[...,1:-1])*(X[...,1:]-X[...,:-1])))
upper_diagonal = (Wb[...,1:-1]/W[...,:-1] *
(K[...,1:-1]-U[...,1:-1]*(Xb[...,1:-1]-X[...,:-1])) /
((Xb[...,1:-1]-Xb[...,:-2])*(X[...,1:]-X[...,:-1])))
main_diagonal_term1 = (-Wb[...,1:-1]/W[...,:-1] *
(K[...,1:-1]+U[...,1:-1]*(X[...,1:]-Xb[...,1:-1])) /
((Xb[...,1:-1]-Xb[...,:-2])*(X[...,1:]-X[...,:-1])))
main_diagonal_term2 = (-Wb[...,1:-1]/W[...,1:] *
(K[...,1:-1]-U[...,1:-1]*(Xb[...,1:-1]-X[...,:-1])) /
((Xb[...,2:]-Xb[...,1:-1])*(X[...,1:]-X[...,:-1])))
if use_banded_solver:
# Pack the diagonals into a 3xJ array
tridiag_banded = zeros((3,J))
# Lower diagonal (last element ignored)
tridiag_banded[2,:-1] = lower_diagonal
# Upper diagonal (first element ignored)
tridiag_banded[0,1:] = upper_diagonal
# Main diagonal, term 1, length J-1
tridiag_banded[1,:-1] += main_diagonal_term1
# Main diagonal, term 2, length J-1
tridiag_banded[1, 1:] += main_diagonal_term2
return tridiag_banded
else:
# If X.size is (...,J), then the tridiagonal operator is (...,J,J)
sizeJJ = tuple([n for n in X.shape[:-1]] + [J,J])
tridiag = zeros(sizeJJ)
# indices for main, upper, and lower diagonals of a JxJ matrix
inds_main = diag_indices(J)
inds_upper = (inds_main[0][:-1], inds_main[1][1:])
inds_lower = (inds_main[0][1:], inds_main[1][:-1])
# Lower diagonal (length J-1)
tridiag[...,inds_lower[0],inds_lower[1]] = lower_diagonal
# Upper diagonal (length J-1)
tridiag[...,inds_upper[0],inds_upper[1]] = upper_diagonal
# Main diagonal, term 1, length J-1
tridiag[...,inds_main[0][:-1],inds_main[1][:-1]] += main_diagonal_term1
# Main diagonal, term 2, length J-1
tridiag[...,inds_main[0][1:],inds_main[1][1:]] += main_diagonal_term2
return tridiag
def make_the_actual_tridiagonal_matrix(tridiag_banded):
'''Convert a (3xJ) banded array into full (JxJ) tridiagonal matrix form.'''
return (diag(tridiag_banded[1,:], k=0) +
diag(tridiag_banded[0,1:], k=1) +
diag(tridiag_banded[2,:-1], k=-1))
def compute_source(X, Xb, prescribed_flux=None, prescribed_source=None,
W=None, Wb=None):
'''Return the source array S consisting of the convergence of the prescribed flux
plus the prescribed scalar source.'''
if (W is None):
W = ones_like(X)
if (Wb is None):
Wb = ones_like(Xb)
if prescribed_flux is None:
prescribed_flux = zeros_like(Xb)
if prescribed_source is None:
prescribed_source = zeros_like(X)
F = prescribed_flux
return ((-Wb[...,1:]*F[...,1:]+Wb[...,:-1]*F[...,:-1]) /
(W*(Xb[...,1:]-Xb[...,:-1])) + prescribed_source)
def compute_tendency(field, tridiag, source, use_banded_solver=False):
r'''Return the instantaneous scalar tendency.
This is the sum of the convergence of advective+diffusive flux plus any
prescribed convergence or scalar sources.
The convergence is computed by matrix multiplication:
.. math::
\frac{\partial \psi}{\partial t} = T \times \psi + S
where :math:`T` is the tridiagonal flux convergence matrix.
'''
if use_banded_solver:
tridiag = make_the_actual_tridiagonal_matrix(tridiag)
# np.matmul expects the final 2 dims of each array to be matrices
# add a singleton dimension to field so we get (J,J)x(J,1)->(J,1)
result = matmul(tridiag, field[...,newaxis]) + source[...,newaxis]
# Now strip the extra dim
return result[...,0]
def implicit_step_forward(initial_field, tridiag, source, timestep,
use_banded_solver=False):
r'''Return the field at future time using an implicit timestep.
The matrix problem is
.. math::
(I - T \Delta t) \psi^{n+1} = \psi^n + S \Delta t
where :math:`T` is the tridiagonal matrix for the flux convergence, :math:`psi` is the
state variable, the superscript :math:`n` refers to the time index, and :math:`S \Delta t`
is the accumulated source over the timestep :math:`\Delta t`.
Input arguments:
- ``initial_field``: the current state variable :math:`\psi^n`, dimensions (...,J)
- ``tridiag``: the tridiagonal matrix :math:`T`, dimensions (...,J,J) or (...,3,J) depending on the value of ``use_banded_solver``
- ``source``: prescribed sources/sinks of :math:`\psi`, dimensions (...,J)
- ``timestep``: the discrete timestep in time units
- ``use_banded_solver``: switch to use the optional efficient banded solver (see below)
Returns the updated value of the state variable :math:`\psi^{n+1}`, dimensions (...,J)
The expected shape of ``tridiag`` depends on the switch ``use_banded_solver``,
which should be consistent with that used in the call to ``advdiff_tridiag()``.
If ``True``, we use the efficient banded matrix solver
``scipy.linalg.solve_banded()``.
However this will probably only work for a 1D state variable.
The default is to use the general linear system solver ``numpy.linalg.solve()``.
'''
RHS = initial_field + source*timestep
I = 0.*tridiag
J = initial_field.shape[-1]
if use_banded_solver:
I[1,:] = 1. # identity matrix in banded form
IminusTdt = I-tridiag*timestep
return solve_banded((1, 1), IminusTdt, RHS)
else:
# indices for main, upper, and lower diagonals of a JxJ matrix
inds_main = diag_indices(J)
I = 0.*tridiag
I[...,inds_main[0],inds_main[1]] = 1. # stacked identity matrix
IminusTdt = I-tridiag*timestep
return solve(IminusTdt, RHS)
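# Minimal usage sketch (illustrative; not part of the original climlab module).
# Pure diffusion of a sine-squared profile on a uniform Cartesian grid, stepped
# forward implicitly; the grid size, diffusivity, and timestep below are arbitrary assumptions.
if __name__ == "__main__":
    from numpy import linspace, pi, sin
    J = 40
    Xb = linspace(0., 1., J + 1)      # flux (boundary) points, length J+1
    X = 0.5 * (Xb[:-1] + Xb[1:])      # scalar points, length J
    K = 0.01 * ones_like(Xb)          # constant diffusivity
    U = zeros_like(Xb)                # no advection, so the flux boundary condition is satisfied
    psi = sin(pi * X)**2
    tridiag = advdiff_tridiag(X, Xb, K, U)
    source = compute_source(X, Xb)
    tendency = compute_tendency(psi, tridiag, source)
    psi_next = implicit_step_forward(psi, tridiag, source, timestep=0.1)
    print(tendency.shape, psi_next.shape)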
|
ida_plugin/uefi_analyser/dep_browser.py | fengjixuchui/UEFI_RETool | 240 | 11104158 | # SPDX-License-Identifier: MIT
import json
import ida_kernwin
import idaapi
import idautils
import idc
from idaapi import Choose
from .utils import get_dep_json
NAME = "UEFI_RETool"
class chooser_handler_t(idaapi.action_handler_t):
def __init__(self, thing):
idaapi.action_handler_t.__init__(self)
self.thing = thing
def activate(self, ctx):
pass
def update(self, ctx):
if idaapi.is_chooser_tform(ctx.form_type):
return idaapi.AST_ENABLE_FOR_FORM
return idaapi.AST_DISABLE_FOR_FORM
class ProtsWindow(Choose):
"""class to display protocols information output window"""
def __init__(self, title, dep_json, nb=5):
sizes = self._get_sizes(dep_json)
Choose.__init__(
self,
title,
[
["GUID", sizes["GUID"]],
["Name", sizes["Name"]],
["Module", sizes["Module"]],
["Service", sizes["Service"]],
],
flags=0,
width=None,
height=None,
embedded=False,
)
self.n = 0
self.items = self._get_lines(dep_json)
self.selcount = 0
self.modal = False
self.popup_names = list()
self.dep_json = dep_json
def _get_sizes(self, data):
"""get maximum field sizes"""
sizes = {"GUID": 0, "Name": 0, "Module": 0, "Service": 0}
for element in data:
if len(element["guid"]) > sizes["GUID"]:
sizes["GUID"] = len(element["guid"])
if len(element["protocol_name"]) > sizes["Name"]:
sizes["Name"] = len(element["protocol_name"])
if len(element["module_name"]) > sizes["Module"]:
sizes["Module"] = len(element["module_name"])
if len(element["service"]) > sizes["Service"]:
sizes["Service"] = len(element["service"])
return sizes
def _get_lines(self, dep_json):
"""to fill line in the table"""
lines = list()
for elem in dep_json:
item = [
elem["guid"],
elem["protocol_name"],
elem["module_name"],
elem["service"],
]
if not lines.count(item):
lines.append(item)
return lines
def _make_item(self):
"""make custom element"""
item = [
idaapi.ask_str(str(), 0, "GUID"),
idaapi.ask_str(str(), 0, "Name"),
idaapi.ask_str(str(), 0, "Module"),
idaapi.ask_str(str(), 0, "Service"),
]
self.n += 1
return item
def OnGetLine(self, n):
return self.items[n]
def OnGetSize(self):
n = len(self.items)
return n
def OnClose(self):
print(f"[{NAME}] dependency browser window was closed")
def OnEditLine(self, n):
print(f"[{NAME}] editing is not supported")
return n
def OnInsertLine(self, n):
print(f"[{NAME}] inserting is not supported")
return n
def OnSelectLine(self, n):
self.selcount += 1
guid = self.items[n][0]
print(f"[{NAME}] {self.items[n][1]} protocol information")
for protocol in self.dep_json:
if protocol["guid"] == guid:
print(json.dumps(protocol, indent=4))
break
return n
def OnDeleteLine(self, n):
print(f"[{NAME}] deleting is not supported")
return n
def OnRefresh(self, n):
print(f"[{NAME}] refreshing is not supported")
return n
def OnGetLineAttr(self, n):
return n
def show(self):
return self.Show(self.modal) >= 0
def handle_json(res_json):
dep_json = get_dep_json(res_json)
wind = ProtsWindow(f"{NAME} dependency browser", dep_json, nb=10)
wind.show()
def run(log_file):
try:
with open(log_file, "rb") as f:
res_json = json.load(f)
handle_json(res_json)
except Exception as e:
print(f"[{NAME} error] {repr(e)}")
return False
return True
|
examples/decorators/decorator_count_exceptions.py | JacobHenner/aioprometheus | 104 | 11104167 | #!/usr/bin/env python
"""
.. code-block:: python
$ python decorator_count_exceptions.py
The example script can be tested using ``curl``.
.. code-block:: console
$ curl :8000/metrics
# HELP request_handler_exceptions Number of exceptions in requests
# TYPE request_handler_exceptions counter
request_handler_exceptions{route="/"} 3
You may need to Ctrl+C twice to exit the example script.
"""
import asyncio
import random
from aioprometheus import Counter, count_exceptions
from aioprometheus.service import Service
# Create a metric to track requests currently in progress.
REQUEST_EXCEPTIONS = Counter(
"request_handler_exceptions", "Number of exceptions in requests"
)
REQUESTS = Counter("request_total", "Total number of requests")
# Decorate function with metric.
@count_exceptions(REQUEST_EXCEPTIONS, {"route": "/"})
async def handle_request(duration):
"""A dummy function that occasionally raises an exception"""
REQUESTS.inc({"route": "/"})
if duration < 0.3:
raise Exception("Ooops")
await asyncio.sleep(duration)
async def handle_requests():
# Start up the server to expose the metrics.
await svr.start(port=8000)
# Generate some requests.
while True:
try:
await handle_request(random.random())
except Exception:
pass # keep handling
if __name__ == "__main__":
loop = asyncio.get_event_loop()
svr = Service()
try:
loop.run_until_complete(handle_requests())
except KeyboardInterrupt:
pass
finally:
loop.run_until_complete(svr.stop())
loop.stop()
loop.close()
|
housekeep.py | kasimov-maxim/aiosmtpd | 257 | 11104170 | #!/usr/bin/env python3
# Copyright 2014-2021 The aiosmtpd Developers
# SPDX-License-Identifier: Apache-2.0
import argparse
import inspect
import os
import pprint
import shutil
import sys
from pathlib import Path
try:
# noinspection PyPackageRequirements
from colorama import ( # pytype: disable=import-error
Fore,
Style,
init as colorama_init,
)
except ImportError:
colorama_init = None
class Fore:
CYAN = "\x1b[1;96m"
GREEN = "\x1b[1;92m"
YELLOW = "\x1b[1;93m"
class Style:
BRIGHT = "\x1b[1m"
RESET_ALL = "\x1b[0m"
DUMP_DIR = "_dump"
TOX_ENV_NAME = os.environ.get("TOX_ENV_NAME", None)
# These dirs will be processed if exists, so no need to remove old entries.
# I suggest keeping them to clean up old artefacts just in case.
WORKDIRS = (
".mypy_cache",
".pytype",
".pytest-cache", # <-+-- One of these is a typo
".pytest_cache", # <-+ Keep them both just in case
".tox",
DUMP_DIR,
"_dynamic", # Pre 1.4.0a4
"aiosmtpd.egg-info",
"build",
"dist",
"htmlcov",
"prof", # Only if "profile" testenv ran
)
WORKFILES = (
".coverage",
".coverage.*",
"coverage.xml",
"diffcov.html",
"coverage-*.xml",
"diffcov-*.html",
)
TERM_WIDTH, TERM_HEIGHT = shutil.get_terminal_size()
# region #### Helper funcs ############################################################
def deldir(targ: Path, verbose: bool = True):
if not targ.exists():
return
rev_items = sorted(targ.rglob("*"), reverse=True)
for i, pp in enumerate(rev_items, start=1):
if pp.is_symlink():
pp.unlink()
elif pp.is_file():
pp.chmod(0o600)
pp.unlink()
elif pp.is_dir():
pp.chmod(0o700)
pp.rmdir()
else:
raise RuntimeError(f"Don't know how to handle '{pp}'")
if verbose and ((i & 0x3FF) == 0):
print(".", end="", flush=True)
targ.rmdir()
# endregion
# region #### Functional blocks #######################################################
def dump_env():
dumpdir = Path(DUMP_DIR)
dumpdir.mkdir(exist_ok=True)
with (dumpdir / f"ENV.{TOX_ENV_NAME}.py").open("wt") as fout:
print("ENV = \\", file=fout)
pprint.pprint(dict(os.environ), stream=fout)
def move_prof(verbose: bool = False):
"""Move profiling files to per-testenv dirs"""
profpath = Path("prof")
# fmt: off
prof_files = [
filepath
for fileglob in ("*.prof", "*.svg")
for filepath in profpath.glob(fileglob)
]
# fmt: on
if not prof_files:
return
targpath = profpath / TOX_ENV_NAME
if verbose:
print(f"Gathering to {targpath} ...", end="", flush=True)
os.makedirs(targpath, exist_ok=True)
for f in targpath.glob("*"):
f.unlink()
for f in prof_files:
if verbose:
print(".", end="", flush=True)
f.rename(targpath / f.name)
if verbose:
print(flush=True)
def pycache_clean(verbose=False):
"""Cleanup __pycache__ dirs & bytecode files (if any)"""
aiosmtpdpath = Path(".")
for i, f in enumerate(aiosmtpdpath.rglob("*.py[co]"), start=1):
if verbose and ((i & 0xFF) == 0):
print(".", end="", flush=True)
f.unlink()
for i, d in enumerate(aiosmtpdpath.rglob("__pycache__"), start=1):
if verbose and ((i & 0x7) == 0):
print(".", end="", flush=True)
deldir(d, verbose)
if verbose:
print(flush=True)
def rm_work():
"""Remove work dirs & files. They are .gitignore'd anyways."""
print(f"{Style.BRIGHT}Removing work dirs ... ", end="", flush=True)
# The reason we list WORKDIRS explicitly is because we don't want to accidentally
# bork IDE workdirs such as .idea/ or .vscode/
for dd in WORKDIRS:
print(dd, end="", flush=True)
deldir(Path(dd))
print(" ", end="", flush=True)
print(f"\n{Style.BRIGHT}Removing work files ...", end="", flush=True)
for fnglob in WORKFILES:
for fp in Path(".").glob(fnglob):
print(".", end="", flush=True)
fp.exists() and fp.unlink()
print(flush=True)
# endregion
# region #### Dispatchers #############################################################
def dispatch_prep():
"""
Prepare work directories and dump env vars
"""
dump_env()
def dispatch_gather():
"""
Gather inspection results into per-testenv dirs
"""
move_prof()
def dispatch_remcache():
"""
Remove all .py[co] files and all __pycache__ dirs
"""
pycache_clean()
def dispatch_superclean():
"""
Total cleaning of all test artifacts
"""
if TOX_ENV_NAME is not None:
raise RuntimeError("Do NOT run this inside tox!")
print(f"{Style.BRIGHT}Running pycache cleanup ...", end="")
pycache_clean(verbose=True)
rm_work()
# endregion
def get_opts(argv):
# From: https://stackoverflow.com/a/49999185/149900
class NoAction(argparse.Action):
def __init__(self, **kwargs):
kwargs.setdefault("default", argparse.SUPPRESS)
kwargs.setdefault("nargs", 0)
super().__init__(**kwargs)
def __call__(self, *args, **kwargs):
pass
dispers = {
name.replace("dispatch_", ""): inspect.getdoc(obj)
for name, obj in inspect.getmembers(sys.modules[__name__])
if name.startswith("dispatch_") and inspect.isfunction(obj)
}
parser = argparse.ArgumentParser()
parser.register("action", "no_action", NoAction)
parser.add_argument(
"--force", "-F", action="store_true", help="Force action even if in CI"
)
parser.add_argument(
"-A",
"--afterbar",
dest="afterbar",
default=0,
action="count",
help="Print horizontal bar after action. Repeat this option for more bars.",
)
# From: https://stackoverflow.com/a/49999185/149900
parser.add_argument(
"cmd", metavar="COMMAND", choices=sorted(dispers.keys()), help="(See below)"
)
cgrp = parser.add_argument_group(title="COMMAND is one of")
for name, doc in sorted(dispers.items()):
cgrp.add_argument(name, help=doc, action="no_action")
return parser.parse_args(argv)
def python_interp_details():
print(f"{Fore.CYAN}\u259E\u259E\u259E Python interpreter details:")
details = sys.version.splitlines() + sys.executable.splitlines()
for ln in details:
print(f" {Fore.CYAN}{ln}")
print(Style.RESET_ALL, end="", flush=True)
if __name__ == "__main__":
colorama_init is None or colorama_init(autoreset=True)
python_interp_details()
opts = get_opts(sys.argv[1:])
if os.environ.get("CI") == "true":
if not opts.force:
# All the housekeeping steps are pointless on Travis CI / GitHub Actions;
# they build and tear down their VMs everytime anyways.
print(
f"{Fore.YELLOW}Skipping housekeeping because we're in CI and "
f"--force not specified"
)
sys.exit(0)
else:
print(f"{Fore.YELLOW}We're in CI but --force is specified")
print(
f"{Fore.GREEN}>>> "
f"{Path(__file__).name} {opts.cmd}{Style.RESET_ALL}",
flush=True,
)
dispatcher = globals().get(f"dispatch_{opts.cmd}")
dispatcher()
for _ in range(opts.afterbar):
print(Fore.CYAN + ("\u2550" * (TERM_WIDTH - 1)))
# Defensive reset
print(Style.RESET_ALL, end="", flush=True)
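# Example invocations (illustrative; not part of the original script):
#   python housekeep.py prep            # dump current env vars into _dump/
#   python housekeep.py superclean -A   # full cleanup, then print a horizontal bar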
|
DevTools/glueToStrip.py | spiiin/CadEditor | 164 | 11104173 | # Script to glue all png images inside a folder into one strip file named "strip.png"
#Uses python clr module and PIL library
import sys, os
cadEditorDir = os.path.abspath("../CadEditor/") + "/"
sys.path.append(cadEditorDir)
import clr
clr.AddReference("CadEditor")
clr.AddReference("System")
import System
from CadEditor import UtilsGDI
tiles = [System.Drawing.Bitmap.FromFile(x) for x in os.listdir(".") if x.endswith(".png")]
import PIL
d = UtilsGDI.GlueImages(tiles, len(tiles), 1)
d.Save("strip.png") |
tests/v1/nodes.py | tombry/virlutils | 133 | 11104179 | from . import BaseTest
from click.testing import CliRunner
import requests_mock
from virl.cli.main import virl
class NodesTest(BaseTest):
def mock_response(self):
sim_response = {
"guest|TEST_ENV|virl|router1": {
"Status": "ACTIVE",
"simLaunch": "2018-04-04T14:25:12.916689",
"PortConsole": 17000,
"NodeName": "router1",
"simExpires": None,
"managementIP": "1.1.1.1",
"SerialPorts": 2,
"SimulationHost": "5.5.5.5",
"NodeSubtype": "IOSv",
"simStatus": "ACTIVE",
"Reachable": True,
"PortMonitor": 17001,
"managementProtocol": "telnet",
"managementProxy": "jumphost",
"VncConsole": False,
"simID": "TEST_ENV",
"Annotation": "REACHABLE"
},
"guest|TEST_ENV|virl|router2": {
"Status": "BUILDING",
"simLaunch": "2018-04-04T14:25:12.916689",
"PortConsole": 17003,
"NodeName": "router2",
"simExpires": None,
"managementIP": "2.2.2.2",
"SerialPorts": 2,
"SimulationHost": "5.5.5.5",
"NodeSubtype": "IOSv",
"simStatus": "ACTIVE",
"Reachable": True,
"PortMonitor": 17004,
"managementProtocol": "telnet",
"managementProxy": "jumphost",
"VncConsole": False,
"simID": "TEST_ENV",
"Annotation": "REACHABLE"
},
"guest|TEST_ENV|virl|router3": {
"Status": "",
"simLaunch": "2018-04-04T14:25:12.916689",
"PortConsole": 17003,
"NodeName": "router2",
"simExpires": None,
"managementIP": "2.2.2.2",
"SerialPorts": 2,
"SimulationHost": "5.5.5.5",
"NodeSubtype": "IOSv",
"simStatus": "ACTIVE",
"Reachable": True,
"PortMonitor": 17004,
"managementProtocol": "telnet",
"managementProxy": "jumphost",
"VncConsole": False,
"simID": "TEST_ENV",
"Annotation": "REACHABLE"
},
"guest|TEST_ENV|virl|mgmt-lxc": {
"Status": "",
"simLaunch": "2018-04-04T14:25:12.916689",
"PortConsole": 17003,
"NodeName": "router2",
"simExpires": None,
"managementIP": "2.2.2.2",
"SerialPorts": 2,
"SimulationHost": "5.5.5.5",
"NodeSubtype": "LXC FLAT",
"simStatus": "ACTIVE",
"Reachable": True,
"PortMonitor": 17004,
"managementProtocol": "telnet",
"managementProxy": "jumphost",
"VncConsole": False,
"simID": "TEST_ENV",
"Annotation": "REACHABLE"
}
}
return sim_response
def test_virl_nodes(self):
with requests_mock.mock() as m:
# Mock the request to return what we expect from the API.
m.get('http://localhost:19399/roster/rest/',
json=self.mock_response())
runner = CliRunner()
result = runner.invoke(virl, ["nodes"])
self.assertEqual(0, result.exit_code)
def test_virl_nodes_in_env(self):
with requests_mock.mock() as m:
# Mock the request to return what we expect from the API.
m.get('http://localhost:19399/roster/rest/',
json=self.mock_response())
runner = CliRunner()
result = runner.invoke(virl, ["nodes", "foo"])
self.assertEqual(0, result.exit_code)
|
function_scheduling_distributed_framework/function_result_web/app.py | ydf0509/distributed_framework | 333 | 11104197 | # -*- coding: utf-8 -*-
# @Author : ydf
# @Time : 2019/9/18 0018 14:46
import datetime
import json
from flask import render_template, Flask, request, url_for, jsonify, flash, redirect
from flask_bootstrap import Bootstrap
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField
from wtforms.validators import DataRequired, Length
from flask_login import login_user, logout_user, login_required, LoginManager, UserMixin
from function_scheduling_distributed_framework import nb_print
from function_scheduling_distributed_framework.function_result_web.functions import get_cols, query_result, get_speed, Statistic
app = Flask(__name__)
app.secret_key = 'mtfy54321'
app.config['JSON_AS_ASCII'] = False
bootstrap = Bootstrap(app)
login_manager = LoginManager()
login_manager.login_view = 'login'
login_manager.login_message_category = 'info'
login_manager.login_message = 'Access denied.'
login_manager.init_app(app)
class User(UserMixin):
pass
users = [
{'id': 'Tom', 'user_name': 'Tom', 'password': '<PASSWORD>'},
{'id': 'user', 'user_name': 'user', 'password': '<PASSWORD>'},
{'id': 'admin', 'user_name': 'admin', 'password': '<PASSWORD>'}
]
def query_user(user_name):
for user in users:
if user_name == user['user_name']:
return user
@login_manager.user_loader
def load_user(user_id):
if query_user(user_id) is not None:
curr_user = User()
curr_user.id = user_id
return curr_user
class LoginForm(FlaskForm):
    user_name = StringField(u'Username', validators=[DataRequired(), Length(3, 64)])
    password = PasswordField(u'Password', validators=[DataRequired(), Length(3, 64)])
    remember_me = BooleanField(u'Remember me')
@app.route('/login', methods=['GET', 'POST'])
def login():
form = LoginForm()
if request.method == 'POST':
nb_print(form.validate())
nb_print(form.password.data)
nb_print(form.user_name.data)
nb_print(form.user_name.errors)
nb_print(form.password.errors)
if form.validate_on_submit():
user = query_user(form.user_name.data)
if user is not None and request.form['password'] == user['password']:
curr_user = User()
curr_user.id = form.user_name.data
                # Log the user in via Flask-Login's login_user method
nb_print(form.remember_me.data)
login_user(curr_user, remember=form.remember_me.data, duration=datetime.timedelta(days=7))
return redirect(url_for('index'))
            flash('Incorrect username or password', category='error')
# if form.user_name.data == 'user' and form.password.data == '<PASSWORD>':
# login_user(form.user_name.data, form.remember_me.data)
# return redirect(url_for('index'))
# else:
    #     flash('Incorrect account or password', category='error')
# return render_template('login4.html', form=form)
return render_template('login.html', form=form)
@app.route("/logout")
@login_required
def logout():
logout_user()
return redirect(url_for('login'))
@app.route('/')
@login_required
def index():
return render_template('index.html')
@app.route('/query_cols')
@login_required
def query_cols_view():
nb_print(request.args)
return jsonify(get_cols(request.args.get('col_name_search')))
@app.route('/query_result')
@login_required
def query_result_view():
nb_print(request.values.to_dict())
return jsonify(query_result(**request.values.to_dict()))
@app.route('/speed_stats')
@login_required
def speed_stats():
return jsonify(get_speed(**request.values.to_dict()))
@app.route('/speed_statistic_for_echarts')
@login_required
def speed_statistic_for_echarts():
stat = Statistic(request.args.get('col_name'))
stat.build_result()
return jsonify(stat.result)
if __name__ == '__main__':
app.jinja_env.auto_reload = True
with app.test_request_context():
print(url_for('query_cols_view'))
app.run(debug=True, threaded=True, host='0.0.0.0', port=27018)
'''
# Step 1: export PYTHONPATH=<your project root directory>. This lets the web app read the
# configuration in distributed_frame_config.py under your project root.
# For example: export PYTHONPATH=/home/ydf/codes/ydfhome
  or export PYTHONPATH=./  (./ is a relative path; it assumes you have already cd'ed into the project root, and a full absolute path works as well)
Step 2
On Windows, run: python3 -m function_scheduling_distributed_framework.function_result_web.app
On Linux, the following performs better, although the Windows command also works:
gunicorn -w 4 --threads=30 --bind 0.0.0.0:27018 function_scheduling_distributed_framework.function_result_web.app:app
'''
|
protonfixes/gamefixes/253230.py | Citiroller/protonfixes | 213 | 11104217 | """ Game fix for A Hat in Time
"""
#pylint: disable=C0103
from protonfixes import util
from protonfixes.protonversion import DeprecatedSince
@DeprecatedSince("5.0-3")
def main():
""" Enables D9VK
"""
util.enable_d9vk()
|
python-for-multimedia/extract-frames-from-video/extract_frames_opencv.py | caesarcc/python-code-tutorials | 1,059 | 11104237 | from datetime import timedelta
import cv2
import numpy as np
import os
# e.g. for a video of 30 seconds duration, saving 10 frames per second gives 300 saved frames in total
SAVING_FRAMES_PER_SECOND = 10
def format_timedelta(td):
"""Utility function to format timedelta objects in a cool way (e.g 00:00:20.05)
omitting microseconds and retaining milliseconds"""
result = str(td)
try:
result, ms = result.split(".")
except ValueError:
return result + ".00".replace(":", "-")
ms = int(ms)
ms = round(ms / 1e4)
return f"{result}.{ms:02}".replace(":", "-")
def get_saving_frames_durations(cap, saving_fps):
"""A function that returns the list of durations where to save the frames"""
s = []
# get the clip duration by dividing number of frames by the number of frames per second
clip_duration = cap.get(cv2.CAP_PROP_FRAME_COUNT) / cap.get(cv2.CAP_PROP_FPS)
# use np.arange() to make floating-point steps
for i in np.arange(0, clip_duration, 1 / saving_fps):
s.append(i)
return s
def main(video_file):
filename, _ = os.path.splitext(video_file)
filename += "-opencv"
# make a folder by the name of the video file
if not os.path.isdir(filename):
os.mkdir(filename)
# read the video file
cap = cv2.VideoCapture(video_file)
# get the FPS of the video
fps = cap.get(cv2.CAP_PROP_FPS)
# if the SAVING_FRAMES_PER_SECOND is above video FPS, then set it to FPS (as maximum)
saving_frames_per_second = min(fps, SAVING_FRAMES_PER_SECOND)
# get the list of duration spots to save
saving_frames_durations = get_saving_frames_durations(cap, saving_frames_per_second)
# start the loop
count = 0
while True:
is_read, frame = cap.read()
if not is_read:
# break out of the loop if there are no frames to read
break
# get the duration by dividing the frame count by the FPS
frame_duration = count / fps
try:
# get the earliest duration to save
closest_duration = saving_frames_durations[0]
except IndexError:
# the list is empty, all duration frames were saved
break
if frame_duration >= closest_duration:
# if closest duration is less than or equals the frame duration,
# then save the frame
frame_duration_formatted = format_timedelta(timedelta(seconds=frame_duration))
cv2.imwrite(os.path.join(filename, f"frame{frame_duration_formatted}.jpg"), frame)
# drop the duration spot from the list, since this duration spot is already saved
try:
saving_frames_durations.pop(0)
except IndexError:
pass
# increment the frame count
count += 1
if __name__ == "__main__":
import sys
video_file = sys.argv[1]
main(video_file) |
nebullvm/inference_learners/__init__.py | nebuly-ai/nebullvm | 821 | 11104245 | # flake8: noqa
from nebullvm.inference_learners.base import (
BaseInferenceLearner,
LearnerMetadata,
PytorchBaseInferenceLearner,
TensorflowBaseInferenceLearner,
InferenceLearnerWrapper,
)
from nebullvm.inference_learners.onnx import (
ONNXInferenceLearner,
PytorchONNXInferenceLearner,
TensorflowONNXInferenceLearner,
)
from nebullvm.inference_learners.openvino import (
OpenVinoInferenceLearner,
PytorchOpenVinoInferenceLearner,
TensorflowOpenVinoInferenceLearner,
)
from nebullvm.inference_learners.tensor_rt import (
NvidiaInferenceLearner,
PytorchNvidiaInferenceLearner,
TensorflowNvidiaInferenceLearner,
)
from nebullvm.inference_learners.tvm import (
ApacheTVMInferenceLearner,
PytorchApacheTVMInferenceLearner,
TensorflowApacheTVMInferenceLearner,
)
__all__ = [k for k in globals().keys() if not k.startswith("_")]
|
QAdemo_base1/tmodel2.py | WenRichard/QAmodel-for-Retrivalchatbot | 403 | 11104282 | # -*- coding: utf-8 -*-
# @Time : 2019/4/4 15:03
# @Author : Alan
# @Email : <EMAIL>
# @File : tmodel2.py
# @Software: PyCharm
""" 利用倒排表进行优化 """
import pandas as pd
import matplotlib as mpl
import numpy as np
from nltk.probability import FreqDist
import time
from jiebaSegment import *
from sentenceSimilarity import SentenceSimilarity
mpl.rcParams['font.sans-serif'] = ['Microsoft YaHei'] # enable chinese
def read_corpus():
qList = []
    # list of keyword lists, one per question
qList_kw = []
aList = []
data = pd.read_csv('./data/qa_.csv', header=None)
data_ls = np.array(data).tolist()
for t in data_ls:
qList.append(t[0])
qList_kw.append(seg.cut(t[0]))
aList.append(t[1])
return qList_kw, qList, aList
def plot_words(wordList):
fDist = FreqDist(wordList)
#print(fDist.most_common())
print("单词总数: ",fDist.N())
print("不同单词数: ",fDist.B())
fDist.plot(10)
def invert_idxTable(qList_kw):  # build a simple inverted index
invertTable = {}
for idx, tmpLst in enumerate(qList_kw):
for kw in tmpLst:
if kw in invertTable.keys():
invertTable[kw].append(idx)
else:
invertTable[kw] = [idx]
return invertTable
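# Illustrative example (not part of the original script): for
# qList_kw = [['deposit', 'rate'], ['card', 'rate']], invert_idxTable returns
# {'deposit': [0], 'rate': [0, 1], 'card': [1]}, i.e. each keyword maps to the
# indices of the questions that contain it.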
def filter_questionByInvertTab(inputQuestionKW, questionList, answerList, invertTable):
idxLst = []
questions = []
answers = []
for kw in inputQuestionKW:
if kw in invertTable.keys():
idxLst.extend(invertTable[kw])
idxSet = set(idxLst)
for idx in idxSet:
questions.append(questionList[idx])
answers.append(answerList[idx])
return questions, answers
if __name__ == '__main__':
    # load the external user dictionary
seg = Seg()
seg.load_userdict('./userdict/userdict.txt')
    # read the data
qList_kw, questionList, answerList = read_corpus()
"""简单的倒排索引"""
# 计算倒排表
invertTable = invert_idxTable(qList_kw)
while True:
question = input("请输入问题(q退出): ")
time1 = time.time()
if question == 'q':
break
inputQuestionKW = seg.cut(question)
        # use keyword matching to get a candidate set of questions similar to the input
questionList_s, answerList_s = filter_questionByInvertTab(inputQuestionKW, questionList, answerList,
invertTable)
print(questionList_s)
if len(questionList_s) > 1:
questionList = questionList_s
answerList = answerList_s
        # initialize the model
ss = SentenceSimilarity(seg)
ss.set_sentences(questionList)
        ss.TfidfModel()  # TF-IDF model
        # ss.LsiModel()  # LSI model
        # ss.LdaModel()  # LDA model
question_k = ss.similarity_k(question, 5)
print("亲,我们给您找到的答案是: {}".format(answerList[question_k[0][0]]))
for idx, score in zip(*question_k):
print("same questions: {}, score: {}".format(questionList[idx], score))
time2 = time.time()
cost = time2 - time1
print('Time cost: {} s'.format(cost))
|
examples/constrained_decoding/tok.py | kayoyin/DialogueMT | 143 | 11104291 | #!/usr/bin/env python3
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import sys
import sacremoses
def main(args):
"""Tokenizes, preserving tabs"""
mt = sacremoses.MosesTokenizer(lang=args.lang)
def tok(s):
return mt.tokenize(s, return_str=True)
for line in sys.stdin:
parts = list(map(tok, line.split("\t")))
print(*parts, sep="\t", flush=True)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--lang', '-l', default='en')
parser.add_argument('--penn', '-p', action='store_true')
parser.add_argument('--fields', '-f', help="fields to tokenize")
args = parser.parse_args()
main(args)
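# Example invocation (illustrative; not part of the original script): tokenize every
# tab-separated field on stdin while keeping the tabs themselves intact, e.g.
#   printf 'Hello, world.\tSecond field.\n' | python tok.py --lang en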
|
examples/code/data/corpus_twenty_news.py | chschroeder/small-text | 218 | 11104322 | from sklearn.datasets import fetch_20newsgroups
def get_twenty_newsgroups_corpus(categories=['rec.sport.baseball', 'rec.sport.hockey']):
train = fetch_20newsgroups(subset='train', remove=('headers', 'footers', 'quotes'),
categories=categories)
test = fetch_20newsgroups(subset='test', remove=('headers', 'footers', 'quotes'),
categories=categories)
return train, test
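# Illustrative usage sketch (not part of the original example module):
if __name__ == "__main__":
    train, test = get_twenty_newsgroups_corpus()
    print(len(train.data), "train docs /", len(test.data), "test docs")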
|
tests/misc/sys_settrace_subdir/sys_settrace_generic.py | sebastien-riou/micropython | 13,648 | 11104324 | print("Now comes the language constructions tests.")
# function
def test_func():
def test_sub_func():
print("test_function")
test_sub_func()
# closure
def test_closure(msg):
def make_closure():
print(msg)
return make_closure
# exception
def test_exception():
try:
raise Exception("test_exception")
except Exception:
pass
finally:
pass
# listcomp
def test_listcomp():
print("test_listcomp", [x for x in range(3)])
# lambda
def test_lambda():
func_obj_1 = lambda a, b: a + b
print(func_obj_1(10, 20))
# import
def test_import():
from sys_settrace_subdir import sys_settrace_importme
sys_settrace_importme.dummy()
sys_settrace_importme.saysomething()
# class
class TLClass:
def method():
pass
pass
def test_class():
class TestClass:
__anynum = -9
def method(self):
print("test_class_method")
self.__anynum += 1
def prprty_getter(self):
return self.__anynum
def prprty_setter(self, what):
self.__anynum = what
prprty = property(prprty_getter, prprty_setter)
cls = TestClass()
cls.method()
print("test_class_property", cls.prprty)
cls.prprty = 12
print("test_class_property", cls.prprty)
def run_tests():
test_func()
test_closure_inst = test_closure("test_closure")
test_closure_inst()
test_exception()
test_listcomp()
test_lambda()
test_class()
test_import()
print("And it's done!")
|
contrib/to-rm-mx-contrib-text/d2lzh/text/vocab.py | luzhongqiu/d2l-zh | 27,074 | 11104331 | class Vocabulary:
def __init__(self, counter, min_freq=0, reserved_tokens=None):
if reserved_tokens is None:
reserved_tokens = []
# Sort according to frequencies
self.token_freqs = sorted(counter.items(), key=lambda x: x[0])
self.token_freqs.sort(key=lambda x: x[1], reverse=True)
self.unk, uniq_tokens = 0, ['<unk>'] + reserved_tokens
uniq_tokens += [token for token, freq in self.token_freqs
if freq >= min_freq and token not in uniq_tokens]
self.idx_to_token, self.token_to_idx = [], dict()
for token in uniq_tokens:
self.idx_to_token.append(token)
self.token_to_idx[token] = len(self.idx_to_token) - 1
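        # Illustrative example: with counter {'a': 3, 'b': 1} and no reserved tokens,
        # idx_to_token becomes ['<unk>', 'a', 'b'] and token_to_idx maps them to 0, 1, 2.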
def __len__(self):
return len(self.idx_to_token)
def to_indices(self, tokens):
if not isinstance(tokens, (list, tuple)):
return self.token_to_idx.get(tokens, self.unk)
        return [self.to_indices(token) for token in tokens]
|
recipes/Python/52208_Replace_if_while_statement_expressions/recipe-52208.py | tdiprima/code | 2,023 | 11104351 | Before:
if i < 1:
doSomething()
After:
    if 0: # i < 1
doSomething()
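# Keeping the original test in a comment (rather than deleting it) documents what was
# disabled and makes it easy to re-enable the branch later.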
|
crawler/tests/test_log_retry_middleware.py | shinji-s/scrapy-cluster | 1,108 | 11104390 | <reponame>shinji-s/scrapy-cluster<gh_stars>1000+
'''
Offline tests
'''
from unittest import TestCase
import mock
from mock import MagicMock
from crawling.log_retry_middleware import LogRetryMiddleware
class TestLogRetryMiddlewareStats(TestCase):
@mock.patch('crawling.log_retry_middleware.LogRetryMiddleware' \
'.setup')
def setUp(self, s):
self.lrm = LogRetryMiddleware(MagicMock())
self.lrm.settings = {}
self.lrm.name = 'OverrideSpider'
self.lrm.redis_conn = MagicMock()
self.lrm.logger = MagicMock()
self.lrm.settings['STATS_CYCLE'] = 5
self.lrm.settings['STATS_TIMES'] = []
self.lrm._get_hostname = MagicMock(return_value='host1')
def test_lrm_stats_setup(self):
self.lrm.stats_dict = {}
# test nothing
self.lrm._setup_stats_status_codes()
self.assertEqual([str(x) for x in self.lrm.stats_dict.keys()], ['lifetime'])
# test good/bad rolling stats
self.lrm.stats_dict = {}
self.lrm.settings['STATS_TIMES'] = [
'SECONDS_15_MINUTE',
'SECONDS_1_HOUR',
'SECONDS_DUMB',
]
good = [
'lifetime', # for totals, not DUMB
'900',
'3600',
]
# check that both keys are set up
self.lrm._setup_stats_status_codes()
self.assertEqual(
sorted([str(x) for x in self.lrm.stats_dict.keys()]),
sorted(good))
k1 = 'stats:crawler:host1:OverrideSpider:504'
for time_key in self.lrm.stats_dict:
if time_key == 0:
self.assertEqual(
self.lrm.stats_dict[0].key,
'{k}:lifetime'.format(k=k1)
)
else:
self.assertEqual(
self.lrm.stats_dict[time_key].key,
'{k}:{t}'.format(k=k1, t=time_key)
)
|
pl_bolts/models/rl/sac_model.py | lavoiems/lightning-bolts | 504 | 11104398 | <reponame>lavoiems/lightning-bolts
"""Soft Actor Critic."""
import argparse
from typing import Dict, List, Tuple
import numpy as np
import torch
from pytorch_lightning import LightningModule, Trainer, seed_everything
from pytorch_lightning.callbacks import ModelCheckpoint
from torch import Tensor, optim
from torch.nn import functional as F
from torch.optim.optimizer import Optimizer
from torch.utils.data import DataLoader
from pl_bolts.datamodules.experience_source import Experience, ExperienceSourceDataset
from pl_bolts.models.rl.common.agents import SoftActorCriticAgent
from pl_bolts.models.rl.common.memory import MultiStepBuffer
from pl_bolts.models.rl.common.networks import MLP, ContinuousMLP
from pl_bolts.utils import _GYM_AVAILABLE
from pl_bolts.utils.warnings import warn_missing_pkg
if _GYM_AVAILABLE:
import gym
else: # pragma: no cover
warn_missing_pkg("gym")
Env = object
class SAC(LightningModule):
def __init__(
self,
env: str,
eps_start: float = 1.0,
eps_end: float = 0.02,
eps_last_frame: int = 150000,
sync_rate: int = 1,
gamma: float = 0.99,
policy_learning_rate: float = 3e-4,
q_learning_rate: float = 3e-4,
target_alpha: float = 5e-3,
batch_size: int = 128,
replay_size: int = 1000000,
warm_start_size: int = 10000,
avg_reward_len: int = 100,
min_episode_reward: int = -21,
seed: int = 123,
batches_per_epoch: int = 10000,
n_steps: int = 1,
**kwargs,
):
super().__init__()
# Environment
self.env = gym.make(env)
self.test_env = gym.make(env)
self.obs_shape = self.env.observation_space.shape
self.n_actions = self.env.action_space.shape[0]
# Model Attributes
self.buffer = None
self.dataset = None
self.policy = None
self.q1 = None
self.q2 = None
self.target_q1 = None
self.target_q2 = None
self.build_networks()
self.agent = SoftActorCriticAgent(self.policy)
# Hyperparameters
self.save_hyperparameters()
# Metrics
self.total_episode_steps = [0]
self.total_rewards = [0]
self.done_episodes = 0
self.total_steps = 0
# Average Rewards
self.avg_reward_len = avg_reward_len
for _ in range(avg_reward_len):
self.total_rewards.append(torch.tensor(min_episode_reward, device=self.device))
self.avg_rewards = float(np.mean(self.total_rewards[-self.avg_reward_len :]))
self.state = self.env.reset()
self.automatic_optimization = False
    def run_n_episodes(self, env, n_episodes: int = 1) -> List[int]:
        """Carries out N episodes of the environment with the current agent without exploration.
        Args:
            env: environment to use, either train environment or test environment
            n_episodes: number of episodes to run
        """
        total_rewards = []
        for _ in range(n_episodes):
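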
episode_state = env.reset()
done = False
episode_reward = 0
while not done:
action = self.agent.get_action(episode_state, self.device)
next_state, reward, done, _ = env.step(action[0])
episode_state = next_state
episode_reward += reward
total_rewards.append(episode_reward)
return total_rewards
def populate(self, warm_start: int) -> None:
"""Populates the buffer with initial experience."""
if warm_start > 0:
self.state = self.env.reset()
for _ in range(warm_start):
action = self.agent(self.state, self.device)
next_state, reward, done, _ = self.env.step(action[0])
exp = Experience(state=self.state, action=action[0], reward=reward, done=done, new_state=next_state)
self.buffer.append(exp)
self.state = next_state
if done:
self.state = self.env.reset()
def build_networks(self) -> None:
"""Initializes the SAC policy and q networks (with targets)"""
action_bias = torch.from_numpy((self.env.action_space.high + self.env.action_space.low) / 2)
action_scale = torch.from_numpy((self.env.action_space.high - self.env.action_space.low) / 2)
self.policy = ContinuousMLP(self.obs_shape, self.n_actions, action_bias=action_bias, action_scale=action_scale)
concat_shape = [self.obs_shape[0] + self.n_actions]
self.q1 = MLP(concat_shape, 1)
self.q2 = MLP(concat_shape, 1)
self.target_q1 = MLP(concat_shape, 1)
self.target_q2 = MLP(concat_shape, 1)
self.target_q1.load_state_dict(self.q1.state_dict())
self.target_q2.load_state_dict(self.q2.state_dict())
def soft_update_target(self, q_net, target_net):
"""Update the weights in target network using a weighted sum.
w_target := (1-a) * w_target + a * w_q
Args:
q_net: the critic (q) network
target_net: the target (q) network
"""
for q_param, target_param in zip(q_net.parameters(), target_net.parameters()):
target_param.data.copy_(
(1.0 - self.hparams.target_alpha) * target_param.data + self.hparams.target_alpha * q_param
)
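        # Illustrative arithmetic for the update above (not executed): with target_alpha = 5e-3,
        # a target weight of 1.0 and a critic weight of 0.0 give (1 - 0.005) * 1.0 + 0.005 * 0.0 = 0.995,
        # so the target network tracks the critic as a slow exponential moving average.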
def forward(self, x: Tensor) -> Tensor:
"""Passes in a state x through the network and gets the q_values of each action as an output.
Args:
x: environment state
Returns:
q values
"""
output = self.policy(x).sample()
return output
def train_batch(
self,
) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]:
"""Contains the logic for generating a new batch of data to be passed to the DataLoader.
Returns:
            yields an Experience tuple containing the state, action, reward, done and next_state.
"""
episode_reward = 0
episode_steps = 0
while True:
self.total_steps += 1
action = self.agent(self.state, self.device)
next_state, r, is_done, _ = self.env.step(action[0])
episode_reward += r
episode_steps += 1
exp = Experience(state=self.state, action=action[0], reward=r, done=is_done, new_state=next_state)
self.buffer.append(exp)
self.state = next_state
if is_done:
self.done_episodes += 1
self.total_rewards.append(episode_reward)
self.total_episode_steps.append(episode_steps)
self.avg_rewards = float(np.mean(self.total_rewards[-self.avg_reward_len :]))
self.state = self.env.reset()
episode_steps = 0
episode_reward = 0
states, actions, rewards, dones, new_states = self.buffer.sample(self.hparams.batch_size)
for idx, _ in enumerate(dones):
yield states[idx], actions[idx], rewards[idx], dones[idx], new_states[idx]
# Simulates epochs
if self.total_steps % self.hparams.batches_per_epoch == 0:
break
def loss(self, batch: Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]) -> Tuple[Tensor, Tensor, Tensor]:
"""Calculates the loss for SAC which contains a total of 3 losses.
Args:
batch: a batch of states, actions, rewards, dones, and next states
"""
states, actions, rewards, dones, next_states = batch
rewards = rewards.unsqueeze(-1)
dones = dones.float().unsqueeze(-1)
# actor
dist = self.policy(states)
new_actions, new_logprobs = dist.rsample_and_log_prob()
new_logprobs = new_logprobs.unsqueeze(-1)
new_states_actions = torch.cat((states, new_actions), 1)
new_q1_values = self.q1(new_states_actions)
new_q2_values = self.q2(new_states_actions)
new_qmin_values = torch.min(new_q1_values, new_q2_values)
policy_loss = (new_logprobs - new_qmin_values).mean()
# critic
states_actions = torch.cat((states, actions), 1)
q1_values = self.q1(states_actions)
q2_values = self.q2(states_actions)
with torch.no_grad():
next_dist = self.policy(next_states)
new_next_actions, new_next_logprobs = next_dist.rsample_and_log_prob()
new_next_logprobs = new_next_logprobs.unsqueeze(-1)
new_next_states_actions = torch.cat((next_states, new_next_actions), 1)
next_q1_values = self.target_q1(new_next_states_actions)
next_q2_values = self.target_q2(new_next_states_actions)
next_qmin_values = torch.min(next_q1_values, next_q2_values) - new_next_logprobs
target_values = rewards + (1.0 - dones) * self.hparams.gamma * next_qmin_values
q1_loss = F.mse_loss(q1_values, target_values)
q2_loss = F.mse_loss(q2_values, target_values)
return policy_loss, q1_loss, q2_loss
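    # Summary of loss() above (illustrative notation; note this implementation omits an entropy
    # temperature coefficient): the actor minimises E[log pi(a|s) - min(Q1(s, a), Q2(s, a))],
    # while each critic regresses toward r + gamma * (1 - done) * (min(Q1'(s', a'), Q2'(s', a')) - log pi(a'|s')).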
def training_step(self, batch: Tuple[Tensor, Tensor], _):
"""Carries out a single step through the environment to update the replay buffer. Then calculates loss
        based on the minibatch received.
Args:
batch: current mini batch of replay data
_: batch number, not used
"""
policy_optim, q1_optim, q2_optim = self.optimizers()
policy_loss, q1_loss, q2_loss = self.loss(batch)
policy_optim.zero_grad()
self.manual_backward(policy_loss)
policy_optim.step()
q1_optim.zero_grad()
self.manual_backward(q1_loss)
q1_optim.step()
q2_optim.zero_grad()
self.manual_backward(q2_loss)
q2_optim.step()
# Soft update of target network
if self.global_step % self.hparams.sync_rate == 0:
self.soft_update_target(self.q1, self.target_q1)
self.soft_update_target(self.q2, self.target_q2)
self.log_dict(
{
"total_reward": self.total_rewards[-1],
"avg_reward": self.avg_rewards,
"policy_loss": policy_loss,
"q1_loss": q1_loss,
"q2_loss": q2_loss,
"episodes": self.done_episodes,
"episode_steps": self.total_episode_steps[-1],
}
)
def test_step(self, *args, **kwargs) -> Dict[str, Tensor]:
"""Evaluate the agent for 10 episodes."""
test_reward = self.run_n_episodes(self.test_env, 1)
avg_reward = sum(test_reward) / len(test_reward)
return {"test_reward": avg_reward}
def test_epoch_end(self, outputs) -> Dict[str, Tensor]:
"""Log the avg of the test results."""
rewards = [x["test_reward"] for x in outputs]
avg_reward = sum(rewards) / len(rewards)
self.log("avg_test_reward", avg_reward)
return {"avg_test_reward": avg_reward}
def _dataloader(self) -> DataLoader:
"""Initialize the Replay Buffer dataset used for retrieving experiences."""
self.buffer = MultiStepBuffer(self.hparams.replay_size, self.hparams.n_steps)
self.populate(self.hparams.warm_start_size)
self.dataset = ExperienceSourceDataset(self.train_batch)
return DataLoader(dataset=self.dataset, batch_size=self.hparams.batch_size)
def train_dataloader(self) -> DataLoader:
"""Get train loader."""
return self._dataloader()
def test_dataloader(self) -> DataLoader:
"""Get test loader."""
return self._dataloader()
def configure_optimizers(self) -> Tuple[Optimizer]:
"""Initialize Adam optimizer."""
policy_optim = optim.Adam(self.policy.parameters(), self.hparams.policy_learning_rate)
q1_optim = optim.Adam(self.q1.parameters(), self.hparams.q_learning_rate)
q2_optim = optim.Adam(self.q2.parameters(), self.hparams.q_learning_rate)
return policy_optim, q1_optim, q2_optim
@staticmethod
def add_model_specific_args(
arg_parser: argparse.ArgumentParser,
) -> argparse.ArgumentParser:
"""Adds arguments for DQN model.
Note:
These params are fine tuned for Pong env.
Args:
arg_parser: parent parser
"""
arg_parser.add_argument(
"--sync_rate",
type=int,
default=1,
help="how many frames do we update the target network",
)
arg_parser.add_argument(
"--replay_size",
type=int,
default=1000000,
help="capacity of the replay buffer",
)
arg_parser.add_argument(
"--warm_start_size",
type=int,
default=10000,
help="how many samples do we use to fill our buffer at the start of training",
)
arg_parser.add_argument("--batches_per_epoch", type=int, default=10000, help="number of batches in an epoch")
arg_parser.add_argument("--batch_size", type=int, default=128, help="size of the batches")
arg_parser.add_argument("--policy_lr", type=float, default=3e-4, help="policy learning rate")
arg_parser.add_argument("--q_lr", type=float, default=3e-4, help="q learning rate")
arg_parser.add_argument("--env", type=str, required=True, help="gym environment tag")
arg_parser.add_argument("--gamma", type=float, default=0.99, help="discount factor")
arg_parser.add_argument(
"--avg_reward_len",
type=int,
default=100,
help="how many episodes to include in avg reward",
)
arg_parser.add_argument(
"--n_steps",
type=int,
default=1,
help="how many frames do we update the target network",
)
return arg_parser
def cli_main():
parser = argparse.ArgumentParser(add_help=False)
# trainer args
parser = Trainer.add_argparse_args(parser)
# model args
parser = SAC.add_model_specific_args(parser)
args = parser.parse_args()
model = SAC(**args.__dict__)
# save checkpoints based on avg_reward
checkpoint_callback = ModelCheckpoint(save_top_k=1, monitor="avg_reward", mode="max", verbose=True)
seed_everything(123)
trainer = Trainer.from_argparse_args(args, deterministic=True, callbacks=checkpoint_callback)
trainer.fit(model)
if __name__ == "__main__":
cli_main()
|
tests/test_jupyter.py | traviscook21/rich | 33,622 | 11104455 | from rich.console import Console
def test_jupyter():
console = Console(force_jupyter=True)
assert console.width == 93
assert console.color_system == "truecolor"
|
lv_set/potential_func.py | Ramesh-X/Level-Set | 122 | 11104505 | <reponame>Ramesh-X/Level-Set
"""
This python code demonstrates an edge-based active contour model as an application of the
Distance Regularized Level Set Evolution (DRLSE) formulation in the following paper:
<NAME>, <NAME>, <NAME>, <NAME>, "Distance Regularized Level Set Evolution and Its Application to Image Segmentation",
IEEE Trans. Image Processing, vol. 19 (12), pp. 3243-3254, 2010.
Author: <NAME>
E-mail: <EMAIL>
Released Under MIT License
"""
# use double-well potential in Eq. (16), which is good for both edge and region based models
DOUBLE_WELL = 'double-well'
# use single well potential p1(s)=0.5*(s-1)^2, which is good for region-based model
SINGLE_WELL = 'single-well'
|
obsei/process_workflow.py | kuutsav/obsei | 359 | 11104516 | <gh_stars>100-1000
import logging
from obsei.configuration import ObseiConfiguration
logger = logging.getLogger(__name__)
# Extract config via yaml file using `config_path` and `config_filename`
obsei_configuration = ObseiConfiguration()
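# For example (illustrative paths, following the `config_path`/`config_filename` comment above):
# obsei_configuration = ObseiConfiguration(config_path="./config", config_filename="workflow.yaml")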
# Initialize objects using configuration
source_config = obsei_configuration.initialize_instance("source_config")
source = obsei_configuration.initialize_instance("source")
analyzer = obsei_configuration.initialize_instance("analyzer")
analyzer_config = obsei_configuration.initialize_instance("analyzer_config")
sink_config = obsei_configuration.initialize_instance("sink_config")
sink = obsei_configuration.initialize_instance("sink")
# This will fetch information from configured source ie twitter, app store etc
source_response_list = source.lookup(source_config)
for idx, source_response in enumerate(source_response_list):
logger.info(f"source_response#'{idx}'='{vars(source_response)}'")
# This will execute analyzer (Sentiment, classification etc) on source data with provided analyzer_config
# The analyzer will write its output to `segmented_data` inside `analyzer_response`
analyzer_response_list = analyzer.analyze_input(
source_response_list=source_response_list,
analyzer_config=analyzer_config
)
for idx, analyzer_response in enumerate(analyzer_response_list):
logger.info(f"source_response#'{idx}'='{vars(analyzer_response)}'")
# This will send analyzed output to configure sink ie Slack, Zendesk etc
sink_response_list = sink.send_data(analyzer_response_list, sink_config)
for idx, sink_response in enumerate(sink_response_list):
logger.info(f"source_response#'{idx}'='{vars(sink_response)}'")
|
test/small_test.py | screamingskulls/sofi | 402 | 11104536 | <gh_stars>100-1000
from sofi.ui import Small
def test_basic():
assert(str(Small()) == "<small></small>")
def test_text():
assert(str(Small("text")) == "<small>text</small>")
def test_custom_class_ident_style_and_attrs():
assert(str(Small("text", cl='abclass', ident='123', style="font-size:0.9em;", attrs={"data-test": 'abc'}))
== "<small id=\"123\" class=\"abclass\" style=\"font-size:0.9em;\" data-test=\"abc\">text</small>")
|
src/listeners/hover_description.py | PranjalPansuriya/JavaScriptEnhancements | 690 | 11104540 | import sublime, sublime_plugin
import cgi, os, re
from ..libs import NodeJS
from ..libs.popup_manager import popup_manager
from .completion import load_default_autocomplete
from ..libs import FlowCLI
from ..libs import util
hover_description_css = ""
with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), "hover_description.css"), encoding="utf-8") as css_file:
hover_description_css = "<style>"+css_file.read()+"</style>"
def description_details_html(description):
description_name = "<span class=\"name\">" + cgi.escape(description['name']) + "</span>"
description_return_type = ""
text_pre_params = ""
parameters_html = ""
if description['func_details'] :
if not description['type'].startswith("(") :
text_pre_params = description['type'][ : description['type'].rfind(" => ") if description['type'].rfind(" => ") >= 0 else None ]
text_pre_params = "<span class=\"text-pre-params\">" + cgi.escape(text_pre_params[:text_pre_params.find("(")]) + "</span>"
for param in description['func_details']["params"]:
is_optional = True if param['name'].find("?") >= 0 else False
param['name'] = cgi.escape(param['name'].replace("?", ""))
param['type'] = cgi.escape(param['type']) if param.get('type') else None
if not parameters_html:
parameters_html += "<span class=\"parameter-name\">" + param['name'] + "</span>" + ( "<span class=\"parameter-is-optional\">?</span>" if is_optional else "" ) + ( ": <span class=\"parameter-type\">" + param['type'] + "</span>" if param['type'] else "" )
else:
parameters_html += ', ' + "<span class=\"parameter-name\">" + param['name'] + "</span>" + ( "<span class=\"parameter-is-optional\">?</span>" if is_optional else "" ) + ( ": <span class=\"parameter-type\">" + param['type'] + "</span>" if param['type'] else "" )
parameters_html = "("+parameters_html+")"
description_return_type = cgi.escape(description['func_details']["return_type"]) if description['func_details']["return_type"] else ""
elif description['type'] :
description_return_type = cgi.escape(description['type'])
if description_return_type :
description_return_type = " => <span class=\"return-type\">"+description_return_type+"</span>"
html = """
<div class=\"container-description\">
<div>"""+description_name+text_pre_params+parameters_html+description_return_type+"""</div>
<div class=\"container-go-to-def\"><a href="go_to_def" class="go-to-def">Go to definition</a></div>
</div>
"""
return html
class JavascriptEnhancementsOnHoverDescriptionEventListener(sublime_plugin.EventListener):
# def on_modified_async(self, view):
# if not view.match_selector(
# point,
# 'source.js - string - constant - comment'
# ):
# return
def on_hover(self, view, point, hover_zone) :
if not view.match_selector(
point,
'source.js - string - constant - comment'
) or not view.settings().get("show_definitions"):
return
if hover_zone != sublime.HOVER_TEXT :
return
for region in view.get_regions("javascript_enhancements_flow_error") + view.get_regions("javascript_enhancements_flow_warning"):
if region.contains(point):
return
try:
# fix for #47 - "Interoperability with sublimelinter"
import SublimeLinter
regions_key = SublimeLinter.highlight_view.get_regions_keys(view)
for key in regions_key:
region = view.get_regions(key)[0]
if region.contains(point):
return
except Exception as e:
pass
region = view.word(point)
word = view.substr(region)
if not word.strip() :
return
view.hide_popup()
sublime.set_timeout_async(lambda: on_hover_description_async(view, point, hover_zone, point))
# used also by ShowHintParametersCommand
def on_hover_description_async(view, point, hover_zone, popup_position, show_hint=False) :
if not view.match_selector(
point,
'source.js - comment'
):
return
if hover_zone != sublime.HOVER_TEXT :
return
if not show_hint:
for region in view.get_regions("javascript_enhancements_flow_error") + view.get_regions("javascript_enhancements_flow_warning"):
if region.contains(point):
return
region = util.word_with_dollar_char(view, point)
word = view.substr(region)
if not word.strip() :
return
cursor_pos = region.end()
flow_cli = FlowCLI(view)
result = flow_cli.autocomplete(cursor_pos=cursor_pos, add_magic_token=True, not_add_last_part_tokenized_line=True)
html = ""
results_found = 0
if result[0]:
descriptions = result[1]["result"] + load_default_autocomplete(view, result[1]["result"], word, region.begin(), True)
for description in descriptions :
if description['name'] == word :
if description['type'].startswith("((") or description['type'].find("&") >= 0 :
sub_completions = description['type'].split("&")
for sub_comp in sub_completions :
results_found += 1
sub_comp = sub_comp.strip()
sub_type = sub_comp[1:-1] if description['type'].startswith("((") else sub_comp
text_params = sub_type[ : sub_type.rfind(" => ") if sub_type.rfind(" => ") >= 0 else None ]
text_params = text_params.strip()
description["func_details"] = dict()
description["func_details"]["params"] = list()
description["func_details"]["return_type"] = ""
if sub_type.rfind(" => ") >= 0 :
description["func_details"]["return_type"] = sub_type[sub_type.rfind(" => ")+4:].strip()
start = 1 if sub_type.find("(") == 0 else sub_type.find("(")+1
end = text_params.rfind(")")
params = text_params[start:end].split(",")
for param in params :
param_dict = dict()
param_info = param.split(":")
param_dict["name"] = param_info[0].strip()
if len(param_info) > 1 :
param_dict["type"] = param_info[1].strip()
description['func_details']["params"].append(param_dict)
html += description_details_html(description)
else :
html += description_details_html(description)
if not html :
row, col = view.rowcol(point)
result = flow_cli.type_at_pos(options=[str(row + 1), str(col + 1)])
if result[0] and result[1].get("type") and result[1]["type"] != "(unknown)":
results_found = 1
description = dict()
description["name"] = ""
description['func_details'] = dict()
description['func_details']["params"] = list()
description['func_details']["return_type"] = ""
is_function = False
matches = re.match("^([a-zA-Z_]\w+)", result[1]["type"])
if matches :
description["name"] = matches.group()
if result[1]["type"].find(" => ") >= 0 :
description['func_details']["return_type"] = cgi.escape(result[1]["type"][result[1]["type"].find(" => ")+4:])
else :
description['func_details']["return_type"] = cgi.escape(result[1]["type"])
if result[1]["type"].find("(") == 0:
is_function = True
start = 1
end = result[1]["type"].find(")")
params = result[1]["type"][start:end].split(",")
description['func_details']["params"] = list()
for param in params :
param_dict = dict()
param_info = param.split(":")
param_dict["name"] = cgi.escape(param_info[0].strip())
if len(param_info) == 2 :
param_dict["type"] = cgi.escape(param_info[1].strip())
else :
param_dict["type"] = None
description['func_details']["params"].append(param_dict)
description_name = "<span class=\"name\">" + cgi.escape(description['name']) + "</span>"
description_return_type = ""
parameters_html = ""
if description['func_details'] :
for param in description['func_details']["params"]:
is_optional = True if param['name'].find("?") >= 0 else False
param['name'] = param['name'].replace("?", "")
if not parameters_html:
parameters_html += "<span class=\"parameter-name\">" + param['name'] + "</span>" + ( "<span class=\"parameter-is-optional\">?</span>" if is_optional else "" ) + ( ": <span class=\"parameter-type\">" + param['type'] + "</span>" if param['type'] else "" )
else:
parameters_html += ', ' + "<span class=\"parameter-name\">" + param['name'] + "</span>" + ( "<span class=\"parameter-is-optional\">?</span>" if is_optional else "" ) + ( ": <span class=\"parameter-type\">" + param['type'] + "</span>" if param['type'] else "" )
parameters_html = "("+parameters_html+")" if is_function else ""
description_return_type = description['func_details']["return_type"]
elif description['type'] :
description_return_type = description['type']
if description_return_type :
description_return_type = (" => " if description['name'] or is_function else "") + "<span class=\"return-type\">"+description_return_type+"</span>"
html += """
<div class=\"container-description\">
<div>"""+description_name+parameters_html+description_return_type+"""</div>
<div class=\"container-go-to-def\"><a href="go_to_def" class="go-to-def">Go to definition</a></div>
</div>
"""
func_action = lambda x: view.run_command("javascript_enhancements_go_to_definition", args={"point": point}) if x == "go_to_def" else ""
if html:
popup_manager.set_visible("javascript_enhancements_hint_parameters", True)
view.show_popup("""
<html>
<body class=\"""" + ("single-result-found" if results_found == 1 else "more-results-found") + """\">
""" + hover_description_css + """
<div class=\"container-hint-popup\">
""" + html + """
</div>
</body>
</html>""", sublime.COOPERATE_WITH_AUTO_COMPLETE | sublime.HIDE_ON_MOUSE_MOVE_AWAY, popup_position, 1150, 80 if results_found == 1 else 160, func_action, lambda: popup_manager.set_visible("javascript_enhancements_hint_parameters", False) )
|
kpm/api/info.py | ericchiang/kpm | 121 | 11104554 | <filename>kpm/api/info.py
from flask import (jsonify, request, Blueprint, redirect, render_template, current_app, url_for)
import kpm
import appr
info_app = Blueprint('info', __name__) # pylint: disable=C0103
@info_app.before_app_request
def pre_request_logging():
jsonbody = request.get_json(force=True, silent=True)
values = request.values.to_dict()
if jsonbody:
values.update(jsonbody)
current_app.logger.info("request", extra={
"remote_addr": request.remote_addr,
"http_method": request.method,
"original_url": request.url,
"path": request.path,
"data": values,
"headers": dict(request.headers.to_list())
})
@info_app.route("/")
def index_discovery():
host = request.url_root
domain = request.headers['Host']
return """<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta name="appr-package" content="{domain}/{{name}} {host}/api/v1/packages/{{name}}/pull">
</head>
<body>
</body>
</html>""".format(domain=domain, host=host)
@info_app.route("/dashboard", strict_slashes=False)
def index():
return redirect("/dashboard/index.html")
@info_app.route('/dashboard/config/config.js')
def configjs(name=None):
host = request.url_root
domain = request.headers['Host']
return render_template('config.js', domain=domain, host=host)
@info_app.route("/version")
def version():
return jsonify({"kpm-api": kpm.__version__, "appr-api": appr.__version__})
@info_app.route("/routes")
def routes():
import urllib
output = []
for rule in current_app.url_map.iter_rules():
options = {}
for arg in rule.arguments:
options[arg] = "[{0}]".format(arg)
methods = ','.join(rule.methods)
url = url_for(rule.endpoint, **options)
line = urllib.unquote("{:50s} {:20s} {}".format(rule.endpoint, methods, url))
output.append(line)
lines = []
for line in sorted(output):
lines.append(line)
return jsonify({"routes": lines})
@info_app.route("/test_timeout")
def test_timeout():
import time
time.sleep(60)
return jsonify({"kpm": kpm.__version__})
|
xfdnn/rt/xdnn_opt.py | yarenty/ml-suite | 334 | 11104597 | #!/usr/bin/env python
#
# // SPDX-License-Identifier: BSD-3-CLAUSE
#
# (C) Copyright 2018, Xilinx, Inc.
#
import copy
import pydot
import sys
from json import loads as _loads
from collections import defaultdict, OrderedDict
from os.path import exists as _exists
from six import string_types as _string_types
import operator, pprint
from os import listdir as _listdir
from os.path import join as _join
import json
import numpy as np
from xfdnn.rt.xdnn_env import xdnn_env as _xdnn_env, xdnn_fpga_env as _xdnn_fpga_env
from xfdnn.rt.xdnn_util import DefaultOrderedDict
from xfdnn.tools.compile.network import tensor_tools as tt
from xfdnn.tools.compile.network import keras_tools as kt
from xfdnn.tools.emu.factory import factory as _factory
from xfdnn.tools.emu.conv_layer import conv_layer as _conv_layer
from xfdnn.tools.emu.eltwise_layer import eltwise_layer as _eltwise_layer
from xfdnn.tools.emu.scale_layer import scale_layer as _scale_layer
from xfdnn.tools.emu.concat_layer import concat_layer as _concat_layer
from xfdnn.tools.emu.identity_layer import identity_layer as _identity_layer
from xfdnn.tools.emu.pool_layer import pool_layer as _pool_layer
from xfdnn.tools.emu.reshape_layer import reshape_layer as _reshape_layer
from xfdnn.tools.emu.matop_layer import matop_layer as _matop_layer
from xfdnn.tools.emu.quantize_layer import quantize_layer as _quantize_layer, unquantize_layer as _unquantize_layer
from xfdnn.tools.emu.softmax_layer import softmax_layer as _softmax_layer
from xfdnn.tools.emu.relu_layer import relu_layer as _relu_layer
from xfdnn.tools.emu.batchnorm_layer import batchnorm_layer as _batchnorm_layer
from xfdnn.tools.emu.reduce_layer import reduce_layer as _reduce_layer
from xfdnn.tools.emu.fpga_pydot_layer import fpga_pydot_layer as _fpga_pydot_layer
from xfdnn.tools.emu.pool_hwemu_layer import pool_hwemu_layer
class available_layers():
layers = {
'Convolution': (_conv_layer, {'mode': 'NCHW'}),
'BiasAdd': (_matop_layer, {'optype': 'BiasAdd', 'mode': 'NCHW'}),
'Eltwise': (_eltwise_layer, {'operation': 'SUM', 'mode': 'NCHW'}), # TODO FIXME assumes add???
#'Mean': (_reduce_layer, {'type': 'AVG', 'mode': 'NCHW'}), # TODO FIXME assumes avgpool???
'Reshape': (_reshape_layer, {}),
'Scale': (_scale_layer, {'mode': 'NCHW'}),
'ReLU': (_relu_layer, {}),
'Pooling': (_pool_layer, {'mode': 'NCHW'}),
'Concat': (_concat_layer, {'axis': 1}),
'BatchNorm': (_batchnorm_layer, {}),
'InnerProduct': (_matop_layer, {'optype': 'MatMul'}),
# 'Mul': (_matop_layer, {'optype': 'MatMul'}),
'Sub': (_matop_layer, {'optype': 'Sub'}),
'Identity': (_identity_layer, {}),
'Dropout' : (_identity_layer, {}),
'Input' : (_identity_layer, {}),
'Output' : (_identity_layer, {}),
'Softmax': (_softmax_layer, {})
}
def _ret(self, name, kwargs={}):
layer, defaults = self.layers[name]
## update available_layers arguments based on kwargs
defaults.update(kwargs)
return layer(**defaults)
def __call__(self, name, **kwargs):
return self._ret(name, kwargs)
def __getitem__(self, name):
return self._ret(name)
def __contains__(self, other):
if isinstance(other, _string_types):
return other in self.layers
else:
return False
_available_layers = available_layers()
class CPUTransform:
def __init__(self, time_to_layer_list=None, layerparameter_dict=None, options=object(), native_graph=None, networkjson = None, weightdir = None, inps = None, outs = None):
self.variables = {}
self.constSet = set()
if networkjson == None :
self.orig_framework = options.base
self.native_graph = native_graph
self._layers = self.create_schedule(time_to_layer_list, layerparameter_dict, options)
else :
self._layers = self.create_compiled_schedule(networkjson, weightdir, inps)
def extract_sub_graph(self, nodes, layerparameter_dict):
sub_graph = set()
stk = list(nodes)
while len(stk) > 0:
node_name = stk.pop()
sub_graph.add(node_name)
if node_name in layerparameter_dict:
params = layerparameter_dict[node_name]
if params.bottoms is not None:
stk += [inp for inp in params.bottoms if inp not in sub_graph]
return sub_graph
def create_compiled_schedule(self, networkjson, weightdir, inps) :
weights = _listdir(weightdir)
weights = [_join(weightdir, wt) for wt in weights]
const = {}
for wtfile in weights :
with open(wtfile, 'r') as wts :
line = wts.readline()
toks = line.strip().split()
                print(len(toks))
                if len(toks) > 4 :
                    print(toks[0], toks[1], toks[2], toks[3], len(toks[4:]))
if toks[0] not in const :
const[toks[0]] = {}
if "bias" in wtfile[wtfile.rfind('/'):] :
const[toks[0]]['bias'] = np.array([float(x) for x in toks[4:]])
else :
const[toks[0]]['weights'] = np.array([float(x) for x in toks[4:]])
schedule = []
for layer in networkjson['network'] :
if layer['type'] in _available_layers :
                print(layer['type'], layer['name'], layer['bottoms'], layer['type'] not in ['Convolution', 'InnerProduct'] or (layer['name'] in const and len(const[layer['name']]) == 2))
xdlf_layer = copy.deepcopy(_available_layers(layer['type'], mode='NCHW'))
if layer['name'] in const :
xdlf_layer.set_layer_params(layer, const[layer['name']])
else :
xdlf_layer.set_layer_params(layer)
schedule.append(xdlf_layer)
elif layer['name'] in inps :
print "Detected input : ", layer['name'], layer['type'], layer['outputshapes']
print schedule
return schedule
def create_schedule(self, time_to_layer_list, layerparameter_dict, options):
print("Creating schedule for \"CPU\"")
#print('processing layers: {}'.format(zip(*time_to_layer_list)[1]))
schedule = []
print('time, layer_type, layer_name, layer_inputs')
for t, layer_name in time_to_layer_list:
layer_params = layerparameter_dict[layer_name]
print('{:3d}, {:15s}, {:s}, {}'.format(t, layer_params.type[0], layer_params.name,
layer_params.bottoms))
if layer_params.type[0] == 'Const':
self.variables[layer_params.name] = layer_params.data
elif layer_params.type[0] in _available_layers:
## here mode is PFGA IP data_format (not the platform data_format, i.e., options.data_format)
layer = copy.deepcopy(_available_layers(layer_params.type[0], mode='NCHW'))
layer.set_params(layer_params, self.variables)
layer.name = layer_name
schedule.append(layer)
# elif layer_params.type[0] in ['Input', 'Output']:
# ## NOTE: This is to ignore unrecognized Input and Output nodes created by compiler with
# ## --cpulayermustgo flag
# pass
else:
layer = self.get_default_layer(layer_params)
layer.name = layer_name
schedule.append(layer)
self.constSet = set(self.variables.keys())
return schedule
def get_default_layer(self, layer_params):
if self.orig_framework.lower() == 'tf' :
## FIXME: Hack to by pass caffe and tensorflow co-existance issues
from layer_tf import layer_tf as _layer_tf
l = _layer_tf(layer_params.bottoms, layer_params.tops[0], self.native_graph, 'NCHW')
l.get_constant_inputs(self.constSet)
elif self.orig_framework.lower() == "caffe" :
## FIXME: Hack to by pass caffe and tensorflow co-existance issues
from layer_caffe import layer_caffe as _layer_caffe
l = _layer_caffe(layer_params.bottoms, layer_params.tops[0], self.native_graph, 'NCHW')
l.get_constant_inputs(self.constSet)
else :
print ("framework not yet supported")
return l
def getLayers(self):
return self._layers
def getLayerNames(self):
return [layer.output for layer in self._layers]
class FPGATransform (CPUTransform):
def __init__(self, time_to_layer_list, layerparameter_dict, compilerJson, options=object(),
native_graph=None, filename=None):
CPUTransform.__init__(self, time_to_layer_list, layerparameter_dict, options, native_graph)
print("Creating schedule for \"FPGA\"")
self.filename = filename
self.fpga_layer_cnt = 0
layerQuantMap = {l['name']: l for l in compilerJson['quantization']['network']}
boundryMap = {'inputs': compilerJson.get('inputs', []),
'outputs': compilerJson.get('outputs', [])}
## NOTE: each layer might have multiple commands due to gather and scatter
layerParameterMap = DefaultOrderedDict(list)
for l in compilerJson['network']:
layerParameterMap[l['name']] += [l]
xdnn_env = _xdnn_fpga_env(options.xclbin, quant_info=compilerJson['quantization'],
quant_cfgfile=options.quant_cfgfile,
isxdnnv3=(options.xdnnv3==True))
layersForFpga = {ol.output: ol for ol in self._layers}
# update schedule with new FPGA schedule
self._layers = self._make_fpga_layer(layersForFpga, layerParameterMap, layerQuantMap, boundryMap, xdnn_env)
def _make_fpga_layer(self, layersForFpga, layerParameterMap, layerQuantMap, boundryMap, xdnn_env):
compilerInfo = DefaultOrderedDict(dict)
for ol_name, layerParams in layerParameterMap.items():
ol = layersForFpga.get(ol_name, None)
compilerInfo[ol_name]['layerParameter'] = layerParameterMap[ol_name]
compilerInfo[ol_name]['layerQuant'] = layerQuantMap.get(ol_name, None)
compilerInfo[ol_name]['weights'] = ol.filter_weights if ol and hasattr(ol, "filter_weights") else None
compilerInfo[ol_name]['biases'] = ol.biases if ol and hasattr(ol, "biases") else None
l = _fpga_pydot_layer(compilerInfo=compilerInfo, boundryMap=boundryMap,
xdnn_env=xdnn_env, filename=self.filename)
l.name = 'fpga_pydot_layer_{}'.format(self.fpga_layer_cnt)
self.fpga_layer_cnt += 1
l.setInput([input['input_name'] for input in boundryMap['inputs']])
l.setOutput([output['previous_layers'][0] for output in boundryMap['outputs']])
return [l]
class HWEmuTransform(CPUTransform):
def __init__(self, time_to_layer=None, layer_param_dict=None, options=object(), native_graph=None, networkjson = None, weightdir = None, inps = None, outs = None, isV3 = True):
print("Creating schedule for \"HWEmu\"")
CPUTransform.__init__(self, time_to_layer, layer_param_dict, options, native_graph, networkjson, weightdir, inps, outs)
if networkjson :
recipe = {}
recipe['start'] = inps
recipe['end'] = outs
recipe['quantize'] = {inp:inp for inp in inps}
recipe['unquantize'] = {out:out for out in outs}
doQuantizeSubgraph = True
xdnnv3 = isV3
self.xdnn_env = _xdnn_env()
else :
recipeStr = options.quant_recipe
            print(recipeStr)
recipe = json.loads(recipeStr)
doQuantizeSubgraph = False # flag that we need to quantize the subgraph
self.xdnn_env = None
if options.quant_cfgfile[-4:].lower() == 'json' :
self.xdnn_env = _xdnn_env(options.quant_cfgfile)
else :
self.xdnn_env = _xdnn_env()
xdnnv3 = options.xdnnv3
if xdnnv3 == True:
opFactorySelect = "hwEmuV3"
else:
opFactorySelect = "hwEmuV2"
self._is_var_quantized = set()
quantizedLayers = defaultdict(int) # for stats
newSchedule = [] # we will be building this
        print(recipe)
        print(recipe['quantize'])
        print(recipe['unquantize'])
for i, ol in enumerate(self._layers):
origLayerName = ol.output
if origLayerName == recipe['start']:
# 'start' signal to quantize subgraph until 'end' signal
doQuantizeSubgraph = True
if origLayerName in recipe['quantize'] and ol.deephi_quantizations is None :
# inject quantize_layer
print("Start quantize subgraph @ {:s}".format(origLayerName))
quantizeKey = recipe['quantize'][origLayerName]
l = _quantize_layer(quantizeKey, self.xdnn_env)
l.setInput([ol.inputs[0]])
l.setOutput(self._quantized_varname(ol.inputs[0]))
newSchedule.append(l)
if doQuantizeSubgraph:
# substitute layers in quantized subgraph
if isinstance(ol, _conv_layer):
l = _factory.conv_factory(
opFactorySelect,
weights=ol.filter_weights,
stride=ol.conv_stride,
activation=ol.activation_fn,
padding_type=ol.padding_type,
paddings=ol.paddings,
biases=ol.biases,
mode=ol.mode,
quantize_key=ol.deephi_quantizations if ol.deephi_quantizations else origLayerName,
#quantize_key= origLayerName,
isV3=xdnnv3,
xdnn_env=self.xdnn_env)
elif isinstance(ol, _eltwise_layer):
l = _factory.eltwise_factory(
opFactorySelect,
operation=ol.operation,
activation=ol.activation,
mode=ol.mode,
quantize_key=ol.deephi_quantizations if ol.deephi_quantizations else origLayerName,
isV3=xdnnv3,
xdnn_env=self.xdnn_env)
elif isinstance(ol, _scale_layer):
l = _factory.scale_factory(
opFactorySelect,
alpha=ol.alpha,
beta=ol.beta,
activation=ol.activation,
mode=ol.mode,
quantize_key=ol.deephi_quantizations if ol.deephi_quantizations else origLayerName,
isV3=xdnnv3,
xdnn_env=self.xdnn_env)
elif isinstance(ol, _relu_layer):
l = ol
elif isinstance(ol, _pool_layer):
l = _factory.pool_factory(
opFactorySelect,
pool_type=ol.pool_type,
dim=ol.pool_kernel_dim,
stride=ol.pool_stride,
padding_type=ol.padding_type,
paddings=ol.paddings,
mode=ol.mode,
global_pooling=ol.global_pooling,
quantize_key=ol.deephi_quantizations if ol.deephi_quantizations else origLayerName,
xdnn_env=self.xdnn_env)
elif (isinstance(ol, _matop_layer)
and ol.optype == "Add"):
l = ol
elif (isinstance(ol, _concat_layer)
or (hasattr(ol, "op") and "Concat" in ol.op.type)):
l = ol
else:
                    raise NotImplementedError('unknown layer quantizer {} {}'.format(origLayerName, type(ol)))
quantizedLayers[type(ol)] += 1
# reroute to used quantized vars, then add to new schedule
l.setInput(list(map(self._quantized_varname, ol.inputs)))
l.setOutput(self._quantized_varname(origLayerName))
if ol.deephi_quantizations :
if doQuantizeSubgraph :
l.deephi_quantizations = ol.deephi_quantizations
else :
l.deephi_quantizations = None
newSchedule.append(l)
else:
# add new schedule as-is
newSchedule.append(ol)
if origLayerName in recipe['unquantize'] and ol.deephi_quantizations is None :
# inject unquantize_layer
print("End quantize subgraph @ {:s}".format(origLayerName))
quantizeKey = recipe['unquantize'][origLayerName]
l = _unquantize_layer(quantizeKey, self.xdnn_env)
l.setInput([self._quantized_varname(origLayerName)])
l.setOutput(origLayerName)
newSchedule.append(l)
if origLayerName == recipe['end']:
# 'end' signal to stop quantizing subgraph
doQuantizeSubgraph = False
print("Quantized layers:")
sortedQL = sorted(list(quantizedLayers.items()), key=operator.itemgetter(1))
        pprint.pprint(sortedQL)
# update schedule with new quantized schedule
        print(newSchedule)
self._layers = newSchedule
def _quantized_varname(self, x):
if x in self._is_var_quantized:
return x + '_quantized'
return x
|
Connectors/src/PostgreSql/scaffold/cloudfoundry.py | aTiKhan/Samples-1 | 643 | 11104628 | <reponame>aTiKhan/Samples-1<gh_stars>100-1000
from pysteel import cloudfoundry
def setup(context):
"""
:type context: behave.runner.Context
"""
cf = cloudfoundry.CloudFoundry(context)
# remove previous app
app = 'postgres-connector'
cf.delete_app(app)
# create service
service = 'postgresql-10-odb'
plan = 'standalone'
instance = 'myPostgres'
args = ['-c',
'\'{"db_name":"postgresample", "db_username":"steeltoe", "owner_name":"<NAME>", "owner_email":"<EMAIL>"}\'']
cf.create_service(service, plan, instance, args)
|
lldb/packages/Python/lldbsuite/test/lang/cpp/nested-class-other-compilation-unit/TestNestedClassWithParentInAnotherCU.py | medismailben/llvm-project | 2,338 | 11104645 | <gh_stars>1000+
"""
Test that the expression evaluator can access members of nested classes even if
the parents of the nested classes were imported from another compilation unit.
"""
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestNestedClassWithParentInAnotherCU(TestBase):
mydir = TestBase.compute_mydir(__file__)
def test_nested_class_with_parent_in_another_cu(self):
self.main_source_file = lldb.SBFileSpec("main.cpp")
self.build()
(_, _, thread, _) = lldbutil.run_to_source_breakpoint(self, "// break here", self.main_source_file)
frame = thread.GetSelectedFrame()
# Parse the DIEs of the parent classes and the nested classes from
# other.cpp's CU.
warmup_result = frame.EvaluateExpression("b")
self.assertTrue(warmup_result.IsValid())
# Inspect fields of the nested classes. This will reuse the types that
# were parsed during the evaluation above. By accessing a chain of
# fields, we try to verify that all the DIEs, reused types and
# declaration contexts were wired properly into lldb's parser's state.
expr_result = frame.EvaluateExpression("a.y.oY_inner.oX_inner")
self.assertTrue(expr_result.IsValid())
self.assertEqual(expr_result.GetValue(), "42")
|
trpo/fxn_approx.py | DanielTakeshi/rl_algorithms | 150 | 11104654 | <reponame>DanielTakeshi/rl_algorithms<filename>trpo/fxn_approx.py
"""
This will make some function approximators that we can use, particularly: linear
and neural network value functions. Instantiate instances of these in other
pieces of the code base.
(c) April 2017 by <NAME>, built upon `starter code` from <NAME>.
"""
import numpy as np
import tensorflow as tf
import tensorflow.contrib.distributions as distr
import sys
if "../" not in sys.path:
sys.path.append("../")
from utils import utils_pg as utils
np.set_printoptions(edgeitems=100)
class LinearValueFunction(object):
""" Estimates the baseline function for PGs via ridge regression. """
coef = None
def fit(self, X, y):
"""
Updates weights (self.coef) with design matrix X (i.e. observations) and
targets (i.e. actual returns) y.
"""
assert X.shape[0] == y.shape[0]
assert len(y.shape) == 1
Xp = self.preproc(X)
A = Xp.T.dot(Xp)
nfeats = Xp.shape[1]
A[np.arange(nfeats), np.arange(nfeats)] += 1e-3 # a little ridge regression
b = Xp.T.dot(y)
self.coef = np.linalg.solve(A, b)
def predict(self, X):
""" Predicts return from observations (i.e. environment states) X. """
if self.coef is None:
return np.zeros(X.shape[0])
else:
return self.preproc(X).dot(self.coef)
def preproc(self, X):
""" Adding a bias column, and also adding squared values (huh). """
return np.concatenate([np.ones([X.shape[0], 1]), X, np.square(X)/2.0], axis=1)
class NnValueFunction(object):
""" Estimates the baseline function for PGs via neural network. """
def __init__(self, session, ob_dim=None, n_epochs=10, stepsize=1e-3):
"""
They provide us with an ob_dim in the code so I assume we can use it;
makes it easy to define the layers anyway. This gets constructed upon
initialization so future calls to self.fit should remember this. I
actually use the pre-processed version, though.
"""
self.n_epochs = n_epochs
self.lrate = stepsize
self.sy_ytarg = tf.placeholder(shape=[None], name="nnvf_y", dtype=tf.float32)
self.sy_ob_no = tf.placeholder(shape=[None, ob_dim+1], name="nnvf_ob", dtype=tf.float32)
self.sy_h1 = utils.lrelu(utils.dense(self.sy_ob_no, 32, "nnvf_h1", weight_init=utils.normc_initializer(1.0)), leak=0.0)
self.sy_h2 = utils.lrelu(utils.dense(self.sy_h1, 32, "nnvf_h2", weight_init=utils.normc_initializer(1.0)), leak=0.0)
self.sy_final_n = utils.dense(self.sy_h2, 1, "nnvf_final", weight_init=utils.normc_initializer(1.0))
self.sy_ypred = tf.reshape(self.sy_final_n, [-1])
self.sy_l2_error = tf.reduce_mean(tf.square(self.sy_ypred - self.sy_ytarg))
self.fit_op = tf.train.AdamOptimizer(stepsize).minimize(self.sy_l2_error)
self.sess = session
def fit(self, X, y):
""" Updates weights (self.coef) with design matrix X (i.e. observations)
and targets (i.e. actual returns) y. NOTE! We now return a dictionary
`out` so that we can provide information relevant information for the
logger.
"""
assert X.shape[0] == y.shape[0]
assert len(y.shape) == 1
out = {}
out["PredStdevBefore"]= self.predict(X).std()
Xp = self.preproc(X)
for i in range(self.n_epochs):
_,err = self.sess.run(
[self.fit_op, self.sy_l2_error],
feed_dict={self.sy_ob_no: Xp,
self.sy_ytarg: y
})
if i == 0:
out["MSEBefore"] = np.sqrt(err)
if i == self.n_epochs-1:
out["MSEAfter"] = np.sqrt(err)
out["PredStdevAfter"] = self.predict(X).std()
out["TargStdev"] = y.std()
return out
def predict(self, X):
"""
Predicts returns from observations (i.e. environment states) X. I also
think we need a session here. No need to expand dimensions, BTW! It's
effectively already done for us elsewhere.
"""
Xp = self.preproc(X)
return self.sess.run(self.sy_ypred, feed_dict={self.sy_ob_no:Xp})
def preproc(self, X):
""" Let's add this here to increase dimensionality. """
#return np.concatenate([np.ones([X.shape[0], 1]), X, np.square(X)/2.0], axis=1)
return np.concatenate([np.ones([X.shape[0], 1]), X], axis=1)
|
tests/cupyx_tests/scipy_tests/special_tests/test_erf.py | svlandeg/cupy | 6,180 | 11104658 | import unittest
import numpy
import cupy
from cupy import testing
import cupyx.scipy.special # NOQA
def _boundary_inputs(boundary, rtol, atol):
left = boundary * (1 - numpy.copysign(rtol, boundary)) - atol
right = boundary * (1 + numpy.copysign(rtol, boundary)) + atol
return [left, boundary, right]
class _TestBase(object):
def test_erf(self):
self.check_unary('erf')
def test_erfc(self):
self.check_unary('erfc')
def test_erfcx(self):
self.check_unary('erfcx')
@testing.with_requires('scipy>=1.4.0')
def test_erfinv(self):
self.check_unary('erfinv')
self.check_unary_random('erfinv', scale=2, offset=-1)
self.check_unary_boundary('erfinv', boundary=-1)
self.check_unary_boundary('erfinv', boundary=1)
@testing.with_requires('scipy>=1.4.0')
def test_erfcinv(self):
self.check_unary('erfcinv')
self.check_unary_random('erfcinv', scale=2, offset=0)
self.check_unary_boundary('erfcinv', boundary=0)
self.check_unary_boundary('erfcinv', boundary=2)
@testing.gpu
@testing.with_requires('scipy')
class TestSpecial(unittest.TestCase, _TestBase):
@testing.for_dtypes(['e', 'f', 'd'])
@testing.numpy_cupy_allclose(atol=1e-5, scipy_name='scp')
def check_unary(self, name, xp, scp, dtype):
import scipy.special # NOQA
a = testing.shaped_arange((2, 3), xp, dtype)
return getattr(scp.special, name)(a)
@testing.for_dtypes(['f', 'd'])
@testing.numpy_cupy_allclose(atol=1e-5, scipy_name='scp')
def check_unary_random(self, name, xp, scp, dtype, scale, offset):
import scipy.special # NOQA
a = testing.shaped_random((2, 3), xp, dtype, scale=scale) + offset
return getattr(scp.special, name)(a)
@testing.for_dtypes(['f', 'd'])
@testing.numpy_cupy_allclose(atol=1e-5, scipy_name='scp')
def check_unary_boundary(self, name, xp, scp, dtype, boundary):
import scipy.special # NOQA
a = _boundary_inputs(boundary, 1.0 / 1024, 1.0 / 1024)
a = xp.array(a, dtype=dtype)
return getattr(scp.special, name)(a)
@testing.with_requires('scipy>=1.4.0')
@testing.for_dtypes(['f', 'd'])
def test_erfinv_behavior(self, dtype):
a = cupy.empty((1,), dtype=dtype)
a[:] = 1.0 + 1E-6
a = cupyx.scipy.special.erfinv(a)
assert cupy.isnan(a)
a[:] = -1.0 - 1E-6
a = cupyx.scipy.special.erfinv(a)
assert cupy.isnan(a)
a[:] = 1.0
a = cupyx.scipy.special.erfinv(a)
assert numpy.isposinf(cupy.asnumpy(a))
a[:] = -1.0
a = cupyx.scipy.special.erfinv(a)
assert numpy.isneginf(cupy.asnumpy(a))
@testing.with_requires('scipy>=1.4.0')
@testing.for_dtypes(['f', 'd'])
def test_erfcinv_behavior(self, dtype):
a = cupy.empty((1,), dtype=dtype)
a[:] = 2.0 + 1E-6
a = cupyx.scipy.special.erfcinv(a)
assert cupy.isnan(a)
a[:] = 0.0 - 1E-6
a = cupyx.scipy.special.erfcinv(a)
assert cupy.isnan(a)
a[:] = 0.0
a = cupyx.scipy.special.erfcinv(a)
assert numpy.isposinf(cupy.asnumpy(a))
a[:] = 2.0
a = cupyx.scipy.special.erfcinv(a)
assert numpy.isneginf(cupy.asnumpy(a))
@testing.gpu
@testing.with_requires('scipy')
class TestFusionSpecial(unittest.TestCase, _TestBase):
@testing.for_dtypes(['e', 'f', 'd'])
@testing.numpy_cupy_allclose(atol=1e-5, scipy_name='scp')
def check_unary(self, name, xp, scp, dtype):
import scipy.special # NOQA
a = testing.shaped_arange((2, 3), xp, dtype)
@cupy.fuse()
def f(x):
return getattr(scp.special, name)(x)
return f(a)
@testing.for_dtypes(['f', 'd'])
@testing.numpy_cupy_allclose(atol=1e-5, scipy_name='scp')
def check_unary_random(self, name, xp, scp, dtype, scale, offset):
import scipy.special # NOQA
a = testing.shaped_random((2, 3), xp, dtype, scale=scale) + offset
@cupy.fuse()
def f(x):
return getattr(scp.special, name)(x)
return f(a)
@testing.for_dtypes(['f', 'd'])
@testing.numpy_cupy_allclose(atol=1e-5, scipy_name='scp')
def check_unary_boundary(self, name, xp, scp, dtype, boundary):
import scipy.special # NOQA
a = _boundary_inputs(boundary, 1.0 / 1024, 1.0 / 1024)
a = xp.array(a, dtype=dtype)
@cupy.fuse()
def f(x):
return getattr(scp.special, name)(x)
return f(a)
|
test/interop/python/multilocale/use_checkMultipleLocales.py | jhh67/chapel | 1,602 | 11104665 | <reponame>jhh67/chapel
import checkMultipleLocales
checkMultipleLocales.chpl_setup(4)
checkMultipleLocales.foo(2)
checkMultipleLocales.chpl_cleanup()
|
platypush/message/response/chat/telegram.py | RichardChiang/platypush | 228 | 11104723 | <gh_stars>100-1000
import datetime
from typing import Optional, List
from platypush.message.response import Response
class TelegramMessageResponse(Response):
def __init__(self,
message_id: int,
chat_id: int,
creation_date: Optional[datetime.datetime],
chat_username: Optional[str] = None,
chat_firstname: Optional[str] = None,
chat_lastname: Optional[str] = None,
from_user_id: Optional[int] = None,
from_username: Optional[str] = None,
from_firstname: Optional[str] = None,
from_lastname: Optional[str] = None,
text: Optional[str] = None,
caption: Optional[str] = None,
edit_date: Optional[datetime.datetime] = None,
forward_date: Optional[datetime.datetime] = None,
forward_from_message_id: Optional[int] = None,
photo_file_id: Optional[str] = None,
photo_file_size: Optional[int] = None,
photo_width: Optional[int] = None,
photo_height: Optional[int] = None,
document_file_id: Optional[str] = None,
document_file_name: Optional[str] = None,
document_file_size: Optional[str] = None,
document_mime_type: Optional[str] = None,
audio_file_id: Optional[str] = None,
audio_file_size: Optional[str] = None,
audio_mime_type: Optional[str] = None,
audio_performer: Optional[str] = None,
audio_title: Optional[str] = None,
audio_duration: Optional[str] = None,
location_latitude: Optional[float] = None,
location_longitude: Optional[float] = None,
contact_phone_number: Optional[str] = None,
contact_first_name: Optional[str] = None,
contact_last_name: Optional[str] = None,
contact_user_id: Optional[int] = None,
contact_vcard: Optional[str] = None,
video_file_id: Optional[str] = None,
video_file_size: Optional[int] = None,
video_width: Optional[int] = None,
video_height: Optional[int] = None,
video_mime_type: Optional[str] = None,
video_duration: Optional[str] = None,
link: Optional[str] = None,
media_group_id: Optional[int] = None,
*args, **kwargs):
super().__init__(*args, output={
'message_id': message_id,
'chat_id': chat_id,
'chat_username': chat_username,
'chat_firstname': chat_firstname,
'chat_lastname': chat_lastname,
'from_user_id': from_user_id,
'from_username': from_username,
'from_firstname': from_firstname,
'from_lastname': from_lastname,
'text': text,
'caption': caption,
'creation_date': creation_date,
'edit_date': edit_date,
'forward_from_message_id': forward_from_message_id,
'forward_date': forward_date,
'photo_file_id': photo_file_id,
'photo_file_size': photo_file_size,
'photo_width': photo_width,
'photo_height': photo_height,
'document_file_id': document_file_id,
'document_file_name': document_file_name,
'document_file_size': document_file_size,
'document_mime_type': document_mime_type,
'audio_file_id': audio_file_id,
'audio_file_size': audio_file_size,
'audio_performer': audio_performer,
'audio_title': audio_title,
'audio_duration': audio_duration,
'audio_mime_type': audio_mime_type,
'video_file_id': video_file_id,
'video_file_size': video_file_size,
'video_width': video_width,
'video_height': video_height,
'video_duration': video_duration,
'video_mime_type': video_mime_type,
'link': link,
'location_latitude': location_latitude,
'location_longitude': location_longitude,
'contact_phone_number': contact_phone_number,
'contact_first_name': contact_first_name,
'contact_last_name': contact_last_name,
'contact_user_id': contact_user_id,
'contact_vcard': contact_vcard,
'media_group_id': media_group_id,
}, **kwargs)
class TelegramFileResponse(Response):
def __init__(self,
file_id: str,
file_path: str,
file_size: int,
*args, **kwargs):
super().__init__(*args, output={
'file_id': file_id,
'file_path': file_path,
'file_size': file_size,
}, **kwargs)
class TelegramChatResponse(Response):
# noinspection PyShadowingBuiltins
def __init__(self,
chat_id: int,
link: str,
username: str,
invite_link: Optional[str],
title: Optional[str] = None,
description: Optional[str] = None,
type: Optional[str] = None,
first_name: Optional[str] = None,
last_name: Optional[str] = None,
*args, **kwargs):
super().__init__(*args, output={
'chat_id': chat_id,
'link': link,
'invite_link': invite_link,
'username': username,
'title': title,
'description': description,
'type': type,
'first_name': first_name,
'last_name': last_name,
}, **kwargs)
class TelegramUserResponse(Response):
# noinspection PyShadowingBuiltins
def __init__(self,
user_id: int,
username: str,
is_bot: bool,
first_name: str,
last_name: Optional[str] = None,
language_code: Optional[str] = None,
link: Optional[str] = None,
*args, **kwargs):
super().__init__(*args, output={
'user_id': user_id,
'username': username,
'is_bot': is_bot,
'link': link,
'language_code': language_code,
'first_name': first_name,
'last_name': last_name,
}, **kwargs)
class TelegramUsersResponse(Response):
# noinspection PyShadowingBuiltins
def __init__(self,
users: List[TelegramUserResponse],
*args, **kwargs):
super().__init__(*args, output=[user.output for user in users], **kwargs)
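# Illustrative usage sketch added by the editor (not part of the original
# module). It assumes the usual platypush behaviour of exposing the keyword
# arguments passed to Response through its `output` attribute; the field
# values below are made up.
#
#   response = TelegramMessageResponse(
#       message_id=1234,
#       chat_id=5678,
#       creation_date=None,
#       text='hello from platypush',
#   )
#   assert response.output['text'] == 'hello from platypush'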
# vim:sw=4:ts=4:et:
|
tests/node_converters/concat_test.py | niobeus/onnx2torch | 144 | 11104725 |
from itertools import product
from typing import List
import numpy as np
import onnx
from onnx.helper import make_tensor_value_info
from onnx.mapping import NP_TYPE_TO_TENSOR_TYPE
from tests.utils.common import check_onnx_model
from tests.utils.common import make_model_from_nodes
def _test_concat(
input_arrays_shapes: List[List[int]],
opset_version: int,
**kwargs,
) -> None:
test_inputs = {}
for i, input_array_shape in enumerate(input_arrays_shapes):
x = np.random.uniform(low=-1.0, high=1.0, size=input_array_shape).astype(np.float32)
node_name = f'x_{i}'
test_inputs[node_name] = x
node = onnx.helper.make_node(
'Concat',
inputs=list(test_inputs),
outputs=['y'],
**kwargs,
)
onnx_type = NP_TYPE_TO_TENSOR_TYPE[np.dtype('float32')]
outputs_info = [make_tensor_value_info(name='y', elem_type=onnx_type, shape=None)]
model = make_model_from_nodes(
nodes=node,
initializers={},
inputs_example=test_inputs,
outputs_info=outputs_info,
opset_version=opset_version,
)
check_onnx_model(model, test_inputs)
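# Shape note added for clarity (not in the original test): with three inputs of
# shape [1, 3, 16, 16], ONNX Concat yields [3, 3, 16, 16] for axis=0 and
# [1, 9, 16, 16] for axis=1, which is exactly what the parametrized test below
# exercises.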
def test_concat() -> None:
opset_variants = (9, 13)
axis_variants = (0, 1)
for opset_version, axis in product(opset_variants, axis_variants):
_test_concat(
input_arrays_shapes=[[1, 3, 16, 16], [1, 3, 16, 16], [1, 3, 16, 16]],
axis=axis,
opset_version=opset_version,
)
|
anki_vector/messaging/__init__.py | rmcolbert/vector-python-sdk | 516 | 11104794 |
# Copyright (c) 2018 Anki, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the file LICENSE.txt or at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Protobuf and gRPC messages exposed to the Vector Python SDK.
.. warning::
This package is provided to understand the messages passed between the SDK and Vector,
and it should not be necessary for writing code that uses the SDK.
.. code-block:: python
from anki_vector.messaging import client, protocol
async def send_version_request(interface: client.ExternalInterfaceStub, client_version, min_host_version):
\"\"\"This function needs to be executed and awaited in the same event loop
as the interface is created.
\"\"\"
# Create a protocol version request message
version = protocol.ProtocolVersionRequest(client_version=client_version,
min_host_version=min_host_version)
# Send the protocol version to the external interface and await the result
protocol_version = await interface.ProtocolVersion(version)
For information about individual messages and their parameters, see :doc:`the protobuf documentation </proto>`.
"""
from . import protocol
from . import client
__all__ = ['protocol', 'client']
|
{{cookiecutter.project_slug}}/backend/app/app/main.py | Gjacquenot/full-stack-fastapi-couchbase | 353 | 11104829 | from fastapi import FastAPI
from starlette.middleware.cors import CORSMiddleware
from app.api.api_v1.api import api_router
from app.core import config
app = FastAPI(title=config.PROJECT_NAME, openapi_url="/api/v1/openapi.json")
# CORS
origins = []
# Set all CORS enabled origins
if config.BACKEND_CORS_ORIGINS:
origins_raw = config.BACKEND_CORS_ORIGINS.split(",")
for origin in origins_raw:
use_origin = origin.strip()
origins.append(use_origin)
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
    )
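# Example only (added note, not from the project docs): BACKEND_CORS_ORIGINS is
# assumed to be a comma-separated string, e.g. set in the environment as
#   BACKEND_CORS_ORIGINS=http://localhost,http://localhost:8080,https://example.com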
app.include_router(api_router, prefix=config.API_V1_STR)
|
python/tests/wrapper/test_10_attach_model.py | MelLain/bigartm | 638 | 11104836 |
# Copyright 2017, Additive Regularization of Topic Models.
from __future__ import print_function
import os
import tempfile
import shutil
import pytest
from six.moves import range, zip
import artm.wrapper
import artm.wrapper.messages_pb2 as messages
import artm.wrapper.constants as constants
import artm.master_component as mc
def test_func():
# Set some constants
data_path = os.environ.get('BIGARTM_UNITTEST_DATA')
dictionary_name = 'dictionary'
pwt = 'pwt'
nwt = 'nwt'
docword = 'docword.kos.txt'
vocab = 'vocab.kos.txt'
num_topics = 10
num_document_passes = 1
num_outer_iterations = 5
index_to_zero = 4
zero_tol = 1e-37
batches_folder = tempfile.mkdtemp()
try:
# Create the instance of low-level API and master object
lib = artm.wrapper.LibArtm()
# Parse collection from disk
lib.ArtmParseCollection({'format': constants.CollectionParserConfig_CollectionFormat_BagOfWordsUci,
'docword_file_path': os.path.join(data_path, docword),
'vocab_file_path': os.path.join(data_path, vocab),
'target_folder': batches_folder})
# Create master component and scores
scores = {'ThetaSnippet': messages.ThetaSnippetScoreConfig()}
master = mc.MasterComponent(lib, scores=scores)
# Create collection dictionary and import it
master.gather_dictionary(dictionary_target_name=dictionary_name,
data_path=batches_folder,
vocab_file_path=os.path.join(data_path, vocab))
# Initialize model
master.initialize_model(model_name=pwt,
topic_names=['topic_{}'.format(i) for i in range(num_topics)],
dictionary_name=dictionary_name)
# Attach Pwt matrix
topic_model, numpy_matrix = master.attach_model(pwt)
numpy_matrix[:, index_to_zero] = 0
# Perform iterations
for iter in range(num_outer_iterations):
master.clear_score_cache()
master.process_batches(pwt, nwt, num_document_passes, batches_folder)
master.normalize_model(pwt, nwt)
theta_snippet_score = master.get_score('ThetaSnippet')
print('ThetaSnippetScore.')
# Note that 5th topic is fully zero; this is because we performed "numpy_matrix[:, 4] = 0".
snippet_tuples = zip(theta_snippet_score.values, theta_snippet_score.item_id)
print_string = ''
for values, item_id in snippet_tuples:
print_string += 'Item# {0}:\t'.format(item_id)
for index, value in enumerate(values.value):
if index == index_to_zero:
assert value < zero_tol
print_string += '{0:.3f}\t'.format(value)
print(print_string)
print_string = ''
finally:
shutil.rmtree(batches_folder)
|
audio_distance.py | mbinkowski/DeepSpeechDistances | 114 | 11104847 |
"""
Main Audio distance computation module.
"""
import os
import time
import numpy as np
from glob import glob
import tensorflow.compat.v1 as tf
from tqdm import tqdm
from tensorflow_gan.python.eval.classifier_metrics import kernel_classifier_distance_and_std_from_activations as kernel_dist
from tensorflow_gan.python.eval.classifier_metrics import frechet_classifier_distance_from_activations as frechet_dist
from preprocessing import create_feed_dict
LOAD_PATH = './checkpoint/-54800'
META_PATH = './checkpoint/collection-stripped-meta.meta'
class AudioDistance(object):
"""Main DeepSpeech Distance evaluation class."""
def __init__(self,
load_path=LOAD_PATH,
meta_path=META_PATH,
keep_features=True,
required_sample_size=10000,
num_splits=5,
do_kdsd=True,
do_conditional_dsds=True,
sample_freq=24000):
"""
    Args:
load_path: Path to DeepSpeech2 model checkpoint.
meta_path: Path to DeepSpeech2 meta graph file.
keep_features: If True, reference and benchmark features will be kept in
memory for faster evaluation of future samples.
      required_sample_size: Minimum sample size required for computation.
        Twice this number of samples is required from the reference (real
        data) sample to compute the benchmark.
num_splits: Computation of FDSD and cFDSD will compute mean and std of
distance based on results from this number of independent runs.
do_kdsd: If True, Kernel distances (KDSD, cKDSD) will also be computed.
do_conditional_dsds: If True, conditional distances will be computed.
sample_freq: Audio sample frequency.
"""
self.load_path = load_path
self.meta_path = meta_path
self.batch_size = 16 # Fixed in DeepSpeech2 graph.
self.keep_features = keep_features
self.kept_features = {}
self.do_kdsd = do_kdsd
self.do_conditional_dsds = do_conditional_dsds
self.sample_freq = sample_freq
self.input_tensors = [
'IteratorGetNext:0', 'IteratorGetNext:1', 'IteratorGetNext:2']
self.output_tensor = 'ForwardPass/ds2_encoder/Reshape_2:0'
self._restored = False
mult = num_splits * self.batch_size
if required_sample_size // mult < 1:
raise ValueError(f"Too small sample size ({required_sample_size}) for "
f"given batch size ({self.batch_size}) and number of "
f"splits ({num_splits}.")
self.required_sample_size = (required_sample_size // mult) * mult
self.saver = tf.train.import_meta_graph(meta_path)
self.sess_config = tf.ConfigProto(allow_soft_placement=True)
self.sess_config.gpu_options.allow_growth = True
shape = (self.required_sample_size, 1600)
self.ref_features = tf.placeholder(
tf.float32, shape=shape, name='ref_features')
self.eval_features = tf.placeholder(
tf.float32, shape=shape, name='eval_features')
zipped = zip(tf.split(self.ref_features, num_splits),
tf.split(self.eval_features, num_splits))
dists = [frechet_dist(ref, ev) for ref, ev in zipped]
self.dists = [(tf.reduce_mean(dists), tf.math.reduce_std(dists))]
if self.do_kdsd:
self.dists += [kernel_dist(self.ref_features, self.eval_features,
dtype=tf.float32)]
self.real_data = None
self.real_data_benchmarks = None
def _load_from_pattern(self, pattern, assert_limit=None):
if assert_limit:
assert_limit = max(self.required_sample_size, assert_limit)
def _check_and_cut2limit(x):
if not assert_limit:
return x
if len(x) < assert_limit:
raise ValueError(
f"Not enough samples provided ({len(x)}), required: {assert_limit}.")
return x[:assert_limit]
if isinstance(pattern, np.ndarray):
# pattern is already an array
return _check_and_cut2limit(pattern)
if isinstance(pattern, list):
# pattern is a list
exts = list(np.unique([f[-4:] for f in pattern]))
if not (len(exts) == 1 and exts[0] in ['.npy', '.wav']):
raise ValueError("All provided files should be of the same type, "
f"either '.npy' or '.wav', got {str(exts)}.")
files = pattern
elif isinstance(pattern, str):
# pattern is a string
if pattern[-4:] not in ['.npy', '.wav']:
raise ValueError(f"Wrong filename pattern: {pattern}. Only '.npy' and "
"'.wav' files are supported.")
files = glob(pattern)
else:
raise ValueError("Wrong type. Only string, list and arry inputs are "
f"supported, got {str(type(pattern))}.")
if files[0][-4:] == '.npy':
# npy case
files_ = []
for f in files:
with open(f, 'r') as numpy_file:
files_.append(np.load(numpy_file))
array = np.concatenate(files_)
return _check_and_cut2limit(array)
# .wav case. Returning a list.
return _check_and_cut2limit(files)
def load_real_data(self, pattern):
"""Loads real data from a regex pattern.
Args:
pattern: regular expression to locate the data files. Audio needs to be
stored in .wav or .npy files.
"""
self.real_data = self._load_from_pattern(
pattern, assert_limit=2*self.required_sample_size)
def _restore_graph(self, sess):
if not self._restored:
self.saver.restore(sess, self.load_path)
self._restored = True
print('Checkpoint restored.')
def _split_to_batches(self, x):
bs = self.batch_size
return [x[k * bs: (k+1) * bs] for k in range(len(x) // bs)]
def _has_reference_features(self):
return 'ref' in self.kept_features
def _has_benchmark_features(self):
return 'benchmark' in self.kept_features
def get_features(self, sess=None, files=None):
"""Computes DeepSpeech features for audio from source files.
Args:
sess: tf.Session object or None.
      files: None or glob pattern to load the data from. If None, features for
reference data will be computed.
Returns:
numpy array of features for the given data files.
"""
doing_reference = (files is None)
if doing_reference:
# Reference features.
if self._has_reference_features():
return self.kept_features['ref']
# The first half (self.required_sample_size clips) has the same
# conditioning as the evaluated samples, the second half -- different.
files = self.real_data
desc = 'Extracting DeepSpeech features from reference samples'
else:
# Evaluated features (which could still be real data).
files = self._load_from_pattern(files,
assert_limit=self.required_sample_size)
desc = 'Extracting DeepSpeech features from samples to evaluate'
features = []
if sess is None:
sess = tf.Session(config=self.sess_config)
self._restore_graph(sess)
t0 = time.time()
for idx, file_batch in enumerate(tqdm(self._split_to_batches(files),
desc=desc,
unit_scale=self.batch_size)):
feed_dict = create_feed_dict(file_batch,
handles=self.input_tensors,
sample_freq=self.sample_freq)
values = sess.run(self.output_tensor, feed_dict=feed_dict)
features.append(values.mean(axis=1))
features_ = np.concatenate(features, axis=0)
if doing_reference and self.keep_features:
if not self._has_reference_features():
# keep reference features for future evaluations
self.kept_features['ref'] = features_
if not self._has_benchmark_features():
# keep benchmark features for future evaluations
self.kept_features['benchmark'] = np.split(features_, 2)[0]
    print('DeepSpeech2: finished evaluating features, total time '
          '%.1fs' % (time.time() - t0))
return features_
def get_distance(self, sess=None, files=None):
"""Main function computing DeepSpeech distances.
Args:
sess: None or tf.Session object.
      files: None or glob pattern with data files to compute distances against.
If None, distances for benchmark data will be computed.
Returns:
A list of tuples (distance, std) of distances in the following order:
FDSD, KDSD, cFDSD, cKDSD. If self.do_kdsd is False, Kernel distances will
be skipped. If do_conditional_dsds is False, conditional distances will
be skipped.
"""
doing_real_data_benchmark = (files is None)
if doing_real_data_benchmark:
# use the latter part of real wav files for real-data benchmark
if self.real_data_benchmarks is not None:
return self.real_data_benchmarks
elif self._has_benchmark_features() and self._has_reference_features():
ref_features_ = [self.kept_features['ref']]
eval_features_ = [self.kept_features['benchmark']]
else:
# Evaluate reference features with same conditioning as samples.
files = self.real_data[:self.required_sample_size]
else:
files = self._load_from_pattern(files)
if sess is None:
sess = tf.Session(config=self.sess_config)
if files is not None:
# Reference features contains 2*self.required_sample_size clips with same
# and different conditioning.
ref_features_ = self.get_features(sess=sess, files=None)
eval_features_ = self.get_features(sess=sess, files=files)
ref_features_same_cond, ref_features_other_cond = np.split(
ref_features_, 2)
print('AudioDistance: got features from both samples, computing '
'metrics...')
t0 = time.time()
dist_vals = sess.run(self.dists,
feed_dict={self.ref_features: ref_features_other_cond,
self.eval_features: eval_features_})
    print('AudioDistance: computed metrics from features '
          'in %.1fs.' % (time.time() - t0))
if doing_real_data_benchmark:
self.real_data_benchmarks = dist_vals
if self.keep_features and (not self._has_benchmark_features()):
self.kept_features['benchmark'] = eval_features_
if self.do_conditional_dsds:
print('Evaluation with the same conditioning.')
t0 = time.time()
dist_vals += sess.run(
self.dists, feed_dict={self.ref_features: ref_features_same_cond,
self.eval_features: eval_features_})
      print('AudioDistance: computed metrics from features '
            'in %.1fs.' % (time.time() - t0))
print('AudioDistance: finished evaluation.')
return dist_vals
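# Minimal usage sketch added for illustration; the file patterns are
# hypothetical and the DeepSpeech2 checkpoint paths above must exist for this
# to actually run:
#
#   evaluator = AudioDistance(required_sample_size=160)
#   evaluator.load_real_data('reference/*.wav')   # needs >= 2x the sample size
#   cond_dists = evaluator.get_distance(files='generated/*.wav')
#   benchmark = evaluator.get_distance()          # real-data benchmark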
|
src/curt/curt/module.py | sanyaade-teachings/cep | 108 | 11104875 | """
Copyright (C) Cortic Technology Corp. - All Rights Reserved
Written by <NAME> <<EMAIL>>, 2021
"""
class Module:
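    # Descriptive note added by the editor (not in the original source): judging
    # by the attribute names, this is a plain data container describing a CURT
    # module, the channels it communicates on (config/task/output), the workers
    # that can serve it, and a load indicator.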
def __init__(self, module_type, config_channel, task_channel, output_channel, worker_list, load):
self.module_type = module_type
self.config_channel = config_channel
self.task_channel = task_channel
self.output_channel = output_channel
self.worker_list = worker_list
self.load = load |
caffe2/python/operator_test/resize_op_test.py | KevinKecc/caffe2 | 585 | 11104918 | # Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import hypothesis.strategies as st
import unittest
import caffe2.python.hypothesis_test_util as hu
from caffe2.python import core
from hypothesis import given
class TestResize(hu.HypothesisTestCase):
@given(height_scale=st.floats(0.25, 4.0) | st.just(2.0),
width_scale=st.floats(0.25, 4.0) | st.just(2.0),
height=st.integers(4, 32),
width=st.integers(4, 32),
num_channels=st.integers(1, 4),
batch_size=st.integers(1, 4),
seed=st.integers(0, 65535),
**hu.gcs)
def test_nearest(self, height_scale, width_scale, height, width,
num_channels, batch_size, seed,
gc, dc):
np.random.seed(seed)
op = core.CreateOperator(
"ResizeNearest",
["X"],
["Y"],
width_scale=width_scale,
height_scale=height_scale,
)
X = np.random.rand(
batch_size, num_channels, height, width).astype(np.float32)
def ref(X):
output_height = np.int32(height * height_scale)
output_width = np.int32(width * width_scale)
output_h_idxs, output_w_idxs = np.meshgrid(np.arange(output_height),
np.arange(output_width),
indexing='ij')
input_h_idxs = np.minimum(
output_h_idxs / height_scale, height - 1).astype(np.int32)
input_w_idxs = np.minimum(
output_w_idxs / width_scale, width - 1).astype(np.int32)
Y = X[:, :, input_h_idxs, input_w_idxs]
return Y,
self.assertReferenceChecks(gc, op, [X], ref)
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(gc, op, [X], 0, [0], stepsize=0.1, threshold=1e-2)
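    # Worked example added for clarity (not part of the original test): with
    # height=4 and height_scale=2.0, the reference maps output row i to input
    # row min(floor(i / 2.0), 3), so output rows 0..7 read input rows
    # 0, 0, 1, 1, 2, 2, 3, 3 -- plain nearest-neighbour upsampling.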
@given(height_scale=st.floats(0.25, 4.0) | st.just(2.0),
width_scale=st.floats(0.25, 4.0) | st.just(2.0),
height=st.integers(4, 32),
width=st.integers(4, 32),
num_channels=st.integers(1, 4),
batch_size=st.integers(1, 4),
seed=st.integers(0, 65535),
**hu.gcs)
def test_nearest_grad(self, height_scale, width_scale, height, width,
num_channels, batch_size, seed, gc, dc):
np.random.seed(seed)
output_height = np.int32(height * height_scale)
output_width = np.int32(width * width_scale)
X = np.random.rand(batch_size,
num_channels,
height,
width).astype(np.float32)
dY = np.random.rand(batch_size,
num_channels,
output_height,
output_width).astype(np.float32)
op = core.CreateOperator(
"ResizeNearestGradient",
["dY", "X"],
["dX"],
width_scale=width_scale,
height_scale=height_scale,
)
def ref(dY, X):
dX = np.zeros_like(X)
for i in range(output_height):
for j in range(output_width):
input_i = np.minimum(i / height_scale, height - 1).astype(np.int32)
input_j = np.minimum(j / width_scale, width - 1).astype(np.int32)
dX[:, :, input_i, input_j] += dY[:, :, i, j]
return dX,
self.assertDeviceChecks(dc, op, [dY, X], [0])
self.assertReferenceChecks(gc, op, [dY, X], ref)
if __name__ == "__main__":
unittest.main()
|
examples/pytorch/sagpool/grid_search.py | ketyi/dgl | 9,516 | 11104932 |
import json
import os
from copy import deepcopy
from main import main, parse_args
from utils import get_stats
def load_config(path="./grid_search_config.json"):
with open(path, "r") as f:
return json.load(f)
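# Expected shape of grid_search_config.json, inferred from the keys accessed in
# grid_search() below; the concrete values are only an illustrative guess:
#
#   {
#     "dataset": ["DD", "PROTEINS"],
#     "arch": ["hierarchical", "global"],
#     "hidden": [64, 128],
#     "pool_ratio": [0.5],
#     "lr": [0.001, 0.0005],
#     "weight_decay": [0.0001]
#   }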
def run_experiments(args):
res = []
for i in range(args.num_trials):
print("Trial {}/{}".format(i + 1, args.num_trials))
acc, _ = main(args)
res.append(acc)
mean, err_bd = get_stats(res, conf_interval=True)
return mean, err_bd
def grid_search(config: dict):
args = parse_args()
results = {}
for d in config["dataset"]:
args.dataset = d
best_acc, err_bd = 0., 0.
best_args = vars(args)
for arch in config["arch"]:
args.architecture = arch
for hidden in config["hidden"]:
args.hid_dim = hidden
for pool_ratio in config["pool_ratio"]:
args.pool_ratio = pool_ratio
for lr in config["lr"]:
args.lr = lr
for weight_decay in config["weight_decay"]:
args.weight_decay = weight_decay
acc, bd = run_experiments(args)
if acc > best_acc:
best_acc = acc
err_bd = bd
best_args = deepcopy(vars(args))
args.output_path = "./output"
if not os.path.exists(args.output_path):
os.makedirs(args.output_path)
args.output_path = "./output/{}.log".format(d)
result = {
"params": best_args,
"result": "{:.4f}({:.4f})".format(best_acc, err_bd)
}
with open(args.output_path, "w") as f:
json.dump(result, f, sort_keys=True, indent=4)
grid_search(load_config())
|
tensorforce/core/parameters/parameter.py | DLPerf/tensorforce | 1,132 | 11104938 | # Copyright 2020 Tensorforce Team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
from tensorforce import TensorforceError
from tensorforce.core import Module, SignatureDict, TensorSpec, tf_function, tf_util
class Parameter(Module):
"""
Base class for dynamic hyperparameters.
Args:
unit ("timesteps" | "episodes" | "updates"): Unit of parameter schedule
(<span style="color:#00C000"><b>default</b></span>: timesteps).
name (string): <span style="color:#0000C0"><b>internal use</b></span>.
dtype (type): <span style="color:#0000C0"><b>internal use</b></span>.
shape (iter[int > 0]): <span style="color:#0000C0"><b>internal use</b></span>.
min_value (dtype-compatible value): <span style="color:#0000C0"><b>internal use</b></span>.
max_value (dtype-compatible value): <span style="color:#0000C0"><b>internal use</b></span>.
"""
def __init__(
self, *, unit='timesteps', name=None, dtype=None, shape=(), min_value=None, max_value=None
):
super().__init__(name=name)
assert unit in (None, 'timesteps', 'episodes', 'updates')
self.unit = unit
self.spec = TensorSpec(type=dtype, shape=shape, min_value=min_value, max_value=max_value)
assert self.min_value() is None or self.max_value() is None or \
self.min_value() <= self.max_value()
if self.spec.min_value is not None:
if self.min_value() is None:
raise TensorforceError.value(
name=self.name, argument='lower bound', value=self.min_value(),
hint=('not >= {}'.format(self.spec.min_value))
)
elif self.min_value() < self.spec.min_value:
raise TensorforceError.value(
name=self.name, argument='lower bound', value=self.min_value(),
hint=('< {}'.format(self.spec.min_value))
)
if self.spec.max_value is not None:
if self.max_value() is None:
raise TensorforceError.value(
name=self.name, argument='upper bound', value=self.max_value(),
hint=('not <= {}'.format(self.spec.max_value))
)
elif self.max_value() > self.spec.max_value:
raise TensorforceError.value(
name=self.name, argument='upper bound', value=self.max_value(),
hint=('> {}'.format(self.spec.max_value))
)
def min_value(self):
return None
def max_value(self):
return None
def is_constant(self, *, value=None):
if value is None:
if self.min_value() is not None and self.min_value() == self.max_value():
assert self.final_value() == self.min_value()
assert isinstance(self.final_value(), self.spec.py_type())
return self.final_value()
else:
return None
else:
assert isinstance(value, self.spec.py_type())
if self.min_value() == value and self.max_value() == value:
assert self.final_value() == value
return True
else:
return False
def final_value(self):
raise NotImplementedError
def initialize(self):
super().initialize()
self.register_summary(label='parameters', name=('parameters/' + self.name))
self.register_tracking(label='parameters', name=self.name, spec=self.spec)
def input_signature(self, *, function):
if function == 'value':
return SignatureDict()
else:
return super().input_signature(function=function)
def output_signature(self, *, function):
if function == 'value':
return SignatureDict(singleton=self.spec.signature(batched=False))
else:
return super().output_signature(function=function)
def parameter_value(self, *, step):
raise NotImplementedError
@tf_function(num_args=0)
def value(self):
if self.unit is None:
step = None
else:
step = self.root.units[self.unit]
parameter = self.parameter_value(step=step)
dependencies = self.spec.tf_assert(
x=parameter, include_type_shape=True,
message='Parameter.value: invalid {{issue}} for {name} value.'.format(name=self.name)
)
name = 'parameters/' + self.name
if self.unit is None:
step = 'timesteps'
else:
step = self.unit
dependencies.extend(self.summary(label='parameters', name=name, data=parameter, step=step))
dependencies.extend(self.track(label='parameters', name=self.name, data=parameter))
with tf.control_dependencies(control_inputs=dependencies):
return tf_util.identity(input=parameter)
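# Subclassing sketch (illustrative only, not shipped code): a concrete parameter
# mainly needs parameter_value() and final_value(); tf_util.constant is assumed
# here to behave like tf.constant with Tensorforce dtype strings.
#
#   class FixedFloat(Parameter):
#       def __init__(self, value, *, name=None):
#           self._value = value
#           super().__init__(unit=None, name=name, dtype='float')
#
#       def min_value(self):
#           return self._value
#
#       def max_value(self):
#           return self._value
#
#       def final_value(self):
#           return self._value
#
#       def parameter_value(self, *, step):
#           return tf_util.constant(value=self._value, dtype='float')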
|
src/tests/simian/auth/x509_test.py | tristansgray/simian | 326 | 11104946 |
#!/usr/bin/env python
#
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""x509 module tests."""
import array
import types
import mox
import stubout
from pyasn1_modules import rfc2459
from google.apputils import app
from google.apputils import basetest
from simian.auth import tlslite_bridge
from simian.auth import x509
def _RDNSeqFromTuple(values):
seq = rfc2459.RDNSequence()
for i, v in enumerate(values):
oi_type = '.'.join([str(x) for x in v[0]])
typevalue = rfc2459.AttributeTypeAndValue()
typevalue.setComponentByPosition(0, rfc2459.AttributeType(oi_type))
typevalue.setComponentByPosition(1, rfc2459.AttributeValue(v[1]))
seq.setComponentByPosition(
i,
rfc2459.RelativeDistinguishedName().setComponentByPosition(
0, typevalue))
return rfc2459.Name().setComponentByPosition(0, seq)
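# Usage note added for readability (not in the original test): the helper above
# turns OID/value pairs into a pyasn1 Name, e.g.
#   _RDNSeqFromTuple(((x509.OID_ID['CN'], 'example host'),))
# builds the distinguished name CN=example host used by several tests below.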
class X509ModuleTest(mox.MoxTestBase):
def setUp(self):
mox.MoxTestBase.setUp(self)
self.stubs = stubout.StubOutForTesting()
def tearDown(self):
self.mox.UnsetStubs()
self.stubs.UnsetAll()
def testLoadPemGeneric(self):
"""Test LoadPemGeneric()."""
header = 'BEGIN'
footer = 'END'
input = '\n\n\n-----BEGIN-----\nhello\n-----END-----\n\n\n'
expected = [
'-----BEGIN-----',
'hello',
'-----END-----',
]
self.assertEqual(expected, x509.LoadPemGeneric(input, header, footer))
def testLoadPemGenericWhenInfo(self):
"""Test LoadPemGeneric()."""
header = 'BEGIN'
footer = 'END'
input = ('\n\n\n-----BEGIN-----\n'
'Proc-Type: foo\nhello\n-----END-----\n\n\n')
expected = [
'-----BEGIN-----',
'hello',
'-----END-----',
]
self.assertEqual(expected, x509.LoadPemGeneric(input, header, footer))
def testLoadPemGenericWhenSpaces(self):
"""Test LoadPemGeneric()."""
header = 'BEGIN'
footer = 'END'
input = ' \n\n\n-----BEGIN----- \nhello \n-----END----- \n\n\n '
expected = [
'-----BEGIN-----',
'hello',
'-----END-----',
]
self.assertEqual(expected, x509.LoadPemGeneric(input, header, footer))
def testLoadPemGenericWhenSpacesNoLastNewline(self):
"""Test LoadPemGeneric()."""
header = 'BEGIN'
footer = 'END'
input = ' \n\n\n-----BEGIN----- \nhello \n-----END-----'
expected = [
'-----BEGIN-----',
'hello',
'-----END-----',
]
self.assertEqual(expected, x509.LoadPemGeneric(input, header, footer))
def testLoadPemGenericWhenMissingHeader(self):
"""Test LoadPemGeneric()."""
header = 'BEGIN BLAH'
footer = 'END BLAH'
input = '\n\n\n-----BEGIN-----\nhello\n-----END-----\n\n\n'
self.assertRaises(
x509.HeaderMissingPEMFormatError, x509.LoadPemGeneric,
input, header, footer)
def testLoadPemGenericWhenMissingFooter(self):
"""Test LoadPemGeneric()."""
header = 'BEGIN'
footer = 'END BLAH'
input = '\n\n\n-----BEGIN-----\nhello\n-----END-----\n\n\n'
self.assertRaises(
x509.FooterMissingPEMFormatError, x509.LoadPemGeneric,
input, header, footer)
def testLoadPemGenericWhenTooFewLines(self):
"""Test LoadPemGeneric()."""
header = 'BEGIN'
footer = 'END BLAH'
input = '\n\n\n-----BEGIN-----\n\n\n\n'
self.assertRaises(
x509.PEMFormatError, x509.LoadPemGeneric, input, header, footer)
def testLoadCertificateFromPEM(self):
"""Test LoadCertificateFromPEM()."""
header = 'BEGIN CERTIFICATE'
footer = 'END CERTIFICATE'
pem_input = 'pem_input'
pem_output = ['---header---', 'base64', '---footer---']
self.mox.StubOutWithMock(x509, 'LoadPemGeneric')
self.mox.StubOutWithMock(x509, 'LoadCertificateFromBase64')
x509.LoadPemGeneric(pem_input, header, footer).AndReturn(pem_output)
x509.LoadCertificateFromBase64('base64').AndReturn('ok')
self.mox.ReplayAll()
self.assertEqual(x509.LoadCertificateFromPEM(pem_input), 'ok')
self.mox.VerifyAll()
def testLoadRSAPrivateKeyFromPEM(self):
"""Test LoadRSAPrivateKeyFromPEM()."""
header = 'BEGIN RSA PRIVATE KEY'
footer = 'END RSA PRIVATE KEY'
pem_input = 'pem_input'
pem_output = ['---header---', 'base64', '---footer---']
self.mox.StubOutWithMock(x509, 'LoadPemGeneric')
self.mox.StubOutWithMock(
x509.tlslite_bridge, 'parsePEMKey')
x509.LoadPemGeneric(pem_input, header, footer).AndReturn(pem_output)
x509.tlslite_bridge.parsePEMKey(
'\n'.join(pem_output)).AndReturn('ok')
self.mox.ReplayAll()
self.assertEqual(x509.LoadRSAPrivateKeyFromPEM(pem_input), 'ok')
self.mox.VerifyAll()
def testLoadRSAPrivateKeyFromPEMWhenSyntaxError(self):
"""Test LoadRSAPrivateKeyFromPEM()."""
header = 'BEGIN RSA PRIVATE KEY'
footer = 'END RSA PRIVATE KEY'
pem_input = 'pem_input'
pem_output = ['---header---', 'base64', '---footer---']
self.mox.StubOutWithMock(x509, 'LoadPemGeneric')
self.mox.StubOutWithMock(
x509.tlslite_bridge, 'parsePEMKey')
x509.LoadPemGeneric(pem_input, header, footer).AndReturn(pem_output)
x509.tlslite_bridge.parsePEMKey(
'\n'.join(pem_output)).AndRaise(SyntaxError)
self.mox.ReplayAll()
self.assertRaises(
x509.RSAPrivateKeyPEMFormatError,
x509.LoadRSAPrivateKeyFromPEM, pem_input)
self.mox.VerifyAll()
def testLoadCertificateFromBase64(self):
"""Test LoadCertificateFromBase64()."""
self.mox.StubOutWithMock(x509.base64, 'b64decode')
self.mox.StubOutWithMock(x509, 'BASE64_RE')
x509.BASE64_RE.search('b64str').AndReturn(True)
x509.base64.b64decode('b64str').AndReturn('binary')
mock_x509 = self.mox.CreateMockAnything()
self.stubs.Set(x509, 'X509Certificate', mock_x509)
mock_x509().AndReturn(mock_x509)
mock_x509.LoadFromByteString('binary').AndReturn(None)
self.mox.ReplayAll()
self.assertEqual(
mock_x509,
x509.LoadCertificateFromBase64('b64str'))
self.mox.VerifyAll()
def testLoadCertificateFromBase64WhenBase64CharacterCheckFail(self):
"""Test LoadCertificateFromBase64()."""
self.mox.StubOutWithMock(x509.base64, 'b64decode')
self.mox.StubOutWithMock(x509, 'BASE64_RE')
x509.BASE64_RE.search('b64str').AndReturn(None)
self.mox.ReplayAll()
self.assertRaises(
x509.PEMFormatError,
x509.LoadCertificateFromBase64, 'b64str')
self.mox.VerifyAll()
def testLoadCertificateFromBase64WhenBase64DecodeFail(self):
"""Test LoadCertificateFromBase64()."""
self.mox.StubOutWithMock(x509.base64, 'b64decode')
self.mox.StubOutWithMock(x509, 'BASE64_RE')
x509.BASE64_RE.search('b64str').AndReturn(True)
x509.base64.b64decode('b64str').AndRaise(TypeError)
self.mox.ReplayAll()
self.assertRaises(
x509.PEMFormatError,
x509.LoadCertificateFromBase64, 'b64str')
self.mox.VerifyAll()
class BaseDataObjectTest(mox.MoxTestBase):
"""Test BaseDataObject class."""
def setUp(self):
mox.MoxTestBase.setUp(self)
self.stubs = stubout.StubOutForTesting()
self.bdo = x509.BaseDataObject()
def tearDown(self):
self.mox.UnsetStubs()
self.stubs.UnsetAll()
def testGetDataDict(self):
"""Test _GetDataDict()."""
try:
self.bdo._GetDataDict()
self.fail('NotImplementedError not raised')
except NotImplementedError:
pass
def testCreateGetMethod(self):
"""Test CreateGetMethod()."""
mock_dataobj = self.mox.CreateMockAnything()
mock_dataobj._GetDataDict().AndReturn({'foo': 123})
def MockSetattr(_, key, value):
self.assertEqual(key, 'GetFoo')
self.assertTrue(type(value) is types.FunctionType)
self.assertEqual(123, value(mock_dataobj))
self.mox.ReplayAll()
x509.BaseDataObject.CreateGetMethod('Foo', 'foo', setattr_=MockSetattr)
self.mox.VerifyAll()
class X509CertificateTest(mox.MoxTestBase):
def setUp(self):
mox.MoxTestBase.setUp(self)
self.stubs = stubout.StubOutForTesting()
self.x = x509.X509Certificate()
self._cert_reset = {
'serial_num': None,
'issuer': None,
'subject': None,
'valid_notbefore': None,
'valid_notafter': None,
'fields_data': None,
'sig_data': None,
'sig_algorithm': None,
'entire_cert_data': None,
'public_key': None,
'may_act_as_ca': None,
'key_usage': None,
'subject_alt_name': None,
}
def tearDown(self):
self.mox.UnsetStubs()
self.stubs.UnsetAll()
def _CheckSaneCertFields(self, d):
"""Check that output dict keys are defined in _cert_reset.
Args:
d: dict, output from a _Get*FromSequence method
"""
for k in d:
self.assertTrue(k in self._cert_reset, 'Key %s is invalid in _cert' % k)
def testInit(self):
"""Test __init__()."""
self.mox.StubOutWithMock(x509.X509Certificate, 'Reset')
x509.X509Certificate.Reset().AndReturn(None)
self.mox.ReplayAll()
unused = x509.X509Certificate()
self.mox.VerifyAll()
def testReset(self):
"""Test Reset()."""
self.x.Reset()
self.assertEqual(self.x._cert, self._cert_reset)
def testCreateGetMethods(self):
"""Test the autogenerated methods from CreateGetMethod()."""
names = [
'Issuer',
'Subject',
'DatetimeNotValidBefore',
'DatetimeNotValidAfter',
'FieldsData',
'SignatureData',
'SignatureAlgorithm',
'SerialNumber',
'EntireCertData',
'PublicKey',
'MayActAsCA',
'KeyUsage',
'SubjectAltName',
]
for name in names:
self.assertTrue(
hasattr(self.x, 'Get%s' % name), 'has method Get%s' % name)
self.assertTrue(
type(getattr(self.x, 'Get%s' % name)) is types.MethodType,
'Get%s is a method' % name)
def testGetDataDict(self):
"""Test _GetDataDict()."""
self.assertEqual(self.x._cert, self.x._GetDataDict())
def testCertTimestampToDatetime(self):
"""Test _CertTimestampToDatetime()."""
self.mox.StubOutWithMock(x509.time, 'strptime')
self.mox.StubOutWithMock(x509.datetime, 'datetime', True)
time_ary = (1981, 1, 11, 0, 0, 0, 0, 'bla')
x509.time.strptime('ts', self.x.TIMESTAMP_FMT).AndReturn(time_ary)
x509.datetime.datetime(*time_ary[0:7]).AndReturn('datetime')
self.mox.ReplayAll()
self.assertEqual('datetime', self.x._CertTimestampToDatetime(('ts', None)))
self.mox.VerifyAll()
def testStrToArray(self):
"""Test StrToArray()."""
r = tlslite_bridge.StrToArray('12313')
self.assertEqual(5, len(r))
self.assertTrue(isinstance(r, bytearray) or isinstance(r, array.array))
def testCertTimestampToDatetimeWhenBadTimestamp(self):
"""Test _CertTimestampToDatetime()."""
self.mox.StubOutWithMock(x509.time, 'strptime')
x509.time.strptime('ts', self.x.TIMESTAMP_FMT).AndRaise(ValueError)
self.mox.ReplayAll()
self.assertRaises(x509.CertificateValueError,
self.x._CertTimestampToDatetime, ('ts', None))
self.mox.VerifyAll()
def testAttributeValueToString(self):
"""Test _AttributeValueToString()."""
value = 'newyork'
expected = 'newyork'
self.assertEqual(value, expected)
result = self.x._AttributeValueToString(value)
self.assertEqual(expected, result)
def testAttributeValueToStringWhenLeadingBadCharsSpace(self):
"""Test _AttributeValueToString()."""
value = ' new york'
expected = '\\ new york'
result = self.x._AttributeValueToString(value)
self.assertEqual(expected, result)
def testAttributeValueToStringWhenLeadingBadCharsHash(self):
"""Test _AttributeValueToString()."""
value = '#new york'
expected = '\\#new york'
result = self.x._AttributeValueToString(value)
self.assertEqual(expected, result)
def testAttributeValueToStringWhenTrailingBadCharsSpace(self):
"""Test _AttributeValueToString()."""
value = 'new york '
expected = 'new york\\ '
result = self.x._AttributeValueToString(value)
self.assertEqual(expected, result)
def testAttributeValueToStringWhenContainsNull(self):
"""Test _AttributeValueToString()."""
value = 'new%syork' % chr(00)
expected = 'new\\00york'
result = self.x._AttributeValueToString(value)
self.assertEqual(expected, result)
def testAttributeValueToStringPreventIndexRegression(self):
"""Test _AttributeValueToString()."""
value = ',newyork'
expected = '\\,newyork'
result = self.x._AttributeValueToString(value)
self.assertEqual(expected, result)
def testAttributeValueToStringWhenCharsNeedingEscaping(self):
"""Test _AttributeValueToString()."""
chars = ['"', '+', ',', ';', '<', '>', '\\']
for c in chars:
value = 'new%syork' % c
expected = 'new\\%syork' % c
result = self.x._AttributeValueToString(value)
self.assertEqual(expected, result)
def testAttributeValueToStringWhenMultipleAdjacentTransformsNeeded(self):
"""Test _AttributeValueToString()."""
value = ' new,york;; '
expected = '\\ new\\,york\\;\\;\\ '
result = self.x._AttributeValueToString(value)
self.assertEqual(expected, result)
value = '#new,york;\x00, '
expected = '\\#new\\,york\\;\\00\\,\\ '
result = self.x._AttributeValueToString(value)
self.assertEqual(expected, result)
def testAssembleDNSequence(self):
"""Test _AssembleDNSequence()."""
value = _RDNSeqFromTuple((
(x509.OID_ID['CN'], 'foo'),
(x509.OID_ID['OU'], 'bar'),))
self.mox.StubOutWithMock(self.x, '_AttributeValueToString')
self.x._AttributeValueToString('foo').AndReturn('foo')
self.x._AttributeValueToString('bar').AndReturn('bar')
self.mox.ReplayAll()
self.assertEqual(self.x._AssembleDNSequence(value), 'CN=foo,OU=bar')
self.mox.VerifyAll()
def testAssembleDNSequenceWhenUnknownOID(self):
"""Test _AssembleDNSequence()."""
bad_oid = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11)
value = _RDNSeqFromTuple((
(bad_oid, 'foo'),
(x509.OID_ID['OU'], 'bar'),))
self.assertRaises(
x509.CertificateParseError,
self.x._AssembleDNSequence,
value)
def testGetFieldsFromSequence(self):
"""Test _GetFieldsFromSequence()."""
sig_alg_seq = ('a', 'b')
sig_alg = 'sigalg'
before_ts = self.mox.CreateMockAnything()
after_ts = self.mox.CreateMockAnything()
mock_utctime = self.mox.CreateMockAnything()
self.stubs.Set(x509.pyasn1.type.useful, 'UTCTime', mock_utctime)
before_ts.isSameTypeWith(mox.IgnoreArg()).AndReturn(True)
after_ts.isSameTypeWith(mox.IgnoreArg()).AndReturn(True)
serial_num = 12345
v3ext = {
'may_act_as_ca': 123,
'key_usage': (1, 2, 3),
'subject_alt_name': 'subj alt name',
}
seq = {
'version': x509.X509_CERT_VERSION_3,
'serialNumber': serial_num,
'signature': sig_alg_seq,
'issuer': (((x509.OID_ID['CN'], 'issuer'),),),
'validity': {
'notBefore': before_ts,
'notAfter': after_ts
},
'subject': (((x509.OID_ID['CN'], 'subject'),),),
'extensions': 'x509v3 extensions',
'extra_key': 'dsadas'
}
seq_encoded = 'raw bytes'
before_dt = 'before_dt'
after_dt = 'after_dt'
self.mox.StubOutWithMock(self.x, '_GetSignatureAlgorithmFromSequence')
self.mox.StubOutWithMock(self.x, '_CertTimestampToDatetime')
self.mox.StubOutWithMock(self.x, '_GetV3ExtensionFieldsFromSequence')
self.mox.StubOutWithMock(self.x, '_AssembleDNSequence')
self.mox.StubOutWithMock(x509.der_encoder, 'encode', True)
self.x._GetSignatureAlgorithmFromSequence(
sig_alg_seq).AndReturn(sig_alg)
self.x._AssembleDNSequence(seq['issuer']).AndReturn('CN=issuer')
self.x._CertTimestampToDatetime(before_ts).AndReturn(before_dt)
self.x._CertTimestampToDatetime(after_ts).AndReturn(after_dt)
self.x._AssembleDNSequence(seq['subject']).AndReturn('CN=subject')
self.x._GetV3ExtensionFieldsFromSequence(seq['extensions']).AndReturn(v3ext)
x509.der_encoder.encode(seq).AndReturn(seq_encoded)
self.mox.ReplayAll()
output = self.x._GetFieldsFromSequence(seq)
self._CheckSaneCertFields(output)
self.assertEqual({
'serial_num': serial_num,
'issuer': u'CN=issuer',
'subject': u'CN=subject',
'valid_notbefore': before_dt,
'valid_notafter': after_dt,
'fields_data': seq_encoded,
'sig_algorithm': sig_alg,
'may_act_as_ca': v3ext['may_act_as_ca'],
'key_usage': v3ext['key_usage'],
'subject_alt_name': v3ext['subject_alt_name'],
}, output)
self.mox.VerifyAll()
def testGetFieldsFromSequenceWhenWrongVersion(self):
"""Test _GetFieldsFromSequence()."""
seq = {
'version': x509.X509_CERT_VERSION_3 * 2, # fails
}
self.mox.ReplayAll()
self.assertRaises(
x509.CertificateParseError,
self.x._GetFieldsFromSequence, seq)
self.mox.VerifyAll()
def testGetFieldsFromSequenceWhenValidityNotBeforeFail(self):
"""Test _GetFieldsFromSequence()."""
sig_alg_seq = ('a', 'b')
sig_alg = 'sigalg'
before_ts = self.mox.CreateMockAnything()
after_ts = self.mox.CreateMockAnything()
mock_utctime = self.mox.CreateMockAnything()
self.stubs.Set(x509.pyasn1.type.useful, 'UTCTime', mock_utctime)
before_ts.isSameTypeWith(mox.IgnoreArg()).AndReturn(False) # fails
serial_num = 12345
    bad_oid_cn = (9,) * 10
seq = {
'version': x509.X509_CERT_VERSION_3,
'serialNumber': serial_num,
'signature': sig_alg_seq,
'issuer': (((x509.OID_ID['CN'], 'issuer'),),),
'validity': {
'notBefore': before_ts,
'notAfter': after_ts
},
'subject': (((x509.OID_ID['CN'], 'subject'),),),
'extensions': 'x509v3 extensions',
}
self.mox.StubOutWithMock(self.x, '_GetSignatureAlgorithmFromSequence')
self.mox.StubOutWithMock(self.x, '_AssembleDNSequence')
self.x._GetSignatureAlgorithmFromSequence(
sig_alg_seq).AndReturn(sig_alg)
self.x._AssembleDNSequence(seq['issuer']).AndReturn('CN=issuer')
self.mox.ReplayAll()
self.assertRaises(
x509.CertificateParseError,
self.x._GetFieldsFromSequence, seq)
self.mox.VerifyAll()
def testGetFieldsFromSequenceWhenValidityNotAfterFail(self):
"""Test _GetFieldsFromSequence()."""
sig_alg_seq = ('a', 'b')
sig_alg = 'sigalg'
before_ts = self.mox.CreateMockAnything()
after_ts = self.mox.CreateMockAnything()
mock_utctime = self.mox.CreateMockAnything()
self.stubs.Set(x509.pyasn1.type.useful, 'UTCTime', mock_utctime)
before_ts.isSameTypeWith(mox.IgnoreArg()).AndReturn(True)
after_ts.isSameTypeWith(mox.IgnoreArg()).AndReturn(False) # fails
serial_num = 12345
    bad_oid_cn = (9,) * 10
seq = {
'version': x509.X509_CERT_VERSION_3,
'serialNumber': serial_num,
'signature': sig_alg_seq,
'issuer': (((x509.OID_ID['CN'], 'issuer'),),),
'validity': {
'notBefore': before_ts,
'notAfter': after_ts
},
'subject': (((x509.OID_ID['CN'], 'subject'),),),
'extensions': 'x509v3 extensions',
}
self.mox.StubOutWithMock(self.x, '_GetSignatureAlgorithmFromSequence')
self.mox.StubOutWithMock(self.x, '_AssembleDNSequence')
self.x._GetSignatureAlgorithmFromSequence(
sig_alg_seq).AndReturn(sig_alg)
self.x._AssembleDNSequence(seq['issuer']).AndReturn('CN=issuer')
self.mox.ReplayAll()
self.assertRaises(
x509.CertificateParseError,
self.x._GetFieldsFromSequence, seq)
self.mox.VerifyAll()
def testGetFieldsFromSequenceWhenX509V3Missing(self):
"""Test _GetFieldsFromSequence()."""
sig_alg_seq = ('a','b')
sig_alg = 'sigalg'
before_ts = self.mox.CreateMockAnything()
after_ts = self.mox.CreateMockAnything()
mock_utctime = self.mox.CreateMockAnything()
self.stubs.Set(x509.pyasn1.type.useful, 'UTCTime', mock_utctime)
before_ts.isSameTypeWith(mox.IgnoreArg()).AndReturn(True)
after_ts.isSameTypeWith(mox.IgnoreArg()).AndReturn(True)
serial_num = 12345
seq = {
'version': x509.X509_CERT_VERSION_3,
'serialNumber': serial_num,
'signature': sig_alg_seq,
'issuer': (((x509.OID_ID['CN'], 'issuer'),),),
'validity': {
'notBefore': before_ts,
'notAfter': after_ts
},
'subject': (((x509.OID_ID['CN'], 'subject'),),),
}
seq_encoded = 'raw bytes'
before_dt = 'before_dt'
after_dt = 'after_dt'
self.mox.StubOutWithMock(self.x, '_GetSignatureAlgorithmFromSequence')
self.mox.StubOutWithMock(self.x, '_CertTimestampToDatetime')
self.mox.StubOutWithMock(self.x, '_AssembleDNSequence')
self.mox.StubOutWithMock(x509.der_encoder, 'encode', True)
self.x._GetSignatureAlgorithmFromSequence(
sig_alg_seq).AndReturn(sig_alg)
self.x._AssembleDNSequence(seq['issuer']).AndReturn('CN=issuer')
self.x._CertTimestampToDatetime(before_ts).AndReturn(before_dt)
self.x._CertTimestampToDatetime(after_ts).AndReturn(after_dt)
self.x._AssembleDNSequence(seq['subject']).AndReturn('CN=subject')
x509.der_encoder.encode(seq).AndReturn(seq_encoded)
self.mox.ReplayAll()
output = self.x._GetFieldsFromSequence(seq)
self._CheckSaneCertFields(output)
self.assertEqual(
output, {
'serial_num': serial_num,
'issuer': 'CN=issuer',
'subject': 'CN=subject',
'valid_notbefore': before_dt,
'valid_notafter': after_dt,
'fields_data': seq_encoded,
'sig_algorithm': sig_alg,
})
self.mox.VerifyAll()
def testGetSignatureAlgorithmFromSequence(self):
"""Test _GetSignatureAlgorithmFromSequence()."""
alg = self.x.SIGNATURE_ALGORITHMS[0]
seq = {'algorithm': alg}
output = self.x._GetSignatureAlgorithmFromSequence(seq)
self._CheckSaneCertFields(output)
self.assertEqual(output['sig_algorithm'], alg)
def testGetSignatureAlgorithmFromSequenceWhenBadOID(self):
"""Test _GetSignatureAlgorithmFromSequence()."""
alg = (5, 4, 3, 2, 1) # fake OID
self.assertFalse(alg in self.x.SIGNATURE_ALGORITHMS)
seq = {'algorithm': alg}
self.assertRaises(
x509.CertificateValueError,
self.x._GetSignatureAlgorithmFromSequence, seq)
def testGetSignatureFromSequence(self):
"""Test _GetSignatureFromSequence()."""
bits = 1024
good_seq = [1] * bits
    good_sig = (bits // 8) * 'x'
self.mox.StubOutWithMock(x509.der_encoder, 'encode', True)
x509.der_encoder.encode(good_seq).AndReturn('junkJunkJUNK%s' % good_sig)
self.mox.ReplayAll()
output = self.x._GetSignatureFromSequence(good_seq)
self._CheckSaneCertFields(output)
self.assertEqual(output['sig_data'], good_sig)
self.mox.VerifyAll()
def testGetSignatureFromSequenceWhenShortSeq(self):
"""Test _GetSignatureFromSequence()."""
short_seq = [1] * 5
self.mox.ReplayAll()
self.assertRaises(
x509.CertificateParseError,
self.x._GetSignatureFromSequence, short_seq)
self.mox.VerifyAll()
def testGetSignatureFromSequenceWhenNonBinarySeq(self):
"""Test _GetSignatureFromSequence()."""
non_binary_seq = [2] * 2048
self.mox.ReplayAll()
self.assertRaises(
x509.CertificateParseError,
self.x._GetSignatureFromSequence, non_binary_seq)
self.mox.VerifyAll()
def testGetCertSequencesFromTopSequence(self):
"""Test GetCertSequencesFromTopSequence()."""
seq = ({'tbsCertificate': 0, 'signatureAlgorithm': 1, 'signatureValue': 2},)
self.mox.StubOutWithMock(self.x, '_GetFieldsFromSequence')
self.mox.StubOutWithMock(self.x, '_GetSignatureAlgorithmFromSequence')
self.mox.StubOutWithMock(self.x, '_GetSignatureFromSequence')
self.x._GetFieldsFromSequence(seq[0]['tbsCertificate']).AndReturn({'a': 1})
self.x._GetSignatureAlgorithmFromSequence(
seq[0]['signatureAlgorithm']).AndReturn({
'b': 1
})
self.x._GetSignatureFromSequence(seq[0]['signatureValue']).AndReturn({
'c': 1
})
self.mox.ReplayAll()
o = self.x._GetCertSequencesFromTopSequence(seq)
self.assertEqual(o, {'a': 1, 'b': 1, 'c': 1})
self.mox.VerifyAll()
def testGetCertSequencesFromTopSequenceWhenBadTuple(self):
"""Test _GetCertSequencesFromTopSequence()."""
seq = ()
self.assertRaises(
x509.CertificateParseError,
self.x._GetCertSequencesFromTopSequence,
seq)
seq = 'not a tuple'
self.assertRaises(
x509.CertificateParseError,
self.x._GetCertSequencesFromTopSequence,
seq)
def testGetPublicKeyFromByteString(self):
"""Test _GetPublicKeyFromByteString()."""
bytes = 'bytes'
publickey = 'publickey'
self.mox.StubOutClassWithMocks(x509.tlslite_bridge, 'X509')
mock_tls509 = x509.tlslite_bridge.X509()
mock_tls509.parseBinary(bytes).AndReturn(None)
mock_tls509.publicKey = publickey
self.mox.ReplayAll()
self.assertEqual(
{'public_key': publickey},
self.x._GetPublicKeyFromByteString(bytes))
self.mox.VerifyAll()
def testLoadFromByteString(self):
"""Test LoadFromByteString()."""
self.x.Reset()
base_cert = self.x._cert
self.mox.StubOutWithMock(x509.der_decoder, 'decode', True)
self.mox.StubOutWithMock(self.x, '_GetCertSequencesFromTopSequence')
self.mox.StubOutWithMock(self.x, '_GetPublicKeyFromByteString')
self.mox.StubOutWithMock(self.x, 'Reset')
data = 'bytes'
seq = 'seq'
certseq = {'certseq': 1}
pubkey = {'pubkey': 1}
cert = {'entire_byte_string': data}
cert.update(base_cert)
cert.update(certseq)
cert.update(pubkey)
x509.der_decoder.decode(data, asn1Spec=mox.IgnoreArg()).AndReturn(seq)
self.x._GetCertSequencesFromTopSequence(seq).AndReturn(certseq)
self.x._GetPublicKeyFromByteString(data).AndReturn(pubkey)
self.x.Reset().AndReturn(None)
self.mox.ReplayAll()
self.x.LoadFromByteString(data)
self.assertEqual(self.x._cert, cert)
self.mox.VerifyAll()
def testLoadFromByteStringWhenPyAsn1Error(self):
"""Test LoadFromByteString()."""
self.mox.StubOutWithMock(x509.der_decoder, 'decode', True)
data = 'bytes'
x509.der_decoder.decode(
data, asn1Spec=mox.IgnoreArg()).AndRaise(x509.pyasn1.error.PyAsn1Error)
self.mox.ReplayAll()
self.assertRaises(x509.CertificateASN1FormatError,
self.x.LoadFromByteString, data)
self.mox.VerifyAll()
def testCheckValidityWhenObtainUtc(self):
"""Test CheckValidity()."""
mock_datetime = self.mox.CreateMock(x509.datetime.datetime)
self.stubs.Set(x509.datetime, 'datetime', mock_datetime)
mock_datetime.utcnow().AndReturn(2)
self.x._cert['valid_notafter'] = 5
self.x._cert['valid_notbefore'] = 0
self.mox.ReplayAll()
self.x.CheckValidity()
self.mox.VerifyAll()
def testCheckValidityWhenTooNew(self):
"""Test CheckValidity()."""
self.x._cert['valid_notafter'] = 1
self.mox.ReplayAll()
self.assertRaises(
x509.CertificateError,
self.x.CheckValidity,
2)
self.mox.VerifyAll()
def testCheckValidityWhenTooOld(self):
"""Test CheckValidity()."""
self.x._cert['valid_notafter'] = 10
self.x._cert['valid_notbefore'] = 5
self.mox.ReplayAll()
self.assertRaises(
x509.CertificateError,
self.x.CheckValidity,
2)
self.mox.VerifyAll()
def testCheckIssuerWhenNoIssuerSupplied(self):
"""Test CheckIssuer()."""
self.x._required_issuer = 'required'
self.x._cert['issuer'] = 'required'
self.mox.ReplayAll()
self.x.CheckIssuer()
self.mox.VerifyAll()
def testCheckIssuerWhenFailed(self):
"""Test CheckIssuer()."""
self.x._required_issuer = None
self.x._cert['issuer'] = 'required'
self.mox.ReplayAll()
self.assertRaises(
x509.CertificateValueError,
self.x.CheckIssuer, 'some other issuer')
self.mox.VerifyAll()
def testCheckIssuerWhenNoRequirement(self):
"""Test CheckIssuer()."""
self.x._required_issuer = None
self.x._cert['issuer'] = 'no one cares'
self.mox.ReplayAll()
self.x.CheckIssuer()
self.mox.VerifyAll()
def testCheckAll(self):
"""Test CheckAll()."""
self.mox.StubOutWithMock(self.x, 'CheckValidity')
self.mox.StubOutWithMock(self.x, 'CheckIssuer')
self.x.CheckValidity().AndReturn(None)
self.x.CheckIssuer().AndReturn(None)
self.mox.ReplayAll()
self.x.CheckAll()
self.mox.VerifyAll()
def testSetRequiredIssuer(self):
"""Test SetRequiredIssuer()."""
self.x.SetRequiredIssuer('required')
self.assertEqual(self.x._required_issuer, 'required')
def testIsSignedBy(self):
"""Test IsSignedBy()."""
self.mox.StubOutWithMock(tlslite_bridge, 'StrToArray')
self.mox.StubOutWithMock(self.x, 'GetSignatureData')
self.mox.StubOutWithMock(self.x, 'GetFieldsData')
mock_othercert = self.mox.CreateMockAnything()
mock_othercert.GetMayActAsCA().AndReturn(True)
mock_othercert.GetPublicKey().AndReturn(mock_othercert) # lazy re-use
self.x.GetSignatureData().AndReturn('sigdata')
self.x.GetFieldsData().AndReturn('fieldsdata')
tlslite_bridge.StrToArray('sigdata').AndReturn('arysigdata')
tlslite_bridge.StrToArray('fieldsdata').AndReturn('aryfieldsdata')
mock_othercert.hashAndVerify('arysigdata', 'aryfieldsdata').AndReturn(True)
self.mox.ReplayAll()
self.assertTrue(self.x.IsSignedBy(mock_othercert))
self.mox.VerifyAll()
def testIsSignedByWhenOtherCertNotCA(self):
"""Test IsSignedBy()."""
mock_othercert = self.mox.CreateMockAnything()
mock_othercert.GetMayActAsCA().AndReturn(False)
self.mox.ReplayAll()
self.assertRaises(
x509.CertificateValueError,
self.x.IsSignedBy, mock_othercert)
self.mox.VerifyAll()
def main(unused_argv):
basetest.main()
if __name__ == '__main__':
app.run()
|
docs/gallery-examples/example5_oec.py | hdevillepoix/astroquery | 577 | 11104950 | from astroquery import open_exoplanet_catalogue as oec
from astroquery.open_exoplanet_catalogue import findvalue
cata = oec.get_catalogue()
kepler68b = cata.find(".//planet[name='Kepler-68 b']")
print(findvalue(kepler68b, 'mass'))
"""
0.02105109
"""
|
data_collection/gazette/spiders/sc_presidente_nereu.py | kaiocp/querido-diario | 454 | 11104952 |
from gazette.spiders.base.fecam import FecamGazetteSpider
class ScPresidenteNereuSpider(FecamGazetteSpider):
name = "sc_presidente_nereu"
FECAM_QUERY = "cod_entidade:212"
TERRITORY_ID = "4214102"
|
ding/torch_utils/tests/test_ckpt_helper.py | sailxjx/DI-engine | 464 | 11104992 |
import os
import time
import pytest
import torch
import torch.nn as nn
import uuid
from ding.torch_utils.checkpoint_helper import auto_checkpoint, build_checkpoint_helper, CountVar
from ding.utils import read_file, save_file
class DstModel(nn.Module):
def __init__(self):
super(DstModel, self).__init__()
self.fc1 = nn.Linear(3, 3)
self.fc2 = nn.Linear(3, 8)
self.fc_dst = nn.Linear(3, 6)
class SrcModel(nn.Module):
def __init__(self):
super(SrcModel, self).__init__()
self.fc1 = nn.Linear(3, 3)
self.fc2 = nn.Linear(3, 8)
self.fc_src = nn.Linear(3, 7)
class HasStateDict(object):
def __init__(self, name):
self._name = name
self._state_dict = name + str(uuid.uuid4())
def state_dict(self):
old = self._state_dict
self._state_dict = self._name + str(uuid.uuid4())
return old
def load_state_dict(self, state_dict):
self._state_dict = state_dict
@pytest.mark.unittest
class TestCkptHelper:
def test_load_model(self):
path = 'model.pt'
os.popen('rm -rf ' + path)
time.sleep(1)
dst_model = DstModel()
src_model = SrcModel()
ckpt_state_dict = {'model': src_model.state_dict()}
torch.save(ckpt_state_dict, path)
ckpt_helper = build_checkpoint_helper({})
with pytest.raises(RuntimeError):
ckpt_helper.load(path, dst_model, strict=True)
ckpt_helper.load(path, dst_model, strict=False)
assert torch.abs(dst_model.fc1.weight - src_model.fc1.weight).max() < 1e-6
assert torch.abs(dst_model.fc1.bias - src_model.fc1.bias).max() < 1e-6
dst_model = DstModel()
src_model = SrcModel()
assert torch.abs(dst_model.fc1.weight - src_model.fc1.weight).max() > 1e-6
src_optimizer = HasStateDict('src_optimizer')
dst_optimizer = HasStateDict('dst_optimizer')
src_last_epoch = CountVar(11)
dst_last_epoch = CountVar(5)
src_last_iter = CountVar(110)
dst_last_iter = CountVar(50)
src_dataset = HasStateDict('src_dataset')
dst_dataset = HasStateDict('dst_dataset')
src_collector_info = HasStateDict('src_collect_info')
dst_collector_info = HasStateDict('dst_collect_info')
ckpt_helper.save(
path,
src_model,
optimizer=src_optimizer,
dataset=src_dataset,
collector_info=src_collector_info,
last_iter=src_last_iter,
last_epoch=src_last_epoch,
prefix_op='remove',
prefix="f"
)
ckpt_helper.load(
path,
dst_model,
dataset=dst_dataset,
optimizer=dst_optimizer,
last_iter=dst_last_iter,
last_epoch=dst_last_epoch,
collector_info=dst_collector_info,
strict=False,
state_dict_mask=['fc1'],
prefix_op='add',
prefix="f"
)
assert dst_dataset.state_dict().startswith('src')
assert dst_optimizer.state_dict().startswith('src')
assert dst_collector_info.state_dict().startswith('src')
assert dst_last_iter.val == 110
for k, v in dst_model.named_parameters():
assert k.startswith('fc')
print('==dst', dst_model.fc2.weight)
print('==src', src_model.fc2.weight)
assert torch.abs(dst_model.fc2.weight - src_model.fc2.weight).max() < 1e-6
assert torch.abs(dst_model.fc1.weight - src_model.fc1.weight).max() > 1e-6
checkpoint = read_file(path)
checkpoint.pop('dataset')
checkpoint.pop('optimizer')
checkpoint.pop('last_iter')
save_file(path, checkpoint)
ckpt_helper.load(
path,
dst_model,
dataset=dst_dataset,
optimizer=dst_optimizer,
last_iter=dst_last_iter,
last_epoch=dst_last_epoch,
collector_info=dst_collector_info,
strict=True,
state_dict_mask=['fc1'],
prefix_op='add',
prefix="f"
)
with pytest.raises(NotImplementedError):
ckpt_helper.load(
path,
dst_model,
strict=False,
lr_schduler='lr_scheduler',
last_iter=dst_last_iter,
)
with pytest.raises(KeyError):
ckpt_helper.save(path, src_model, prefix_op='key_error', prefix="f")
ckpt_helper.load(path, dst_model, strict=False, prefix_op='key_error', prefix="f")
os.popen('rm -rf ' + path + '*')
@pytest.mark.unittest
def test_count_var():
var = CountVar(0)
var.add(5)
assert var.val == 5
var.update(3)
assert var.val == 3
@pytest.mark.unittest
def test_auto_checkpoint():
class AutoCkptCls:
def __init__(self):
pass
@auto_checkpoint
def start(self):
for i in range(10):
if i < 5:
time.sleep(0.2)
else:
raise Exception("There is an exception")
break
def save_checkpoint(self, ckpt_path):
print('Checkpoint is saved successfully in {}!'.format(ckpt_path))
auto_ckpt = AutoCkptCls()
auto_ckpt.start()
if __name__ == '__main__':
test = TestCkptHelper()
test.test_load_model()
|
simba/outlier_scripts/movement/rw_dfs.py | justinshenk/simba | 172 | 11105002 | import pandas as pd
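# Small I/O helpers: read_df/save_df load and persist a tracking dataframe as either
# CSV or parquet, selected by the `wfileType` string ('csv' or 'parquet').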
def read_df(currentFilePath, wfileType):
if wfileType == 'csv':
currDf = pd.read_csv(currentFilePath)
if wfileType == 'parquet':
currDf = pd.read_parquet(currentFilePath)
return currDf
def save_df(currDf, wfileType, path):
if wfileType == 'csv':
currDf.to_csv(path, index=False)
if wfileType == 'parquet':
currDf.to_parquet(path, index=False) |
data/transcoder_evaluation_gfg/python/FAST_MULTIPLICATION_METHOD_WITHOUT_USING_MULTIPLICATION_OPERATOR_RUSSIAN_PEASANTS_ALGORITHM.py | mxl1n/CodeGen | 241 | 11105021 | # Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def f_gold ( a , b ) :
res = 0
while ( b > 0 ) :
if ( b & 1 ) :
res = res + a
a = a << 1
b = b >> 1
return res
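# Worked example of the Russian peasant scheme implemented by f_gold:
# f_gold(4, 33): b=33 is odd so res=4; then (a, b) steps through (8, 16), (16, 8),
# (32, 4), (64, 2), (128, 1); at b=1 res becomes 4 + 128 = 132, which equals 4 * 33.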
#TOFILL
if __name__ == '__main__':
param = [
(4,33,),
(36,67,),
(65,52,),
(55,37,),
(35,76,),
(69,98,),
(84,62,),
(5,80,),
(15,36,),
(67,84,)
]
n_success = 0
for i, parameters_set in enumerate(param):
if f_filled(*parameters_set) == f_gold(*parameters_set):
n_success+=1
print("#Results: %i, %i" % (n_success, len(param))) |
plenario/apiary/validators.py | vforgione/plenario | 109 | 11105028 | from collections import defaultdict
from wtforms import ValidationError
from plenario.database import postgres_session
from plenario.models.SensorNetwork import FeatureMeta, NetworkMeta
def validate_sensor_properties(observed_properties):
if not observed_properties:
raise ValidationError('No observed properties were provided!')
features = defaultdict(list)
for feature in postgres_session.query(FeatureMeta).all():
for property_dict in feature.observed_properties:
features[feature.name].append(property_dict['name'])
for feature_property in list(observed_properties.values()):
try:
feat, prop = feature_property.split('.')
except ValueError:
raise ValidationError('Feature specified without property: {}'
.format(feature_property))
if feat not in features:
raise ValidationError('Bad FOI name: {!r}'.format(feat))
if prop not in features[feat]:
raise ValidationError('Bad property name: {!r}'.format(prop))
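# Illustrative (hypothetical) input for validate_sensor_properties: a mapping of sensor
# output keys to "<feature>.<property>" strings, e.g.
# {'temp_col': 'temperature.temperature', 'hum_col': 'relative_humidity.humidity'};
# each value must name a registered FeatureMeta and one of its observed properties.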
def assert_json_enclosed_in_brackets(json_list):
if type(json_list) != list:
raise ValidationError('JSON must be enclosed in brackets: [ {...} ]')
def validate_node(network):
if network not in [net.name for net in postgres_session.query(NetworkMeta).all()]:
raise ValidationError('Invalid network name!')
def map_to_redshift_type(property_dict):
"""Given a dictionary of the form {'name': 'foo', 'value': 'bar'}, pass
or coerce the 'value' strings to one of four types: BOOLEAN, DOUBLE
PRECISION, BIGINT, VARCHAR.
:param property_dict: contains apiary provided column definition
:raises: ValidationError: if a provided value is unmappable
"""
redshift_type_map = {
'BOOL': 'BOOLEAN',
'INT': 'BIGINT',
'INTEGER': 'BIGINT',
'DOUBLE': 'DOUBLE PRECISION',
'FLOAT': 'DOUBLE PRECISION',
'STRING': 'VARCHAR'
}
value = property_dict['type'].upper()
type_aliases = set(redshift_type_map.keys())
type_standards = set(redshift_type_map.values())
if value not in type_standards:
if value not in type_aliases:
raise ValidationError('Invalid type provided: {}'.format(value))
else:
property_dict['value'] = redshift_type_map[value]
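# Illustrative coercion (assumed usage): given {'name': 'temperature', 'type': 'float'},
# the 'FLOAT' alias is mapped to 'DOUBLE PRECISION' and written back into the dict;
# an already-standard type such as 'BIGINT' passes through unchanged, and an unknown
# string like 'blob' raises ValidationError.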
|
tests/dopamine/jax/agents/full_rainbow/full_rainbow_agent_test.py | kuldeepbrd1/dopamine | 9,825 | 11105059 | # coding=utf-8
# Copyright 2021 The Dopamine Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for dopamine.jax.agents.full_rainbow.full_rainbow_agent."""
from absl.testing import absltest
from dopamine.discrete_domains import atari_lib
from dopamine.jax.agents.dqn import dqn_agent
from dopamine.jax.agents.full_rainbow import full_rainbow_agent
from dopamine.utils import test_utils
from flax import linen as nn
import gin
import jax.numpy as jnp
import numpy as onp
class FullRainbowAgentTest(absltest.TestCase):
def setUp(self):
super(FullRainbowAgentTest, self).setUp()
self._num_actions = 4
self._num_atoms = 5
self._vmax = 7.
self.observation_shape = dqn_agent.NATURE_DQN_OBSERVATION_SHAPE
self.observation_dtype = dqn_agent.NATURE_DQN_DTYPE
self.stack_size = dqn_agent.NATURE_DQN_STACK_SIZE
self.zero_state = onp.zeros((1,) + self.observation_shape +
(self.stack_size,))
gin.bind_parameter('OutOfGraphPrioritizedReplayBuffer.replay_capacity', 100)
gin.bind_parameter('OutOfGraphPrioritizedReplayBuffer.batch_size', 2)
gin.bind_parameter('JaxDQNAgent.min_replay_history', 32)
gin.bind_parameter('JaxDQNAgent.epsilon_eval', 0.0)
gin.bind_parameter('JaxDQNAgent.epsilon_decay_period', 90)
def _create_test_agent(self):
# This dummy network allows us to deterministically anticipate that
# action 0 will be selected by an argmax.
# In Rainbow we are dealing with a distribution over Q-values,
# which are represented as num_atoms bins, ranging from -vmax to vmax.
# The output layer will have num_actions * num_atoms elements,
# so each group of num_atoms weights represent the logits for a
# particular action. By setting 1s everywhere, except for the first
# num_atoms (representing the logits for the first action), which are
# set to onp.arange(num_atoms), we are ensuring that the first action
# places higher weight on higher Q-values; this results in the first
# action being chosen.
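    # Concretely, with num_atoms=5 the kernel columns for action 0 are initialized to
    # [1, 2, 3, 4, 5] (per input row) while all other actions keep 1s, so action 0's
    # softmax puts more mass on the larger support values and yields the largest
    # expected Q-value under argmax.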
class MockFullRainbowNetwork(nn.Module):
"""Custom Jax network used in tests."""
num_actions: int
num_atoms: int
noisy: bool
dueling: bool
distributional: bool
inputs_preprocessed: bool = False
@nn.compact
def __call__(self, x, support, eval_mode=False, key=None):
def custom_init(key, shape, dtype=jnp.float32):
del key
to_pick_first_action = onp.ones(shape, dtype)
to_pick_first_action[:, :self.num_atoms] = onp.arange(
1, self.num_atoms + 1)
return to_pick_first_action
x = x.astype(jnp.float32)
x = x.reshape((-1)) # flatten
x = nn.Dense(
features=self.num_actions * self.num_atoms,
kernel_init=custom_init,
bias_init=nn.initializers.ones)(
x)
logits = x.reshape((self.num_actions, self.num_atoms))
if not self.distributional:
qs = jnp.sum(logits, axis=-1) # Sum over all the num_atoms
return atari_lib.DQNNetworkType(qs)
probabilities = nn.softmax(logits)
qs = jnp.sum(support * probabilities, axis=1)
return atari_lib.RainbowNetworkType(qs, logits, probabilities)
agent = full_rainbow_agent.JaxFullRainbowAgent(
network=MockFullRainbowNetwork,
num_actions=self._num_actions,
num_atoms=self._num_atoms,
vmax=self._vmax,
distributional=True,
epsilon_fn=lambda w, x, y, z: 0.0, # No exploration.
)
# This ensures non-random action choices (since epsilon_eval = 0.0) and
# skips the train_step.
agent.eval_mode = True
return agent
def testCreateAgentWithDefaults(self):
# Verifies that we can create and train an agent with the default values.
agent = full_rainbow_agent.JaxFullRainbowAgent(num_actions=4)
observation = onp.ones([84, 84, 1])
agent.begin_episode(observation)
agent.step(reward=1, observation=observation)
agent.end_episode(reward=1)
def testShapesAndValues(self):
agent = self._create_test_agent()
self.assertEqual(agent._support.shape[0], self._num_atoms)
self.assertEqual(jnp.min(agent._support), -self._vmax)
self.assertEqual(jnp.max(agent._support), self._vmax)
state = onp.ones((1, 28224))
net_output = agent.network_def.apply(agent.online_params, state,
agent._support)
self.assertEqual(net_output.logits.shape,
(self._num_actions, self._num_atoms))
self.assertEqual(net_output.probabilities.shape, net_output.logits.shape)
self.assertEqual(net_output.logits.shape[0], self._num_actions)
self.assertEqual(net_output.logits.shape[1], self._num_atoms)
self.assertEqual(net_output.q_values.shape, (self._num_actions,))
def testBeginEpisode(self):
"""Tests the functionality of agent.begin_episode.
Specifically, the action returned and its effect on the state.
"""
agent = self._create_test_agent()
# We fill up the state with 9s. On calling agent.begin_episode the state
# should be reset to all 0s.
agent.state.fill(9)
first_observation = onp.ones(self.observation_shape + (1,))
self.assertEqual(agent.begin_episode(first_observation), 0)
# When the all-1s observation is received, it will be placed at the end of
# the state.
expected_state = self.zero_state
expected_state[:, :, :, -1] = onp.ones((1,) + self.observation_shape)
onp.array_equal(agent.state, expected_state)
onp.array_equal(agent._observation, first_observation[:, :, 0])
# No training happens in eval mode.
self.assertEqual(agent.training_steps, 0)
# This will now cause training to happen.
agent.eval_mode = False
# Having a low replay memory add_count will prevent any of the
# train/prefetch/sync ops from being called.
agent._replay.add_count = 0
second_observation = onp.ones(self.observation_shape + (1,)) * 2
agent.begin_episode(second_observation)
# The agent's state will be reset, so we will only be left with the all-2s
# observation.
expected_state[:, :, :, -1] = onp.full((1,) + self.observation_shape, 2)
onp.array_equal(agent.state, expected_state)
onp.array_equal(agent._observation, second_observation[:, :, 0])
# training_steps is incremented since we set eval_mode to False.
self.assertEqual(agent.training_steps, 1)
def testStepEval(self):
"""Tests the functionality of agent.step() in eval mode.
Specifically, the action returned, and confirms that no training happens.
"""
agent = self._create_test_agent()
base_observation = onp.ones(self.observation_shape + (1,))
# This will reset state and choose a first action.
agent.begin_episode(base_observation)
# We mock the replay buffer to verify how the agent interacts with it.
agent._replay = test_utils.MockReplayBuffer()
expected_state = self.zero_state
num_steps = 10
for step in range(1, num_steps + 1):
# We make observation a multiple of step for testing purposes (to
# uniquely identify each observation).
observation = base_observation * step
self.assertEqual(agent.step(reward=1, observation=observation), 0)
stack_pos = step - num_steps - 1
if stack_pos >= -self.stack_size:
expected_state[:, :, :, stack_pos] = onp.full(
(1,) + self.observation_shape, step)
onp.array_equal(agent.state, expected_state)
onp.array_equal(agent._last_observation,
onp.ones(self.observation_shape) * (num_steps - 1))
onp.array_equal(agent._observation, observation[:, :, 0])
# No training happens in eval mode.
self.assertEqual(agent.training_steps, 0)
# No transitions are added in eval mode.
self.assertEqual(agent._replay.add.call_count, 0)
def testStepTrain(self):
"""Test the functionality of agent.step() in train mode.
Specifically, the action returned, and confirms training is happening.
"""
agent = self._create_test_agent()
agent.eval_mode = False
base_observation = onp.ones(self.observation_shape + (1,))
# We mock the replay buffer to verify how the agent interacts with it.
agent._replay = test_utils.MockReplayBuffer(is_jax=True)
# This will reset state and choose a first action.
agent.begin_episode(base_observation)
expected_state = self.zero_state
num_steps = 10
for step in range(1, num_steps + 1):
# We make observation a multiple of step for testing purposes (to
# uniquely identify each observation).
observation = base_observation * step
self.assertEqual(agent.step(reward=1, observation=observation), 0)
stack_pos = step - num_steps - 1
if stack_pos >= -self.stack_size:
expected_state[:, :, :, stack_pos] = onp.full(
(1,) + self.observation_shape, step)
onp.array_equal(agent.state, expected_state)
onp.array_equal(agent._last_observation,
onp.full(self.observation_shape, num_steps - 1))
onp.array_equal(agent._observation, observation[:, :, 0])
# We expect one more than num_steps because of the call to begin_episode.
self.assertEqual(agent.training_steps, num_steps + 1)
self.assertEqual(agent._replay.add.call_count, num_steps)
agent.end_episode(reward=1)
self.assertEqual(agent._replay.add.call_count, num_steps + 1)
def testStoreTransitionWithUniformSampling(self):
agent = full_rainbow_agent.JaxFullRainbowAgent(
num_actions=4, replay_scheme='uniform')
dummy_frame = onp.zeros((84, 84))
# Adding transitions with default, 10., default priorities.
agent._store_transition(dummy_frame, 0, 0, False)
agent._store_transition(dummy_frame, 0, 0, False, priority=10.)
agent._store_transition(dummy_frame, 0, 0, False)
returned_priorities = agent._replay.get_priority(
onp.arange(self.stack_size - 1, self.stack_size + 2, dtype=onp.int32))
expected_priorities = [1., 10., 1.]
onp.array_equal(returned_priorities, expected_priorities)
def testStoreTransitionWithPrioritizedSampling(self):
agent = full_rainbow_agent.JaxFullRainbowAgent(
num_actions=4, replay_scheme='prioritized')
dummy_frame = onp.zeros((84, 84))
# Adding transitions with default, 10., default priorities.
agent._store_transition(dummy_frame, 0, 0, False)
agent._store_transition(dummy_frame, 0, 0, False, priority=10.)
agent._store_transition(dummy_frame, 0, 0, False)
returned_priorities = agent._replay.get_priority(
onp.arange(self.stack_size - 1, self.stack_size + 2, dtype=onp.int32))
expected_priorities = [1., 10., 10.]
onp.array_equal(returned_priorities, expected_priorities)
if __name__ == '__main__':
absltest.main()
|
apps/base/urls/parameter.py | youssriaboelseod/pyerp | 115 | 11105070 | """The store routes
"""
# Django Library
from django.urls import path
# Localfolder Library
from ..views.parameter import (
ParameterCreateView, ParameterDeleteView, ParameterDetailView,
ParameterListView, ParameterUpdateView)
app_name = 'PyParameter'
urlpatterns = [
path('', ParameterListView.as_view(), name='list'),
path('add/', ParameterCreateView.as_view(), name='add'),
path('<int:pk>/', ParameterDetailView.as_view(), name='detail'),
path('<int:pk>/update', ParameterUpdateView.as_view(), name='update'),
path('<int:pk>/delete/', ParameterDeleteView.as_view(), name='delete'),
]
|
xfel/command_line/FEE_average_plot.py | dperl-sol/cctbx_project | 155 | 11105105 | from __future__ import absolute_import, division, print_function
from six.moves import range
from psana import *
import numpy as np
from libtbx import easy_pickle
import iotbx.phil, sys
import libtbx.load_env
from libtbx.utils import Sorry, Usage
master_phil = """
dispatch{
events_begin = None
.type = int
.help = If not specified, process all events. Otherwise, process events beginning at this number.
events_end = None
.type = int
.help = If not specified, process all events. Otherwise, process events ending at this number.
max_events = None
.type = int
    .help = If not specified, process all events. Otherwise, only process this many events.
events_accepted = False
.type = bool
.help = Plot average of filtered events
events_rejected = False
.type = bool
.help = Plot average of rejected events
events_all = False
.type = bool
.help = Plot average of all events
}
input {
cfg = None
.type = str
.help = Path to psana config file
experiment = None
.type = str
    .help = Experiment identifier, e.g. cxi84914
run_num = None
.type = int
.help = Run number or run range to process
address = None
.type = str
.help = FEE detector address, e.g. FEE-SPEC0
dark = None
.type = str
.help = Path to FEE dark pickle file
pixel_to_eV{
energy_per_px = None
.type = float
.help = Energy per pixel conversion if known
x_coord_one = None
.type = int
.help = Pixel valued x coordinate of known energy y.
y_coord_one = None
.type = int
.help = Energy in eV of y coordinate of known x pixel position.
x_coord_two = None
.type = int
.help = Pixel valued x coordinate of known energy y.
y_coord_two = None
.type = int
.help = Energy in eV of y coordinate of known x pixel position.
}
}
output {
output_dir = .
.type = str
.help = Directory output files will be placed
}
"""
def run(args):
phil = iotbx.phil.process_command_line(args=args, master_string=master_phil).show()
usage = \
""" %s input.experiment=experimentname input.run_num=N input.address=address
"""%libtbx.env.dispatcher_name
params = phil.work.extract()
if not os.path.exists(params.output.output_dir):
raise Sorry("Output path not found:" + params.output.output_dir)
if params.input.experiment is None or \
params.input.run_num is None or \
params.input.address is None:
raise Usage(usage)
# set up psana
if params.dispatch.events_accepted or params.dispatch.events_rejected:
assert params.input.cfg is not None
setConfigFile(params.input.cfg)
dataset_name = "exp=%s:run=%s:idx"%(params.input.experiment,params.input.run_num)
ds = DataSource(dataset_name)
src = Source('DetInfo(%s)'%params.input.address)
# set up multiprocessing with MPI
from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank() # each process in MPI has a unique id, 0-indexed
size = comm.Get_size() # size: number of processes running in this job
if params.dispatch.max_events is None:
max_events = sys.maxsize
else:
max_events = params.dispatch.max_events
if params.input.dark is not None:
dark = easy_pickle.load('%s'%params.input.dark)
for run in ds.runs():
times = run.times()
if (params.dispatch.events_begin is None and params.dispatch.events_end is None):
times = times[:]
elif (params.dispatch.events_begin is not None and params.dispatch.events_end is None):
times = times[params.dispatch.events_begin:]
elif (params.dispatch.events_begin is None and params.dispatch.events_end is not None):
times = times[:params.dispatch.events_end]
elif (params.dispatch.events_begin is not None and params.dispatch.events_end is not None):
times = times[params.dispatch.events_begin:params.dispatch.events_end]
nevents = min(len(times),max_events)
# chop the list into pieces, depending on rank. This assigns each process
  # events such that they get every Nth event where N is the number of processes
mytimes = [times[i] for i in range(nevents) if (i+rank)%size == 0]
print(len(mytimes))
#mytimes = mytimes[len(mytimes)-1000:len(mytimes)]
totals = np.array([0.0])
print("initial totals", totals)
for i, t in enumerate(mytimes):
print("Event", i, "of", len(mytimes), end=' ')
evt = run.event(t)
if params.dispatch.events_accepted or params.dispatch.events_all:
if evt.get("skip_event")==True:
continue
elif params.dispatch.events_rejected:
if evt.get("skip_event")==False:
continue
try:
data = evt.get(Camera.FrameV1,src)
except ValueError as e:
src = Source('BldInfo(%s)'%params.input.address)
data = evt.get(Bld.BldDataSpectrometerV1, src)
if data is None:
print("No data")
continue
#set default to determine FEE data type
two_D=False
#check attribute of data for type
try:
data = np.array(data.data16().astype(np.int32))
two_D=True
except AttributeError as e:
data = np.array(data.hproj().astype(np.float64))
if two_D:
if 'dark' in locals():
data = data - dark
one_D_data = np.sum(data,0)/data.shape[0]
two_D_data = np.double(data)
else:
        #used to fix underflow problem that was present in earlier release of psana and present for LH80
for i in range(len(data)):
if data[i]>1000000000:
data[i]=data[i]-(2**32)
if 'dark' in locals():
data = data - dark
one_D_data = data
totals[0] += 1
print("total good:", totals[0])
if not 'fee_one_D' in locals():
fee_one_D = one_D_data
else:
fee_one_D += one_D_data
if ('two_D_data' in locals() and not 'fee_two_D' in locals()):
fee_two_D = two_D_data
elif 'fee_two_D' in locals():
fee_two_D += two_D_data
acceptedtotals = np.zeros(totals.shape)
acceptedfee1 = np.zeros((fee_one_D.shape))
if 'fee_two_D' in locals():
acceptedfee2 = np.zeros((fee_two_D.shape))
print("Synchronizing rank", rank)
comm.Reduce(fee_one_D,acceptedfee1)
comm.Reduce(totals,acceptedtotals)
if 'acceptedfee2' in locals():
comm.Reduce(fee_two_D,acceptedfee2)
print("number averaged", acceptedtotals[0])
if rank == 0:
if acceptedtotals[0] > 0:
acceptedfee1 /= acceptedtotals[0]
if 'acceptedfee2' in locals():
acceptedfee2 /= acceptedtotals[0]
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from pylab import savefig,close
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
from matplotlib import cm
if params.dispatch.events_accepted:
easy_pickle.dump(os.path.join(params.output.output_dir,"fee_avg_1_D_"+'r%s'%params.input.run_num+"_accepted.pickle"), acceptedfee1)
pp1 = PdfPages(os.path.join(params.output.output_dir,"fee_avg_1_D_"+'r%s'%params.input.run_num+"_accepted.pdf"))
if 'acceptedfee2' in locals():
easy_pickle.dump(os.path.join(params.output.output_dir,"fee_avg_2_D_"+'r%s'%params.input.run_num+"_accepted.pickle"), acceptedfee2)
pp2 = PdfPages(os.path.join(params.output.output_dir,"fee_avg_2_D_"+'r%s'%params.input.run_num+"_accepted.pdf"))
if params.dispatch.events_all:
easy_pickle.dump(os.path.join(params.output.output_dir,"fee_avg_1_D_"+'r%s'%params.input.run_num+"_all.pickle"), acceptedfee1)
pp1 = PdfPages(os.path.join(params.output.output_dir,"fee_avg_1_D_"+'r%s'%params.input.run_num+"_all.pdf"))
if 'acceptedfee2' in locals():
easy_pickle.dump(os.path.join(params.output.output_dir,"fee_avg_2_D_"+'r%s'%params.input.run_num+"_all.pickle"), acceptedfee2)
pp2 = PdfPages(os.path.join(params.output.output_dir,"fee_avg_2_D_"+'r%s'%params.input.run_num+"_all.pdf"))
if params.dispatch.events_rejected:
easy_pickle.dump(os.path.join(params.output.output_dir,"fee_avg_1_D_"+'r%s'%params.input.run_num+"_rejected.pickle"), acceptedfee1)
pp1 = PdfPages(os.path.join(params.output.output_dir,"fee_avg_1_D_"+'r%s'%params.input.run_num+"_rejected.pdf"))
if 'acceptedfee2' in locals():
easy_pickle.dump(os.path.join(params.output.output_dir,"fee_avg_2_D_"+'r%s'%params.input.run_num+"_rejected.pickle"), acceptedfee2)
pp2 = PdfPages(os.path.join(params.output.output_dir,"fee_avg_2_D_"+'r%s'%params.input.run_num+"_rejected.pdf"))
print("Done")
#plotting result
# matplotlib needs a different backend when run on the cluster nodes at SLAC
# these two lines not needed when working interactively at SLAC, or on mac or on viper
if params.input.pixel_to_eV.energy_per_px is not None:
xvals = (np.array(range(acceptedfee1.shape[0]))-params.input.pixel_to_eV.x_coord_one)*params.input.pixel_to_eV.energy_per_px+params.input.pixel_to_eV.y_coord_one
xvals = xvals[::-1]
if params.input.pixel_to_eV.x_coord_two is not None:
eV_per_px = (params.input.pixel_to_eV.y_coord_two-params.input.pixel_to_eV.y_coord_one)/(params.input.pixel_to_eV.x_coord_two-params.input.pixel_to_eV.x_coord_one)
xvals = (np.array(range(acceptedfee1.shape[0]))-params.input.pixel_to_eV.x_coord_one)*eV_per_px+params.input.pixel_to_eV.y_coord_one
xvals = xvals[::-1]
if params.input.pixel_to_eV.x_coord_two is None and params.input.pixel_to_eV.energy_per_px is None:
xvals=np.arange(0,len(acceptedfee1),1)
yvals = acceptedfee1
def OneD_plot(X,Y):
plt.figure()
plt.clf()
plt.plot(X,Y)
if params.dispatch.events_accepted:
plt.title('Accepted Shots FEE Spectrum Run %s'%params.input.run_num)
elif params.dispatch.events_all:
plt.title('All Shots FEE Spectrum Run %s'%params.input.run_num)
elif params.dispatch.events_rejected:
plt.title('Rejected Shots FEE Spectrum Run %s'%params.input.run_num)
if params.input.pixel_to_eV.x_coord_one is not None:
plt.xlabel('eV', fontsize = 13)
else:
plt.xlabel('pixels', fontsize = 13)
plt.ylabel('pixels', fontsize = 13)
pp1.savefig()
def TwoD_plot(data):
plt.figure()
ax = plt.gca()
# use specified range 0, 50 to plot runs 117 - 201
#min=0, vmax=50
cax=ax.imshow(data, interpolation='nearest',origin='lower',cmap=cm.coolwarm)
plt.colorbar(cax, fraction=0.014, pad=0.04)
if params.dispatch.events_accepted:
ax.set_title('Accepted 2-D FEE Spectrum Run %s'%params.input.run_num)
elif params.dispatch.events_all:
ax.set_title('All 2-D FEE Spectrum Run %s'%params.input.run_num)
elif params.dispatch.events_rejected:
ax.set_title('Rejected 2-D FEE Spectrum Run %s'%params.input.run_num)
pp2.savefig()
OneD_plot(xvals,yvals)
pp1.close()
if 'acceptedfee2' in locals():
TwoD_plot(acceptedfee2)
pp2.close()
if __name__ == "__main__":
run(sys.argv[1:])
|
Programming Languages/Python/Theory/100_Python_Exercises/Exercises/Exercise 43/43.py | jaswinder9051998/Resources | 101 | 11105126 | #Create a script that generates a file where all letters of English alphabet are listed two in each line
import string
with open("letters.txt", "w") as file:
    for letter1, letter2 in zip(string.ascii_lowercase[0::2], string.ascii_lowercase[1::2]):
file.write(letter1 + letter2 + "\n")
|
src/fit.py | dusekjan/VI1_midi_file | 367 | 11105127 | import cv2
import numpy as np
from box import Box
from train import *
import os
import pickle
def predict(img):
if not os.path.exists('trained_models/nn_trained_model_hog.sav'):
print('Please wait while training the NN-HOG model....')
train('NN', 'hog', 'nn_trained_model_hog')
model = pickle.load(open('trained_models/nn_trained_model_hog.sav', 'rb'))
features = extract_features(img, 'hog')
labels = model.predict([features])
return labels
# if __name__ == "__main__":
# img = cv2.imread('testresult/0_6.png')
# labels = predict(img)
# print(labels)
|
amadeus/airport/predictions/_on_time.py | akshitsingla/amadeus-python | 125 | 11105133 | from amadeus.client.decorator import Decorator
class AirportOnTime(Decorator, object):
def get(self, **params):
'''
Returns a percentage of on-time flight departures
.. code-block:: python
amadeus.airport.predictions.on_time.get(
airportCode='JFK',
date='2020-09-01')
:param airportCode: the City/Airport IATA code from which
the flight will depart. ``"NYC"``, for example for New York
:param date: the date on which to fly out, in `YYYY-MM-DD` format
:rtype: amadeus.Response
:raises amadeus.ResponseError: if the request could not be completed
'''
return self.client.get('/v1/airport/predictions/on-time', **params)
|
tests/unit/test_ncaaf_teams.py | MArtinherz/sportsipy | 221 | 11105170 | from flexmock import flexmock
from mock import PropertyMock
from sportsipy.ncaaf.schedule import Schedule
from sportsipy.ncaaf.teams import Team
class TestNCAAFTeams:
def setup_method(self, *args, **kwargs):
flexmock(Team) \
.should_receive('_parse_team_data') \
.and_return(None)
self.team = Team(None)
def test_no_conference_wins_data_returns_default(self):
fake_conference_wins = PropertyMock(return_value='')
type(self.team)._conference_wins = fake_conference_wins
assert self.team.conference_wins is None
def test_no_conference_losses_data_returns_default(self):
fake_conference_losses = PropertyMock(return_value='')
type(self.team)._conference_losses = fake_conference_losses
assert self.team.conference_losses is None
def test_no_conference_percentage_returns_default(self):
fake_conf_win_percentage = PropertyMock(return_value='')
type(self.team)._conference_win_percentage = fake_conf_win_percentage
assert self.team.conference_win_percentage is None
def test_ncaaf_schedule_returns_schedule(self):
flexmock(Schedule) \
.should_receive('_pull_schedule') \
.and_return(None)
team = Team(None, 1)
assert len(team.schedule) == 0
|
mars/dataframe/indexing/set_axis.py | hxri/mars | 2,413 | 11105171 | # Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
from ... import opcodes
from ...core import ENTITY_TYPE, get_output_types, recursive_tile
from ...serialization.serializables import AnyField, Int8Field, KeyField
from ...utils import has_unknown_shape
from ..operands import DataFrameOperandMixin, DataFrameOperand
from ..utils import parse_index, validate_axis
class DataFrameSetAxis(DataFrameOperand, DataFrameOperandMixin):
_op_code_ = opcodes.DATAFRAME_SET_AXIS
_input = KeyField('input')
_axis = Int8Field('axis')
_value = AnyField('value')
def __init__(self, value=None, axis=None, **kw):
super().__init__(_value=value, _axis=axis, **kw)
@property
def input(self):
return self._input
@property
def value(self):
return self._value
@property
def axis(self):
return self._axis
def _set_inputs(self, inputs):
super()._set_inputs(inputs)
self._input = inputs[0]
if isinstance(self.value, ENTITY_TYPE):
self._value = inputs[-1]
def __call__(self, df_or_series):
new_size = self.value.shape[0]
expect_size = df_or_series.axes[self.axis].shape[0]
if not np.isnan(new_size) and not np.isnan(expect_size) \
and new_size != expect_size:
raise ValueError(
f'Length mismatch: Expected axis has {expect_size} elements, '
f'new values have {new_size} elements'
)
params = df_or_series.params
if self.axis == 0:
params['index_value'] = parse_index(self.value) \
if isinstance(self.value, pd.Index) else self.value.index_value
else:
params['columns_value'] = parse_index(self.value, store_data=True) \
if isinstance(self.value, pd.Index) else self.value.index_value
pd_columns = self.value.index_value.to_pandas() \
if isinstance(self.value, ENTITY_TYPE) else self.value
params['dtypes'] = params['dtypes'].set_axis(pd_columns)
self._output_types = get_output_types(df_or_series)
inputs = [df_or_series]
if isinstance(self.value, ENTITY_TYPE):
inputs += [self.value]
return self.new_tileable(inputs, **params)
@classmethod
def tile(cls, op: 'DataFrameSetAxis'):
output = op.outputs[0]
input_tileables = [op.input]
value = op.value
if isinstance(value, ENTITY_TYPE):
input_tileables.append(value)
if has_unknown_shape(value):
yield
if any(np.isnan(s) for s in op.input.nsplits[op.axis]):
yield
if op.input.shape[op.axis] != value.shape[0]:
raise ValueError(
f'Length mismatch: Expected axis has {value.shape[0]} elements, '
f'new values have {op.input.shape[op.axis]} elements'
)
if isinstance(value, ENTITY_TYPE):
value = yield from recursive_tile(
value.rechunk({0: op.input.nsplits[op.axis]}))
input_tileables[-1] = value
slices = np.array((0,) + op.input.nsplits[op.axis]).cumsum()
slice_left = slices[:-1]
slice_right = slices[1:]
chunks = []
param_cache = [None] * len(op.input.nsplits[op.axis])
for inp_chunk in op.input.chunks:
input_chunks = [inp_chunk]
value_index = inp_chunk.index[op.axis]
params = inp_chunk.params
if isinstance(value, ENTITY_TYPE):
value_data = value.chunks[value_index]
input_chunks.append(value_data)
else:
value_data = value[slice_left[value_index]:slice_right[value_index]]
if param_cache[value_index] is None:
cached_params = param_cache[value_index] = dict()
if isinstance(value, ENTITY_TYPE):
if op.axis == 0:
cached_params['index_value'] = value_data.index_value
else:
cached_params['columns_value'] = value_data.index_value
cached_params['dtypes'] = output.dtypes.iloc[
slice_left[value_index]:slice_right[value_index]
]
else:
if op.axis == 0:
cached_params['index_value'] = parse_index(value_data)
else:
cached_params['columns_value'] = parse_index(value_data, store_data=True)
cached_params['dtypes'] = params['dtypes'].set_axis(value_data)
params.update(param_cache[value_index])
new_op = op.copy().reset_key()
new_op._value = value_data
chunks.append(new_op.new_chunk(input_chunks, **params))
params = op.outputs[0].params
params['chunks'] = chunks
params['nsplits'] = op.input.nsplits
new_op = op.copy().reset_key()
return new_op.new_tileables(input_tileables, **params)
@classmethod
def execute(cls, ctx, op: 'DataFrameSetAxis'):
in_data = ctx[op.input.key]
value = op.value
if isinstance(value, ENTITY_TYPE):
value = ctx[value.key]
ctx[op.outputs[0].key] = in_data.set_axis(value, axis=op.axis)
def _set_axis(df_or_axis, labels, axis=0, inplace=False):
axis = validate_axis(axis, df_or_axis)
if not isinstance(labels, ENTITY_TYPE) and not isinstance(labels, pd.Index):
labels = pd.Index(labels)
op = DataFrameSetAxis(value=labels, axis=axis)
result = op(df_or_axis)
if inplace:
df_or_axis.data = result.data
else:
return result
def df_set_axis(df, labels, axis=0, inplace=False):
"""
Assign desired index to given axis.
Indexes for column or row labels can be changed by assigning
a list-like or Index.
Parameters
----------
labels : list-like, Index
The values for the new index.
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to update. The value 0 identifies the rows, and 1 identifies the columns.
inplace : bool, default False
Whether to return a new DataFrame instance.
Returns
-------
renamed : DataFrame or None
An object of type DataFrame or None if ``inplace=True``.
See Also
--------
DataFrame.rename_axis : Alter the name of the index or columns.
Examples
--------
>>> import mars.dataframe as md
>>> df = md.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
Change the row labels.
>>> df.set_axis(['a', 'b', 'c'], axis='index').execute()
A B
a 1 4
b 2 5
c 3 6
Change the column labels.
>>> df.set_axis(['I', 'II'], axis='columns').execute()
I II
0 1 4
1 2 5
2 3 6
Now, update the labels inplace.
>>> df.set_axis(['i', 'ii'], axis='columns', inplace=True)
>>> df.execute()
i ii
0 1 4
1 2 5
2 3 6
"""
return _set_axis(df, labels, axis=axis, inplace=inplace)
def series_set_axis(series, labels, axis=0, inplace=False):
"""
Assign desired index to given axis.
Indexes for row labels can be changed by assigning
a list-like or Index.
Parameters
----------
labels : list-like, Index
The values for the new index.
axis : {0 or 'index'}, default 0
The axis to update. The value 0 identifies the rows.
inplace : bool, default False
Whether to return a new Series instance.
Returns
-------
renamed : Series or None
An object of type Series or None if ``inplace=True``.
See Also
--------
Series.rename_axis : Alter the name of the index.
Examples
--------
>>> import mars.dataframe as md
>>> s = md.Series([1, 2, 3])
>>> s.execute()
0 1
1 2
2 3
dtype: int64
>>> s.set_axis(['a', 'b', 'c'], axis=0).execute()
a 1
b 2
c 3
dtype: int64
"""
return _set_axis(series, labels, axis=axis, inplace=inplace)
|
tutorials/W2D3_BiologicalNeuronModels/solutions/W2D3_Tutorial1_Solution_27d69c89.py | eduardojdiniz/CompNeuro | 2,294 | 11105191 | def isi_cv_LIF(spike_times):
"""
Calculates the inter-spike intervals (isi) and
the coefficient of variation (cv) for a given spike_train
Args:
spike_times : (n, ) vector with the spike times (ndarray)
Returns:
isi : (n-1,) vector with the inter-spike intervals (ms)
cv : coefficient of variation of isi (float)
"""
if len(spike_times) >= 2:
# Compute isi
isi = np.diff(spike_times)
# Compute cv
cv = isi.std()/isi.mean()
else:
isi = np.nan
cv = np.nan
return isi, cv
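# Quick check of the definitions above: for spike_times = [10., 20., 40.] ms,
# isi = np.diff(spike_times) = [10., 20.], mean = 15, population std = 5, so cv = 5/15 ~ 0.33;
# a perfectly regular train gives cv = 0, while a Poisson-like train approaches cv = 1.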
# Set parameters
pars = default_pars(T=1000.)
mu_gwn = 250
sig_gwn1 = 0.5
sig_gwn2 = 3.0
# Run LIF model for sigma = 0.5
I_GWN1 = my_GWN(pars, mu=mu_gwn, sig=sig_gwn1, myseed=2020)
_, sp1 = run_LIF(pars, Iinj=I_GWN1)
# Run LIF model for sigma = 3
I_GWN2 = my_GWN(pars, mu=mu_gwn, sig=sig_gwn2, myseed=2020)
_, sp2 = run_LIF(pars, Iinj=I_GWN2)
# Compute ISIs/CV
isi1, cv1 = isi_cv_LIF(sp1)
isi2, cv2 = isi_cv_LIF(sp2)
# Visualize
with plt.xkcd():
my_hists(isi1, isi2, cv1, cv2, sig_gwn1, sig_gwn2) |
elodie/plugins/runtimeerror/runtimeerror.py | lancelotj/elodie | 964 | 11105234 | """
RuntimeError plugin object used for tests.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
from __future__ import print_function
from elodie.plugins.plugins import PluginBase
class RuntimeError(PluginBase):
__name__ = 'ThrowError'
"""A dummy class to execute plugin actions for tests."""
def __init__(self):
pass
def after(self, file_path, destination_folder, final_file_path, metadata):
print(does_not_exist)
def batch(self):
print(does_not_exist)
def before(self, file_path, destination_folder):
print(does_not_exist)
|
tests/runexp.py | SymbioticLab/Salus | 104 | 11105264 | #! /usr/bin/env python
from __future__ import print_function, absolute_import, division
import os
import sys
from subprocess import Popen
import csv
import argparse
from operator import attrgetter
import shlex
import time
from datetime import datetime
try:
from pathlib import Path
except ImportError:
    from pathlib2 import Path  # python 2 backport
try:
from subprocess import DEVNULL # py3k
except ImportError:
DEVNULL = open(os.devnull, 'wb') # 2 backport
class Workload(object):
"""Workload"""
def __init__(self, d, config):
self.name = d['name']
self.jct = float(d['jct'])
self.mem = float(d['mem'])
self.cmd = ['stdbuf', '-o0', '-e0', '--'] + shlex.split(d['cmd'])
self.env = os.environ.copy()
self.env['EXEC_ITER_NUMBER'] = d['env']
self.env['TF_CPP_MIN_LOG_LEVEL'] = '2'
self.env['CUDA_VISIBLE_DEVICES'] = '0,1'
self.outputpath = os.path.join(config.save_dir, self.name)
self.outputfile = None
self.proc = None
def runAsync(self):
print("Running '{}' > {}".format(' '.join(self.cmd), self.outputpath))
self.outputfile = open(self.outputpath, 'w')
self.proc = Popen(self.cmd, env=self.env, stdout=self.outputfile, stdin=DEVNULL)
return self.proc
def wait(self):
if self.proc:
self.proc.wait()
if self.outputfile:
self.outputfile.close()
def load_workloads(config):
workloads = []
with open(config.workloads, 'rb') as f:
reader = csv.reader(f)
for row in reader:
workloads.append(Workload({
'name': row[0],
'jct': row[1],
'mem': row[2],
'env': row[3],
'cmd': row[4]
}, config))
if config.workload_limit > 0 and len(workloads) > config.workload_limit:
workloads = workloads[:config.workload_limit]
return workloads
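# Assumed CSV layout for load_workloads (no header row): name, jct, mem, iteration
# count (exported as EXEC_ITER_NUMBER), command line. A hypothetical row:
#   alexnet_25,120.5,4096,1000,python tf_cnn_benchmarks.py --model=alexnet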
casekey = {
'shortest': ('jct', False),
'longest': ('jct', True),
'smallest': ('mem', False),
'largest': ('mem', True),
}
def runServer(config):
env = os.environ.copy()
env['CUDA_VISIBLE_DEVICES'] = '2,3'
env['TF_CPP_MIN_LOG_LEVEL'] = '2'
stdout = DEVNULL if config.hide_server_output else None
stderr = DEVNULL if config.hide_server_output else None
build_dir = os.path.abspath(config.build_dir)
serverP = Popen([
os.path.join(build_dir, 'Release', 'src', 'executor'),
'--disable-fairness',
'--logconf',
os.path.join(build_dir, config.server_log_config)
], env=env, stdin=DEVNULL, stdout=stdout, stderr=stderr)
time.sleep(5)
return serverP
def run(workloads, config):
if config.case not in casekey:
raise ValueError('Case should be one of ' + str(casekey.keys()))
key, desc = casekey[config.case]
key = attrgetter(key)
torun = sorted(workloads, key=key, reverse=desc)
print('{} works to run'.format(len(torun)))
serverP = runServer(config)
started = []
running = []
for w in torun:
print('Look at', w.name, " running ", len(running))
if len(running) < config.concurrent_jobs:
print('{}: Starting: {} ({} jobs running)'.format(datetime.now(), w.name, len(running)))
print('{}: Starting: {} ({} jobs running)'.format(datetime.now(), w.name, len(running)), file=sys.stderr)
started.append(w)
running.append((w.runAsync(), w.name))
else:
raise ValueError()
# Wait for something to finish
while len(running) >= config.concurrent_jobs and not serverP.poll():
def stillRunning(x):
p, name = x
if p.poll() is not None:
print('Done: {} (ret {})'.format(name, p.returncode))
return False
return True
running[:] = [x for x in running if stillRunning(x)]
time.sleep(.25)
print("Num running ", len(running))
if serverP.poll():
print('Error: server died: {}'.format(serverP.returncode))
break
for w in started:
w.wait()
serverP.terminate()
serverP.wait()
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--hide_server_output', help='Hide server output', default=False, action='store_true')
parser.add_argument('--concurrent_jobs', help='Maximum concurrent running jobs', type=int, default=4)
parser.add_argument('--server_log_config', help='Log configuration to use for executor server',
default='disable.config')
parser.add_argument('--build_dir', help='Build directory', default='../build')
parser.add_argument('--save_dir', help='Output directory, default to the same name as case')
parser.add_argument('--workload_limit', help='Only run this number of workloads. If 0, means no limit',
type=int, default=0)
parser.add_argument('workloads', help='Path to the CSV containing workload info')
parser.add_argument('case', help='Which case to run', choices=casekey.keys())
config = parser.parse_args()
if config.save_dir is None:
config.save_dir = config.case
Path(config.save_dir).mkdir(exist_ok=True)
workloads = load_workloads(config)
run(workloads, config)
if __name__ == '__main__':
main()
|