the-stack_0_21479 |
from __future__ import absolute_import, division, print_function
import functools
import six
from django.conf import settings
from rest_framework.response import Response
from sentry.api.bases import OrganizationEventsEndpointBase
from sentry.api.helpers.group_search import build_query_params_from_request, get_by_short_id, ValidationError
from sentry.api.serializers import serialize
from sentry.api.serializers.models.group import StreamGroupSerializerSnuba
from sentry.models import Environment, Group, GroupStatus, Project
from sentry.search.snuba.backend import SnubaSearchBackend
ERR_INVALID_STATS_PERIOD = "Invalid stats_period. Valid choices are '', '24h', and '14d'"
search = SnubaSearchBackend(**settings.SENTRY_SEARCH_OPTIONS)
class OrganizationGroupIndexEndpoint(OrganizationEventsEndpointBase):
def _build_query_params_from_request(self, request, organization, project_ids):
projects = list(
Project.objects.filter(
id__in=project_ids,
organization=organization,
)
)
return build_query_params_from_request(request, projects)
def _search(self, request, organization, project_ids, environments, extra_query_kwargs=None):
query_kwargs = self._build_query_params_from_request(request, organization, project_ids)
if extra_query_kwargs is not None:
assert 'environment' not in extra_query_kwargs
query_kwargs.update(extra_query_kwargs)
query_kwargs['environments'] = environments if environments else None
result = search.query(**query_kwargs)
return result, query_kwargs
def get(self, request, organization):
"""
List an Organization's Issues
`````````````````````````````
Return a list of issues (groups) bound to an organization. All parameters are
supplied as query string parameters.
A default query of ``is:unresolved`` is applied. To return results
with other statuses, send a new query value (i.e. ``?query=`` for all
results).
The ``groupStatsPeriod`` parameter can be used to select the timeline
stats which should be present. Possible values are: '' (disable),
'24h', '14d'
The ``statsPeriod`` parameter can be used to select a date window starting
from now. Ex. ``14d``.
The ``start`` and ``end`` parameters can be used to select an absolute
date period to fetch issues from.
:qparam string statsPeriod: an optional stat period (can be one of
``"24h"``, ``"14d"``, and ``""``).
:qparam string groupStatsPeriod: an optional stat period (can be one of
``"24h"``, ``"14d"``, and ``""``).
:qparam string start: Beginning date. You must also provide ``end``.
:qparam string end: End date. You must also provide ``start``.
:qparam bool shortIdLookup: if this is set to true then short IDs are
looked up by this endpoint as well. This
can cause the endpoint to return an issue
from a different project, which is why
this is opt-in. Set to `1` to enable.
:qparam querystring query: an optional Sentry structured search
query. If not provided, an implied
``"is:unresolved"`` is assumed.
:pparam string organization_slug: the slug of the organization the
issues belong to.
:auth: required
"""
stats_period = request.GET.get('groupStatsPeriod')
if stats_period not in (None, '', '24h', '14d'):
return Response({"detail": ERR_INVALID_STATS_PERIOD}, status=400)
elif stats_period is None:
# default
stats_period = '24h'
elif stats_period == '':
# disable stats
stats_period = None
environments = list(Environment.objects.filter(
organization_id=organization.id,
name__in=self.get_environments(request, organization),
))
serializer = functools.partial(
StreamGroupSerializerSnuba,
environment_ids=[env.id for env in environments],
stats_period=stats_period,
)
project_ids = self.get_project_ids(request, organization)
if not project_ids:
return Response([])
query = request.GET.get('query', '').strip()
if query:
# check to see if we've got an event ID
if len(query) == 32:
groups = list(
Group.objects.filter_by_event_id(
project_ids,
query,
)
)
if groups:
return Response(serialize(groups, request.user, serializer()))
group = get_by_short_id(organization.id, request.GET.get('shortIdLookup'), query)
if group is not None:
# check to make sure user has access to project
if group.project_id in project_ids:
response = Response(
serialize(
[group], request.user, serializer()
)
)
response['X-Sentry-Direct-Hit'] = '1'
return response
try:
cursor_result, query_kwargs = self._search(
request, organization, project_ids, environments, {'count_hits': True})
except ValidationError as exc:
return Response({'detail': six.text_type(exc)}, status=400)
results = list(cursor_result)
context = serialize(results, request.user, serializer())
# HACK: remove auto resolved entries
if query_kwargs.get('status') == GroupStatus.UNRESOLVED:
context = [r for r in context if r['status'] == 'unresolved']
response = Response(context)
self.add_cursor_headers(request, response, cursor_result)
# TODO(jess): add metrics that are similar to project endpoint here
return response
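# Illustrative client-side sketch (not part of this endpoint's code); the host,
# URL prefix and token below are assumptions, shown only to tie together the
# query parameters documented in the docstring above:
#
#   import requests
#   resp = requests.get(
#       "https://sentry.example.com/api/0/organizations/my-org/issues/",
#       params={"query": "is:unresolved", "statsPeriod": "14d", "groupStatsPeriod": "24h"},
#       headers={"Authorization": "Bearer <auth-token>"},
#   )
#   issues = resp.json()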
the-stack_0_21480 |
from django.urls import path
from .views import users, signup, login_user, profile, logout_user
app_name = 'users'
urlpatterns = [
path('users', users, name='users'),
path('signup', signup, name='signup'),
path('login', login_user, name='login'),
path('profile', profile, name='profile'),
path('logout', logout_user, name='logout')
]
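# With app_name = 'users', these routes are addressed by namespaced names,
# e.g. django.urls.reverse('users:login'); the final URL also depends on the
# prefix under which this urlconf is included in the project's root urls.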
the-stack_0_21483 |
import shlex
from itertools import chain
from .utils import *
import pytest
import scuba.utils
def _parse_cmdline(cmdline):
# Strip the formatting and whitespace
lines = [l.rstrip('\\').strip() for l in cmdline.splitlines()]
# Split each line, and return a flattened list of arguments
return chain.from_iterable(map(shlex.split, lines))
def _test_format_cmdline(args):
# Call the unit-under-test to get the formatted command line
result = scuba.utils.format_cmdline(args)
# Parse the result back out to a list of arguments
out_args = _parse_cmdline(result)
# Verify that they match
assert_seq_equal(out_args, args)
def test_format_cmdline():
'''format_cmdline works as expected'''
_test_format_cmdline([
'something',
'-a',
'-b',
'--long', 'option text',
'-s', 'hort',
'a very long argument here that will end up on its own line because it is so wide and nothing else will fit at the default width',
'and now',
'some', 'more', 'stuff',
'and even more stuff',
])
def test_shell_quote_cmd():
args = ['foo', 'bar pop', '"tee ball"']
result = scuba.utils.shell_quote_cmd(args)
out_args = shlex.split(result)
assert_seq_equal(out_args, args)
def test_parse_env_var():
'''parse_env_var returns a key, value pair'''
result = scuba.utils.parse_env_var('KEY=value')
assert result == ('KEY', 'value')
def test_parse_env_var_more_equals():
'''parse_env_var handles multiple equals signs'''
result = scuba.utils.parse_env_var('KEY=anotherkey=value')
assert result == ('KEY', 'anotherkey=value')
def test_parse_env_var_no_equals(monkeypatch):
'''parse_env_var handles no equals and gets value from environment'''
monkeypatch.setenv('KEY', 'mockedvalue')
result = scuba.utils.parse_env_var('KEY')
assert result == ('KEY', 'mockedvalue')
def test_parse_env_var_not_set(monkeypatch):
'''parse_env_var returns an empty string if not set'''
monkeypatch.delenv('NOTSET', raising=False)
result = scuba.utils.parse_env_var('NOTSET')
assert result == ('NOTSET', '')
def test_flatten_list__not_list():
with pytest.raises(ValueError):
scuba.utils.flatten_list('abc')
def test_flatten_list__not_nested():
sample = [1, 2, 3, 4]
result = scuba.utils.flatten_list(sample)
assert result == sample
def test_flatten_list__nested_1():
sample = [
1,
[2, 3],
4,
[5, 6, 7],
]
exp = range(1, 7+1)
result = scuba.utils.flatten_list(sample)
assert_seq_equal(result, exp)
def test_flatten_list__nested_many():
sample = [
1,
[2, 3],
[4, 5, [6, 7, 8]],
9, 10,
[11, [12, [13, [14, [15, [16, 17, 18]]]]]],
]
exp = range(1, 18+1)
result = scuba.utils.flatten_list(sample)
assert_seq_equal(result, exp)
the-stack_0_21485 |
import time
import adafruit_ssd1306
import bitbangio as io
import board
import network
import ntptime
import ubinascii
import uhashlib
# pylint: disable=broad-except
# https://github.com/pyotp/pyotp example
totp = [("Discord ", 'JBSWY3DPEHPK3PXP'),
("Gmail ", 'abcdefghijklmnopqrstuvwxyz234567'),
("Accounts", 'asfdkwefoaiwejfa323nfjkl')]
ssid = 'my_wifi_ssid'
password = 'my_wifi_password'
TEST = False # set to True to print out the hasher self-test results
ALWAYS_ON = False # Set to true if you never want to go to sleep!
ON_SECONDS = 60 # how long to stay on if not in always_on mode
i2c = io.I2C(board.SCL, board.SDA)
oled = adafruit_ssd1306.SSD1306_I2C(128, 32, i2c)
# Gimme a welcome screen!
oled.fill(0)
oled.text('CircuitPython', 0, 0)
oled.text('PyTOTP Pal!', 0, 10)
oled.text(' <3 adafruit <3 ', 0, 20)
oled.show()
time.sleep(0.25)
EPOCH_DELTA = 946684800 # seconds between 1970-01-01 and 2000-01-01
SECS_DAY = 86400
SHA1 = uhashlib.sha1
if TEST:
print("===========================================")
print("SHA1 test: ", ubinascii.hexlify(SHA1(b'hello world').digest()))
# should be 2aae6c35c94fcfb415dbe95f408b9ce91ee846ed
# HMAC implementation, as hashlib/hmac wouldn't fit
# From https://en.wikipedia.org/wiki/Hash-based_message_authentication_code
def HMAC(k, m):
SHA1_BLOCK_SIZE = 64
KEY_BLOCK = k + (b'\0' * (SHA1_BLOCK_SIZE - len(k)))
KEY_INNER = bytes((x ^ 0x36) for x in KEY_BLOCK)
KEY_OUTER = bytes((x ^ 0x5C) for x in KEY_BLOCK)
inner_message = KEY_INNER + m
outer_message = KEY_OUTER + SHA1(inner_message).digest()
return SHA1(outer_message)
if TEST:
KEY = b'abcd'
MESSAGE = b'efgh'
print("===========================================")
print("HMAC test: ", ubinascii.hexlify(HMAC(KEY, MESSAGE).digest()))
# should be e5dbcf9263188f9fce90df572afeb39b66b27198
# Base32 decoder, since base64 lib wouldn't fit
def base32_decode(encoded):
missing_padding = len(encoded) % 8
if missing_padding != 0:
encoded += '=' * (8 - missing_padding)
encoded = encoded.upper()
chunks = [encoded[i:i + 8] for i in range(0, len(encoded), 8)]
out = []
for chunk in chunks:
bits = 0
bitbuff = 0
for c in chunk:
if 'A' <= c <= 'Z':
n = ord(c) - ord('A')
elif '2' <= c <= '7':
n = ord(c) - ord('2') + 26
elif c == '=':
continue
else:
raise ValueError("Not base32")
# each of the 8 base32 chars in a chunk carries 5 bits
bits += 5
# shift down and add the current value
bitbuff <<= 5
bitbuff |= n
# great! we have enough to extract a byte
if bits >= 8:
bits -= 8
byte = bitbuff >> bits # grab top 8 bits
bitbuff &= ~(0xFF << bits) # and clear them
out.append(byte) # store what we got
return out
if TEST:
print("===========================================")
print("Base32 test: ", bytes(base32_decode("IFSGCZTSOVUXIIJB")))
# should be "Adafruit!!"
# Turns an integer into a padded-with-0x0 bytestr
def int_to_bytestring(i, padding=8):
result = []
while i != 0:
result.insert(0, i & 0xFF)
i >>= 8
result = [0] * (padding - len(result)) + result
return bytes(result)
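# For example (this is the 8-byte big-endian counter encoding HOTP expects):
#   int_to_bytestring(1) == b'\x00\x00\x00\x00\x00\x00\x00\x01'
#   int_to_bytestring(0x1f, padding=2) == b'\x00\x1f'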
# HMAC -> OTP generator, pretty much same as
# https://github.com/pyotp/pyotp/blob/master/src/pyotp/otp.py
def generate_otp(int_input, secret_key, digits=6):
if int_input < 0:
raise ValueError('input must be positive integer')
hmac_hash = bytearray(
HMAC(bytes(base32_decode(secret_key)),
int_to_bytestring(int_input)).digest()
)
offset = hmac_hash[-1] & 0xf
code = ((hmac_hash[offset] & 0x7f) << 24 |
(hmac_hash[offset + 1] & 0xff) << 16 |
(hmac_hash[offset + 2] & 0xff) << 8 |
(hmac_hash[offset + 3] & 0xff))
str_code = str(code % 10 ** digits)
while len(str_code) < digits:
str_code = '0' + str_code
return str_code
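# Sanity-check sketch against the RFC 4226 appendix D test vectors (ASCII
# secret "12345678901234567890", i.e. "GEZDGNBVGY3TQOJQGEZDGNBVGY3TQOJQ" in
# base32); these counters should yield the published 6-digit codes:
#   generate_otp(0, 'GEZDGNBVGY3TQOJQGEZDGNBVGY3TQOJQ')  # '755224'
#   generate_otp(1, 'GEZDGNBVGY3TQOJQGEZDGNBVGY3TQOJQ')  # '287082'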
print("===========================================")
# Set up networking
sta_if = network.WLAN(network.STA_IF)
oled.fill(0)
oled.text('Connecting to', 0, 0)
oled.text(ssid, 0, 10)
oled.show()
if not sta_if.isconnected():
print("Connecting to SSID", ssid)
sta_if.active(True)
sta_if.connect(ssid, password)
while not sta_if.isconnected():
pass
print("Connected! IP = ", sta_if.ifconfig()[0])
# Done! Let them know we made it
oled.text("IP: " + sta_if.ifconfig()[0], 0, 20)
oled.show()
time.sleep(0.25)
# Get the latest time from NTP
t = None
while not t:
try:
t = ntptime.time()
except Exception:
pass
time.sleep(0.1)
# NTP time is seconds-since-2000
print("NTP time: ", t)
# But we need Unix time, which is seconds-since-1970
t += EPOCH_DELTA
print("Unix time: ", t)
# Instead of using RTC which means converting back and forth
# we'll just keep track of seconds-elapsed-since-NTP-call
mono_time = int(time.monotonic())
print("Monotonic time", mono_time)
countdown = ON_SECONDS # how long to stay on if not in always_on mode
while ALWAYS_ON or (countdown > 0):
# Calculate current time based on NTP + monotonic
unix_time = t - mono_time + int(time.monotonic())
print("Unix time: ", unix_time)
# Clear the screen
oled.fill(0)
y = 0
# We can do up to 3 per line on the Feather OLED
for name, secret in totp:
otp = generate_otp(unix_time // 30, secret)
print(name + " OTP output: ", otp) # serial debugging output
oled.text(name + ": " + str(otp), 0, y) # display name & OTP on OLED
y += 10 # Go to next line on OLED
# Display a little bar that 'counts down' how many seconds you have left
oled.framebuf.line(0, 31, 128 - (unix_time % 30) * 4, 31, True)
oled.show()
# We'll update every 1/4 second; we can hash very fast so it's no biggie!
countdown -= 0.25
time.sleep(0.25)
# All these hashes will be lost in time(), like tears in rain. Time to die
oled.fill(0)
oled.show()
the-stack_0_21487 |
import argparse
import os
import time
from greedy import greedy_cycle_tsp, nn_greedy_tsp, regret_1_greedy_cycle_tsp
from importer import create_distance_matrix, import_vertices_coordinates
from local_search import (
local_search_greedy,
local_search_steepest,
swap_edges,
swap_edges_diff,
swap_vertices,
swap_vertices_diff,
)
from simple_perturbation_local_search import simple_perturbation_local_search
from texttable import Texttable
from tqdm import tqdm
from utils import check_solution_correctness
from visualization import visualize_cycle_and_vertices
def get_argument_parser():
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(help='Heuristics types', dest='type')
constructive_parser = subparsers.add_parser('constructive')
constructive_parser.add_argument(
'--algorithm',
choices=get_constructive_algorithms_dict().keys(),
required=True,
help='Specify algorithm to be used',
)
local_search_parser = subparsers.add_parser('local_search')
local_search_parser.add_argument(
'--algorithm',
choices=get_local_search_algorithms_dict().keys(),
required=True,
help='Specify algorithm to be used',
)
local_search_parser.add_argument(
'--neighbourhood',
choices=get_local_search_neighbourhood_dict().keys(),
required=True,
help='Specify neighbourhood to be used',
)
local_search_parser.add_argument(
'--multiple_start_number',
default=1,
type=int,
help='Specify number of iterations in multiple start',
)
for subparser in [constructive_parser, local_search_parser]:
subparser.add_argument(
'--input_files',
nargs='+',
type=open,
help='Specify list of input files\' paths',
)
subparser.add_argument('--visualize', action='store_true')
return parser
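# Example invocations (the entry-point script name and the input file paths
# are hypothetical; the algorithm/neighbourhood names come from the choices
# registered above):
#   python main.py constructive --algorithm greedy_cycle --input_files data/kroA100.tsp --visualize
#   python main.py local_search --algorithm steepest --neighbourhood edges \
#       --multiple_start_number 10 --input_files data/kroA100.tsp data/kroB100.tsp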
def run_nn_gready_tsp(distance_matrix, vertices_coordinates, *args, **kwargs):
results = []
for index, _ in enumerate(tqdm(vertices_coordinates)):
start_time = time.time()
cycle_vertices, cycle_length = nn_greedy_tsp(distance_matrix, index)
check_solution_correctness(cycle_vertices, distance_matrix)
results.append((cycle_vertices, cycle_length, time.time() - start_time))
return results
def run_greedy_cycle_tsp(distance_matrix, vertices_coordinates, *args, **kwargs):
results = []
for index, _ in enumerate(tqdm(vertices_coordinates)):
start_time = time.time()
cycle_vertices, cycle_length = greedy_cycle_tsp(distance_matrix, index)
check_solution_correctness(cycle_vertices, distance_matrix)
results.append((cycle_vertices, cycle_length, time.time() - start_time))
return results
def run_regret_1_greedy_cycle_tsp(
distance_matrix, vertices_coordinates, *args, **kwargs
):
results = []
for index, _ in enumerate(tqdm(vertices_coordinates)):
start_time = time.time()
cycle_vertices, cycle_length = regret_1_greedy_cycle_tsp(distance_matrix, index)
check_solution_correctness(cycle_vertices, distance_matrix)
results.append((cycle_vertices, cycle_length, time.time() - start_time))
return results
def run_local_search_greedy(
distance_matrix, vertices_coordinates, *, multiple_start_number, **kwargs
):
results = []
for _ in tqdm(vertices_coordinates):
inner_results = []
start_time = time.time()
for _ in range(multiple_start_number):
cycle_vertices, cycle_length = local_search_greedy(
distance_matrix, **kwargs
)
check_solution_correctness(cycle_vertices, distance_matrix)
inner_results.append((cycle_vertices, cycle_length))
min_cycle_vertices, min_cycle_length = min(inner_results, key=lambda x: x[1])
results.append((min_cycle_vertices, min_cycle_length, time.time() - start_time))
return results
def run_local_search_steepest(
distance_matrix, vertices_coordinates, *, multiple_start_number, **kwargs
):
results = []
for _ in tqdm(vertices_coordinates):
inner_results = []
start_time = time.time()
for _ in range(multiple_start_number):
cycle_vertices, cycle_length = local_search_steepest(
distance_matrix, **kwargs
)
check_solution_correctness(cycle_vertices, distance_matrix)
inner_results.append((cycle_vertices, cycle_length))
min_cycle_vertices, min_cycle_length = min(inner_results, key=lambda x: x[1])
results.append((min_cycle_vertices, min_cycle_length, time.time() - start_time))
return results
def run_simple_perturbance_local_search(
distance_matrix, vertices_coordinates, *, multiple_start_number, **kwargs
):
results = []
for _ in tqdm(range(10)):
inner_results = []
start_time = time.time()
for _ in range(multiple_start_number):
cycle_vertices, cycle_length = simple_perturbation_local_search(
distance_matrix, **kwargs
)
check_solution_correctness(cycle_vertices, distance_matrix)
inner_results.append((cycle_vertices, cycle_length))
min_cycle_vertices, min_cycle_length = min(inner_results, key=lambda x: x[1])
results.append((min_cycle_vertices, min_cycle_length, time.time() - start_time))
return results
def get_local_search_neighbourhood_dict():
return {
'vertices': {
'swap_function': swap_vertices,
'diff_function': swap_vertices_diff,
},
'edges': {'swap_function': swap_edges, 'diff_function': swap_edges_diff},
}
def get_local_search_extra_kwargs(run_args):
extra_kwargs = {'multiple_start_number': run_args.multiple_start_number}
neighbourhood_kwargs = get_local_search_neighbourhood_dict()[run_args.neighbourhood]
extra_kwargs.update(neighbourhood_kwargs)
return extra_kwargs
def get_local_search_algorithms_dict():
return {
'greedy': run_local_search_greedy,
'steepest': run_local_search_steepest,
'simple_perturbation': run_simple_perturbance_local_search,
}
def get_constructive_algorithms_dict():
return {
'nn_greedy': run_nn_gready_tsp,
'greedy_cycle': run_greedy_cycle_tsp,
'regret_1_greedy_cycle': run_regret_1_greedy_cycle_tsp,
}
def get_algorithms_dict():
return {
'constructive': get_constructive_algorithms_dict(),
'local_search': get_local_search_algorithms_dict(),
}
def run():
args = get_argument_parser().parse_args()
table = Texttable()
table.header(
[
'Name',
'Min length',
'Average length',
'Max length',
'Min time [s]',
'Average time [s]',
'Max time [s]',
]
)
solutions = []
for input_file in args.input_files:
instance_name = os.path.basename(input_file.name)
print(instance_name)
vertices_coordinates = import_vertices_coordinates(input_file)
distance_matrix = create_distance_matrix(vertices_coordinates)
algorithms_dict = get_algorithms_dict()[args.type]
run_function = algorithms_dict[args.algorithm]
extra_kwargs = (
get_local_search_extra_kwargs(args) if args.type == 'local_search' else {}
)
results = run_function(distance_matrix, vertices_coordinates, **extra_kwargs)
best_cycle, min_length, _ = min(results, key=lambda x: x[1])
average = sum([length for _, length, _ in results]) / len(results)
_, max_length, _ = max(results, key=lambda x: x[1])
_, _, min_time = min(results, key=lambda x: x[2])
average_time = sum([time for _, _, time in results]) / len(results)
_, _, max_time = max(results, key=lambda x: x[2])
table.add_row(
[
instance_name,
min_length,
average,
max_length,
min_time,
average_time,
max_time,
]
)
solutions.append((best_cycle, vertices_coordinates))
print(table.draw())
if args.visualize:
for solution in solutions:
best_cycle, vertices_coordinates = solution
visualize_cycle_and_vertices(best_cycle, vertices_coordinates)
if __name__ == '__main__':
run()
the-stack_0_21488 |
import asyncio
from datetime import datetime
import inspect
import sys
import os
import json
import time
import csv
import decimal
from decimal import Decimal
from typing import Sequence, Optional
from aiorpcx.curio import timeout_after, TaskTimeout, TaskGroup
from .bitcoin import COIN
from .i18n import _
from .util import (ThreadJob, make_dir, log_exceptions,
make_aiohttp_session, resource_path)
from .network import Network
from .simple_config import SimpleConfig
from .logging import Logger
DEFAULT_ENABLED = False
DEFAULT_CURRENCY = "JPY"
DEFAULT_EXCHANGE = "CoinGecko" # default exchange should ideally provide historical rates
# See https://en.wikipedia.org/wiki/ISO_4217
CCY_PRECISIONS = {'BHD': 3, 'BIF': 0, 'BYR': 0, 'CLF': 4, 'CLP': 0,
'CVE': 0, 'DJF': 0, 'GNF': 0, 'IQD': 3, 'ISK': 0,
'JOD': 3, 'JPY': 0, 'KMF': 0, 'KRW': 0, 'KWD': 3,
'LYD': 3, 'MGA': 1, 'MRO': 1, 'OMR': 3, 'PYG': 0,
'RWF': 0, 'TND': 3, 'UGX': 0, 'UYI': 0, 'VND': 0,
'VUV': 0, 'XAF': 0, 'XAU': 4, 'XOF': 0, 'XPF': 0}
class ExchangeBase(Logger):
def __init__(self, on_quotes, on_history):
Logger.__init__(self)
self.history = {}
self.quotes = {}
self.on_quotes = on_quotes
self.on_history = on_history
async def get_raw(self, site, get_string):
# APIs must have https
url = ''.join(['https://', site, get_string])
network = Network.get_instance()
proxy = network.proxy if network else None
async with make_aiohttp_session(proxy) as session:
async with session.get(url) as response:
response.raise_for_status()
return await response.text()
async def get_json(self, site, get_string):
# APIs must have https
url = ''.join(['https://', site, get_string])
network = Network.get_instance()
proxy = network.proxy if network else None
async with make_aiohttp_session(proxy) as session:
async with session.get(url) as response:
response.raise_for_status()
# set content_type to None to disable checking MIME type
return await response.json(content_type=None)
async def get_csv(self, site, get_string):
raw = await self.get_raw(site, get_string)
reader = csv.DictReader(raw.split('\n'))
return list(reader)
def name(self):
return self.__class__.__name__
async def update_safe(self, ccy):
try:
self.logger.info(f"getting fx quotes for {ccy}")
self.quotes = await self.get_rates(ccy)
self.logger.info("received fx quotes")
except asyncio.CancelledError:
# CancelledError must be passed-through for cancellation to work
raise
except BaseException as e:
self.logger.info(f"failed fx quotes: {repr(e)}")
self.quotes = {}
self.on_quotes()
def read_historical_rates(self, ccy, cache_dir) -> Optional[dict]:
filename = os.path.join(cache_dir, self.name() + '_'+ ccy)
if not os.path.exists(filename):
return None
timestamp = os.stat(filename).st_mtime
try:
with open(filename, 'r', encoding='utf-8') as f:
h = json.loads(f.read())
except:
return None
if not h: # e.g. empty dict
return None
h['timestamp'] = timestamp
self.history[ccy] = h
self.on_history()
return h
@log_exceptions
async def get_historical_rates_safe(self, ccy, cache_dir):
try:
self.logger.info(f"requesting fx history for {ccy}")
h = await self.request_history(ccy)
self.logger.info(f"received fx history for {ccy}")
except BaseException as e:
self.logger.info(f"failed fx history: {repr(e)}")
return
filename = os.path.join(cache_dir, self.name() + '_' + ccy)
with open(filename, 'w', encoding='utf-8') as f:
f.write(json.dumps(h))
h['timestamp'] = time.time()
self.history[ccy] = h
self.on_history()
def get_historical_rates(self, ccy, cache_dir):
if ccy not in self.history_ccys():
return
h = self.history.get(ccy)
if h is None:
h = self.read_historical_rates(ccy, cache_dir)
if h is None or h['timestamp'] < time.time() - 24*3600:
asyncio.get_event_loop().create_task(self.get_historical_rates_safe(ccy, cache_dir))
def history_ccys(self):
return []
def historical_rate(self, ccy, d_t):
return self.history.get(ccy, {}).get(d_t.strftime('%Y-%m-%d'), 'NaN')
async def request_history(self, ccy):
raise NotImplementedError() # implemented by subclasses
async def get_rates(self, ccy):
raise NotImplementedError() # implemented by subclasses
async def get_currencies(self):
rates = await self.get_rates('')
return sorted([str(a) for (a, b) in rates.items() if b is not None and len(a)==3])
class ATAIX(ExchangeBase):
async def get_rates(self, ccy):
json = await self.get_json('api.ataix.com', '/api/prices/ZCR-%s' % ccy)
return {ccy: Decimal(json['result'][0]['last'])}
class BitcoinAverage(ExchangeBase):
async def get_rates(self, ccy):
json1 = await self.get_json('apiv2.bitcoinaverage.com', '/indices/crypto/ticker/ZCRBTC')
if ccy != "BTC":
json2 = await self.get_json('apiv2.bitcoinaverage.com', '/indices/global/ticker/BTC%s' % ccy)
return {ccy: Decimal(json1['last'])*Decimal(json2['last'])}
return {ccy: Decimal(json1['last'])}
class Bittrex(ExchangeBase):
async def get_rates(self, ccy):
json1 = await self.get_json('bittrex.com', '/api/v1.1/public/getticker?market=btc-mona')
if ccy != "BTC":
json2 = await self.get_json('apiv2.bitcoinaverage.com', '/indices/global/ticker/BTC%s' % ccy)
return {ccy: Decimal(json1['result']['Last'])*Decimal(json2['last'])}
return {ccy: Decimal(json1['result']['Last'])}
class Bitbank(ExchangeBase):
async def get_rates(self, ccy):
json = await self.get_json('public.bitbank.cc', '/mona_%s/ticker' % ccy.lower())
return {ccy: Decimal(json['data']['last'])}
class Coincheck(ExchangeBase):
async def get_rates(self, ccy):
json = await self.get_json('coincheck.com', '/api/rate/mona_%s' % ccy.lower())
return {ccy: Decimal(json['rate'])}
class CoinGecko(ExchangeBase):
async def get_rates(self, ccy):
json = await self.get_json('api.coingecko.com',
'/api/v3/simple/price?ids=zcore&vs_currencies=%s' % ccy)
return {ccy: Decimal(json['zcore'][ccy.lower()])}
def history_ccys(self):
# CoinGecko seems to have historical data for all ccys it supports
return CURRENCIES[self.name()]
async def request_history(self, ccy):
history = await self.get_json('api.coingecko.com',
'/api/v3/coins/zcore/market_chart?vs_currency=%s&days=max' % ccy)
return dict([(datetime.utcfromtimestamp(h[0]/1000).strftime('%Y-%m-%d'), h[1])
for h in history['prices']])
class CryptBridge(ExchangeBase):
async def get_rates(self, ccy):
json1 = await self.get_json('api.crypto-bridge.org', '/api/v1/ticker/ZCR_BTC')
if ccy != "BTC":
json2 = await self.get_json('apiv2.bitcoinaverage.com', '/indices/global/ticker/BTC%s' % ccy)
return {ccy: Decimal(json1['last'])*Decimal(json2['last'])}
return {ccy: Decimal(json1['last'])}
class Fisco(ExchangeBase):
async def get_rates(self, ccy):
json = await self.get_json('api.fcce.jp', '/api/1/last_price/mona_%s' % ccy.lower())
return {ccy: Decimal(json['last_price'])}
class NebliDex(ExchangeBase):
async def get_rates(self, ccy):
json = await self.get_json('www.neblidex.xyz', '/seed/?v=1&api=get_market_price&market=ZCR/%s' % ccy)
return {ccy: Decimal(json)}
class TradeSatoshi(ExchangeBase):
async def get_rates(self, ccy):
json = await self.get_json('tradesatoshi.com', '/api/public/getmarketsummary?market=ZCR_BTC')
return {'BTC': Decimal(json['result']['last'])}
class Zaif(ExchangeBase):
async def get_rates(self, ccy):
json = await self.get_json('api.zaif.jp', '/api/1/last_price/mona_%s' % ccy.lower())
return {ccy: Decimal(json['last_price'])}
def dictinvert(d):
inv = {}
for k, vlist in d.items():
for v in vlist:
keys = inv.setdefault(v, [])
keys.append(k)
return inv
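# A small worked example of dictinvert (illustrative values only):
#   dictinvert({'CoinGecko': ['USD', 'JPY'], 'Zaif': ['JPY']})
#   == {'USD': ['CoinGecko'], 'JPY': ['CoinGecko', 'Zaif']}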
def get_exchanges_and_currencies():
# load currencies.json from disk
path = resource_path('currencies.json')
try:
with open(path, 'r', encoding='utf-8') as f:
return json.loads(f.read())
except:
pass
# or if not present, generate it now.
print("cannot find currencies.json. will regenerate it now.")
d = {}
is_exchange = lambda obj: (inspect.isclass(obj)
and issubclass(obj, ExchangeBase)
and obj != ExchangeBase)
exchanges = dict(inspect.getmembers(sys.modules[__name__], is_exchange))
async def get_currencies_safe(name, exchange):
try:
d[name] = await exchange.get_currencies()
print(name, "ok")
except:
print(name, "error")
async def query_all_exchanges_for_their_ccys_over_network():
async with timeout_after(10):
async with TaskGroup() as group:
for name, klass in exchanges.items():
exchange = klass(None, None)
await group.spawn(get_currencies_safe(name, exchange))
loop = asyncio.get_event_loop()
try:
loop.run_until_complete(query_all_exchanges_for_their_ccys_over_network())
except Exception as e:
pass
with open(path, 'w', encoding='utf-8') as f:
f.write(json.dumps(d, indent=4, sort_keys=True))
return d
CURRENCIES = get_exchanges_and_currencies()
def get_exchanges_by_ccy(history=True):
if not history:
return dictinvert(CURRENCIES)
d = {}
exchanges = CURRENCIES.keys()
for name in exchanges:
klass = globals()[name]
exchange = klass(None, None)
d[name] = exchange.history_ccys()
return dictinvert(d)
class FxThread(ThreadJob):
def __init__(self, config: SimpleConfig, network: Network):
ThreadJob.__init__(self)
self.config = config
self.network = network
if self.network:
self.network.register_callback(self.set_proxy, ['proxy_set'])
self.ccy = self.get_currency()
self.history_used_spot = False
self.ccy_combo = None
self.hist_checkbox = None
self.cache_dir = os.path.join(config.path, 'cache')
self._trigger = asyncio.Event()
self._trigger.set()
self.set_exchange(self.config_exchange())
make_dir(self.cache_dir)
def set_proxy(self, trigger_name, *args):
self._trigger.set()
@staticmethod
def get_currencies(history: bool) -> Sequence[str]:
d = get_exchanges_by_ccy(history)
return sorted(d.keys())
@staticmethod
def get_exchanges_by_ccy(ccy: str, history: bool) -> Sequence[str]:
d = get_exchanges_by_ccy(history)
return d.get(ccy, [])
@staticmethod
def remove_thousands_separator(text):
return text.replace(',', '') # FIXME use THOUSAND_SEPARATOR in util
def ccy_amount_str(self, amount, commas):
prec = CCY_PRECISIONS.get(self.ccy, 8)
fmt_str = "{:%s.%df}" % ("," if commas else "", max(0, prec)) # FIXME use util.THOUSAND_SEPARATOR and util.DECIMAL_POINT
try:
rounded_amount = round(amount, prec)
except decimal.InvalidOperation:
rounded_amount = amount
return fmt_str.format(rounded_amount)
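# For example, with self.ccy == 'JPY' (precision 0 in CCY_PRECISIONS above),
# ccy_amount_str(Decimal('1234.567'), commas=True) returns '1,235'; currencies
# not listed fall back to 8 decimal places.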
async def run(self):
while True:
# approx. every 2.5 minutes, refresh spot price
try:
async with timeout_after(150):
await self._trigger.wait()
self._trigger.clear()
# we were manually triggered, so get historical rates
if self.is_enabled() and self.show_history():
self.exchange.get_historical_rates(self.ccy, self.cache_dir)
except TaskTimeout:
pass
if self.is_enabled():
await self.exchange.update_safe(self.ccy)
def is_enabled(self):
return bool(self.config.get('use_exchange_rate', DEFAULT_ENABLED))
def set_enabled(self, b):
self.config.set_key('use_exchange_rate', bool(b))
self.trigger_update()
def get_history_config(self, *, default=False):
return bool(self.config.get('history_rates', default))
def set_history_config(self, b):
self.config.set_key('history_rates', bool(b))
def get_history_capital_gains_config(self):
return bool(self.config.get('history_rates_capital_gains', False))
def set_history_capital_gains_config(self, b):
self.config.set_key('history_rates_capital_gains', bool(b))
def get_fiat_address_config(self):
return bool(self.config.get('fiat_address'))
def set_fiat_address_config(self, b):
self.config.set_key('fiat_address', bool(b))
def get_currency(self):
'''Use when dynamic fetching is needed'''
return self.config.get("currency", DEFAULT_CURRENCY)
def config_exchange(self):
return self.config.get('use_exchange', DEFAULT_EXCHANGE)
def show_history(self):
return self.is_enabled() and self.get_history_config() and self.ccy in self.exchange.history_ccys()
def set_currency(self, ccy):
self.ccy = ccy
self.config.set_key('currency', ccy, True)
self.trigger_update()
self.on_quotes()
def trigger_update(self):
if self.network:
self.network.asyncio_loop.call_soon_threadsafe(self._trigger.set)
def set_exchange(self, name):
class_ = globals().get(name) or globals().get(DEFAULT_EXCHANGE)
self.logger.info(f"using exchange {name}")
if self.config_exchange() != name:
self.config.set_key('use_exchange', name, True)
assert issubclass(class_, ExchangeBase), f"unexpected type {class_} for {name}"
self.exchange = class_(self.on_quotes, self.on_history) # type: ExchangeBase
# A new exchange means new fx quotes, initially empty. Force
# a quote refresh
self.trigger_update()
self.exchange.read_historical_rates(self.ccy, self.cache_dir)
def on_quotes(self):
if self.network:
self.network.trigger_callback('on_quotes')
def on_history(self):
if self.network:
self.network.trigger_callback('on_history')
def exchange_rate(self) -> Decimal:
"""Returns the exchange rate as a Decimal"""
rate = self.exchange.quotes.get(self.ccy)
if rate is None:
return Decimal('NaN')
return Decimal(rate)
def format_amount(self, btc_balance):
rate = self.exchange_rate()
return '' if rate.is_nan() else "%s" % self.value_str(btc_balance, rate)
def format_amount_and_units(self, btc_balance):
rate = self.exchange_rate()
return '' if rate.is_nan() else "%s %s" % (self.value_str(btc_balance, rate), self.ccy)
def get_fiat_status_text(self, btc_balance, base_unit, decimal_point):
rate = self.exchange_rate()
return _(" (No FX rate available)") if rate.is_nan() else " 1 %s~%s %s" % (base_unit,
self.value_str(COIN / (10**(8 - decimal_point)), rate), self.ccy)
def fiat_value(self, satoshis, rate):
return Decimal('NaN') if satoshis is None else Decimal(satoshis) / COIN * Decimal(rate)
def value_str(self, satoshis, rate):
return self.format_fiat(self.fiat_value(satoshis, rate))
def format_fiat(self, value):
if value.is_nan():
return _("No data")
return "%s" % (self.ccy_amount_str(value, True))
def history_rate(self, d_t):
if d_t is None:
return Decimal('NaN')
rate = self.exchange.historical_rate(self.ccy, d_t)
# Frequently there is no rate for today, until tomorrow :)
# Use spot quotes in that case
if rate in ('NaN', None) and (datetime.today().date() - d_t.date()).days <= 2:
rate = self.exchange.quotes.get(self.ccy, 'NaN')
self.history_used_spot = True
if rate is None:
rate = 'NaN'
return Decimal(rate)
def historical_value_str(self, satoshis, d_t):
return self.format_fiat(self.historical_value(satoshis, d_t))
def historical_value(self, satoshis, d_t):
return self.fiat_value(satoshis, self.history_rate(d_t))
def timestamp_rate(self, timestamp):
from .util import timestamp_to_datetime
date = timestamp_to_datetime(timestamp)
return self.history_rate(date)
assert globals().get(DEFAULT_EXCHANGE), f"default exchange {DEFAULT_EXCHANGE} does not exist"
the-stack_0_21489 |
import struct
from dataclasses import dataclass
from enum import Enum, IntEnum, unique
from typing import Any
class IDProto(IntEnum):
ID_LEN_ID_TYPE = 1
ID_LEN_SUBTASK_ID = 1
ID_LEN_RESERVE = 2
ID_LEN_USER_ID = 4
ID_LEN_REPO_ID = 6
ID_LEN_HEX_TASK_ID = 16
ID_LENGTH = (
ID_LEN_ID_TYPE + ID_LEN_SUBTASK_ID + ID_LEN_RESERVE + ID_LEN_USER_ID + ID_LEN_REPO_ID + ID_LEN_HEX_TASK_ID
)
@unique
class IDType(Enum):
ID_TYPE_UNKNOWN = "z"
ID_TYPE_ASSET = "a"
ID_TYPE_COMMIT = "c"
ID_TYPE_TASK = "t"
ID_TYPE_REPO = "r"
ID_TYPE_USER = "u"
@dataclass
class TaskId:
id_type: str
sub_task_id: str
id_reserve: str
user_id: str
repo_id: str
hex_task_id: str
def __post_init__(self) -> None:
if len(self.id_type) != IDProto.ID_LEN_ID_TYPE:
raise ValueError(f"Invalid id_type: {self.id_type}")
if len(self.sub_task_id) != IDProto.ID_LEN_SUBTASK_ID:
raise ValueError(f"Invalid sub_task_id: {self.sub_task_id}")
if len(self.id_reserve) != IDProto.ID_LEN_RESERVE:
raise ValueError(f"Invalid id_reserve: {self.id_reserve}")
if len(self.user_id) != IDProto.ID_LEN_USER_ID:
raise ValueError(f"Invalid user_id: {self.user_id}")
if len(self.repo_id) != IDProto.ID_LEN_REPO_ID:
raise ValueError(f"Invalid repo_id: {self.repo_id}")
if len(self.hex_task_id) != IDProto.ID_LEN_HEX_TASK_ID:
raise ValueError(f"Invalid hex_task_id: {self.hex_task_id}")
def __str__(self) -> str:
return f"{self.id_type}{self.sub_task_id}{self.id_reserve}{self.user_id}{self.repo_id}{self.hex_task_id}"
@classmethod
def from_task_id(cls, task_id: str) -> Any:
fmt = "1s1s2s4s6s16s"
components = struct.unpack(fmt, task_id.encode())
return cls(*(c.decode() for c in components))
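# A minimal usage sketch; the 30-character id below is made up, but its field
# widths follow IDProto (1 + 1 + 2 + 4 + 6 + 16):
#   raw = "t" + "0" + "00" + "0001" + "000001" + "0123456789abcdef"
#   tid = TaskId.from_task_id(raw)
#   assert tid.user_id == "0001" and tid.hex_task_id == "0123456789abcdef"
#   assert str(tid) == raw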
the-stack_0_21490 |
import importlib
import sys
from pathlib import Path
import pyqtgraph as pg
from pyqtgraph.Qt import QtCore, QtGui, QtWidgets
import numpy as np
from pyqtgraph.widgets.FeedbackButton import FeedbackButton
# NeuroPy (Diba Lab Python Repo) Loading
try:
from neuropy import core
importlib.reload(core)
except ImportError:
sys.path.append(r"C:\Users\Pho\repos\NeuroPy") # Windows
# sys.path.append('/home/pho/repo/BapunAnalysis2021/NeuroPy') # Linux
# sys.path.append(r'/Users/pho/repo/Python Projects/NeuroPy') # MacOS
print("neuropy module not found, adding directory to sys.path. \n >> Updated sys.path.")
from neuropy import core
from neuropy.core.neurons import NeuronType
def checkTableWidgetExample(title='PhoCheckTableWidgetExampleApp'):
app = pg.mkQApp(title)
# w = pg.CheckTable(['Column 1','Column 2','Column 3'])
# col_labels = ['pre', 'maze1', 'post1', 'maze2', 'post2']
# col_labels = ['pre', 'maze1', 'post1', 'maze2', 'post2']
# col_labels = NeuronType.longClassNames()
col_labels = NeuronType.__members__
w = pg.CheckTable(col_labels)
w.layout.setSpacing(10)
def on_add_row_clicked(evt):
w.addRow('New')
def on_table_check_changed(row, col, state):
# note row: int, col: str, state: 0 for unchecked or 2 for checked
print(f'on_table_check_changed(row: {row}, col: {col}, state: {state})')
w.sigStateChanged.connect(on_table_check_changed)
window = QtWidgets.QWidget()
layout = QtGui.QVBoxLayout()
layout.addWidget(w)
addRowBtn = QtWidgets.QPushButton('Add Row')
addRowBtn.setObjectName("addRowBtn")
addRowBtn.clicked.connect(on_add_row_clicked)
layout.addWidget(addRowBtn)
window.setLayout(layout)
# w.show()
window.show()
window.resize(500,500)
window.setWindowTitle('pyqtgraph example: CheckTable')
# w.resize(500,500)
# w.setWindowTitle('pyqtgraph example: CheckTable')
rows_data = [f'row[{i}]' for i in np.arange(8)]
w.updateRows(rows_data)
return window, app
if __name__ == '__main__':
win, app = checkTableWidgetExample()
pg.exec()
the-stack_0_21491 |
# table definition
table = {
'table_name' : 'ap_tran_alloc',
'module_id' : 'ap',
'short_descr' : 'Ap allocation transaction',
'long_descr' : 'Ap allocation transaction',
'sub_types' : None,
'sub_trans' : None,
'sequence' : None,
'tree_params' : None,
'roll_params' : None,
'indexes' : None,
'ledger_col' : 'item_row_id>supp_row_id>ledger_row_id',
'defn_company' : None,
'data_company' : None,
'read_only' : False,
}
# column definitions
cols = []
cols.append ({
'col_name' : 'row_id',
'data_type' : 'AUTO',
'short_descr': 'Row id',
'long_descr' : 'Row id',
'col_head' : 'Row',
'key_field' : 'Y',
'data_source': 'gen',
'condition' : None,
'allow_null' : False,
'allow_amend': False,
'max_len' : 0,
'db_scale' : 0,
'scale_ptr' : None,
'dflt_val' : None,
'dflt_rule' : None,
'col_checks' : None,
'fkey' : None,
'choices' : None,
})
cols.append ({
'col_name' : 'created_id',
'data_type' : 'INT',
'short_descr': 'Created id',
'long_descr' : 'Created row id',
'col_head' : 'Created',
'key_field' : 'N',
'data_source': 'gen',
'condition' : None,
'allow_null' : False,
'allow_amend': False,
'max_len' : 0,
'db_scale' : 0,
'scale_ptr' : None,
'dflt_val' : '0',
'dflt_rule' : None,
'col_checks' : None,
'fkey' : None,
'choices' : None,
})
cols.append ({
'col_name' : 'deleted_id',
'data_type' : 'INT',
'short_descr': 'Deleted id',
'long_descr' : 'Deleted row id',
'col_head' : 'Deleted',
'key_field' : 'N',
'data_source': 'gen',
'condition' : None,
'allow_null' : False,
'allow_amend': False,
'max_len' : 0,
'db_scale' : 0,
'scale_ptr' : None,
'dflt_val' : '0',
'dflt_rule' : None,
'col_checks' : None,
'fkey' : None,
'choices' : None,
})
cols.append ({
'col_name' : 'item_row_id',
'data_type' : 'INT',
'short_descr': 'Trans item id',
'long_descr' : 'Transaction item row id',
'col_head' : 'Item id',
'key_field' : 'A',
'data_source': 'input',
'condition' : None,
'allow_null' : False,
'allow_amend': False,
'max_len' : 0,
'db_scale' : 0,
'scale_ptr' : None,
'dflt_val' : None,
'dflt_rule' : None,
'col_checks' : [
['no_alloc_inv', 'Cannot allocate an invoice', [
['check', '', 'item_row_id>tran_type', '!=', "'ap_inv'", ''],
]],
['check_posted', 'Transaction must be posted', [
['check', '', 'item_row_id>tran_row_id>posted', 'is', '$True', ''],
]],
],
'fkey' : ['ap_openitems', 'row_id', None, None, False, None],
'choices' : None,
})
cols.append ({
'col_name' : 'alloc_no',
'data_type' : 'INT',
'short_descr': 'Allocation number',
'long_descr' : 'Allocation number for this item',
'col_head' : 'Alloc no',
'key_field' : 'A',
'data_source': 'prog',
'condition' : None,
'allow_null' : False,
'allow_amend': False,
'max_len' : 0,
'db_scale' : 0,
'scale_ptr' : None,
'dflt_val' : None,
'dflt_rule' : None,
'col_checks' : None,
'fkey' : None,
'choices' : None,
})
cols.append ({
'col_name' : 'tran_date',
'data_type' : 'DTE',
'short_descr': 'Transaction date',
'long_descr' : 'Transaction date - used for credit note date if discount allowed',
'col_head' : 'Date',
'key_field' : 'N',
'data_source': 'calc',
'condition' : None,
'allow_null' : False,
'allow_amend': False,
'max_len' : 0,
'db_scale' : 0,
'scale_ptr' : None,
'dflt_val' : None,
'dflt_rule' : (
'<alloc_tran_date/>'
),
'col_checks' : None,
'fkey' : None,
'choices' : None,
})
cols.append ({
'col_name' : 'posted',
'data_type' : 'BOOL',
'short_descr': 'Posted?',
'long_descr' : 'Has transaction been posted?',
'col_head' : 'Posted?',
'key_field' : 'N',
'data_source': 'prog',
'condition' : None,
'allow_null' : False,
'allow_amend': False,
'max_len' : 0,
'db_scale' : 0,
'scale_ptr' : None,
'dflt_val' : 'false',
'dflt_rule' : None,
'col_checks' : None,
'fkey' : None,
'choices' : None,
})
# virtual column definitions
virt = []
# virt.append ({
# 'col_name' : 'supp_row_id',
# 'data_type' : 'INT',
# 'short_descr': 'Supplier row id',
# 'long_descr' : 'Supplier row id',
# 'col_head' : 'Supp row_id',
# 'fkey' : ['ap_suppliers', 'row_id', None, None, False, None],
# 'sql' : 'a.item_row_id>supp_row_id'
# })
virt.append ({
'col_name' : 'tran_exch_rate',
'data_type' : 'DEC',
'short_descr': 'Transaction exchange rate',
'long_descr' : 'Exchange rate from transaction currency to local',
'col_head' : 'Rate tran',
'db_scale' : 8,
'scale_ptr' : None,
'dflt_rule' : (
'<expr>'
'<fld_val name="item_row_id>amount_supp"/>'
'<op type="/"/>'
'<fld_val name="item_row_id>amount_local"/>'
'</expr>'
),
'sql' : 'a.item_row_id>amount_supp / a.item_row_id>amount_local',
})
virt.append ({
'col_name' : 'det_exists',
'data_type' : 'BOOL',
'short_descr': 'Detail row exists?',
'long_descr' : 'Have any detail lines been entered?',
'col_head' : '',
'sql' : (
"CASE WHEN EXISTS(SELECT * FROM {company}.ap_allocations b "
"WHERE b.tran_row_id = a.row_id) THEN $True ELSE $False END"
),
})
virt.append ({
'col_name' : 'unallocated',
'data_type' : '$PTY',
'short_descr': 'Unallocated',
'long_descr' : 'Balance of transaction not allocated',
'col_head' : 'Unalloc',
'db_scale' : 2,
'scale_ptr' : 'item_row_id>supp_row_id>currency_id>scale',
'dflt_val' : '0',
'dflt_rule' : None,
'sql' : (
"a.item_row_id>amount_supp "
"+ "
"COALESCE(("
"SELECT b.alloc_supp FROM {company}.ap_allocations b "
"WHERE b.tran_row_id = a.row_id AND b.deleted_id = 0"
"), 0)"
),
})
# cursor definitions
cursors = []
cursors.append({
'cursor_name': 'unposted_alloc',
'title': 'Unposted ap allocations',
'columns': [
['item_row_id>supp_row_id>party_row_id>party_id', 80, False, True],
['item_row_id>supp_row_id>party_row_id>display_name', 160, True, True],
['tran_date', 80, False, True],
['item_row_id>tran_type', 60, False, True],
['item_row_id>tran_number', 80, False, True],
['item_row_id>balance_supp', 120, False, True],
],
'filter': [
['where', '', 'posted', '=', "'0'", ''],
],
'sequence': [['tran_date', False]],
'formview_name': 'ap_alloc',
})
# actions
actions = []
actions.append([
'upd_on_post', [
[
'ap_allocations',
[ # condition
['where', '', '_ctx.tot_alloc_supp', 'pyfunc', 'custom.aptrans_funcs.get_tot_alloc', ''],
],
False, # split source?
[ # key fields
# ['tran_row_id', 'row_id'], # tgt_col, op, src_col
['item_row_id', 'item_row_id'], # tgt_col, op, src_col
],
[], # aggregation
[ # on post
['alloc_supp', '-', '_ctx.tot_alloc_supp'], # tgt_col, op, src_col
['alloc_local', '-', '_ctx.tot_alloc_local'],
],
[], # on unpost
],
[
'ap_tran_disc',
[ # condition
['where', '', '_ctx.tot_disc_supp', '!=', '0', ''],
],
False, # split source?
[ # key fields
['supp_row_id', 'item_row_id>supp_row_id'], # tgt_col, op, src_col
],
[], # aggregation
[ # on post
['tran_date', '=', 'tran_date'], # tgt_col, op, src_col
['tran_exch_rate', '=', 'tran_exch_rate'],
['discount_supp', '=', '_ctx.tot_disc_supp'],
['discount_local', '=', '_ctx.tot_disc_local'],
['orig_item_id', '=', 'item_row_id'],
],
[], # on unpost
[ # return values
['_ctx.disc_row_id', 'row_id'], # tgt_col, src_col
],
],
],
])
actions.append([
'before_post',
'<assign src="$None" tgt="_ctx.disc_row_id"/>'
])
actions.append([
'after_post',
'<case>'
'<compare test="[[`if`, ``, `_ctx.disc_row_id`, `is not`, `$None`, ``]]">'
'<pyfunc name="custom.aptrans_funcs.post_alloc_crn"/>'
'</compare>'
'</case>'
])
the-stack_0_21495 |
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2018-2019 New Vector Ltd
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import logging
from collections import OrderedDict, namedtuple
from typing import TYPE_CHECKING, Dict, Iterable, List, Tuple
import attr
from prometheus_client import Counter
from twisted.internet import defer
import synapse.metrics
from synapse.api.constants import EventContentFields, EventTypes, RelationTypes
from synapse.api.room_versions import RoomVersions
from synapse.crypto.event_signing import compute_event_reference_hash
from synapse.events import EventBase # noqa: F401
from synapse.events.snapshot import EventContext # noqa: F401
from synapse.logging.utils import log_function
from synapse.storage._base import db_to_json, make_in_list_sql_clause
from synapse.storage.database import DatabasePool, LoggingTransaction
from synapse.storage.databases.main.search import SearchEntry
from synapse.storage.util.id_generators import StreamIdGenerator
from synapse.types import StateMap, get_domain_from_id
from synapse.util.frozenutils import frozendict_json_encoder
from synapse.util.iterutils import batch_iter
if TYPE_CHECKING:
from synapse.server import HomeServer
from synapse.storage.databases.main import DataStore
logger = logging.getLogger(__name__)
persist_event_counter = Counter("synapse_storage_events_persisted_events", "")
event_counter = Counter(
"synapse_storage_events_persisted_events_sep",
"",
["type", "origin_type", "origin_entity"],
)
def encode_json(json_object):
"""
Encode a Python object as JSON and return it in a Unicode string.
"""
out = frozendict_json_encoder.encode(json_object)
if isinstance(out, bytes):
out = out.decode("utf8")
return out
_EventCacheEntry = namedtuple("_EventCacheEntry", ("event", "redacted_event"))
@attr.s(slots=True)
class DeltaState:
"""Deltas to use to update the `current_state_events` table.
Attributes:
to_delete: List of type/state_keys to delete from current state
to_insert: Map of state to upsert into current state
no_longer_in_room: The server is no longer in the room, so the room
should e.g. be removed from `current_state_events` table.
"""
to_delete = attr.ib(type=List[Tuple[str, str]])
to_insert = attr.ib(type=StateMap[str])
no_longer_in_room = attr.ib(type=bool, default=False)
class PersistEventsStore:
"""Contains all the functions for writing events to the database.
Should only be instantiated on one process (when using a worker mode setup).
Note: This is not part of the `DataStore` mixin.
"""
def __init__(
self, hs: "HomeServer", db: DatabasePool, main_data_store: "DataStore"
):
self.hs = hs
self.db_pool = db
self.store = main_data_store
self.database_engine = db.engine
self._clock = hs.get_clock()
self._ephemeral_messages_enabled = hs.config.enable_ephemeral_messages
self.is_mine_id = hs.is_mine_id
# Ideally we'd move these ID gens here, unfortunately some other ID
# generators are chained off them so doing so is a bit of a PITA.
self._backfill_id_gen = self.store._backfill_id_gen # type: StreamIdGenerator
self._stream_id_gen = self.store._stream_id_gen # type: StreamIdGenerator
# This should only exist on instances that are configured to write
assert (
hs.config.worker.writers.events == hs.get_instance_name()
), "Can only instantiate EventsStore on master"
@defer.inlineCallbacks
def _persist_events_and_state_updates(
self,
events_and_contexts: List[Tuple[EventBase, EventContext]],
current_state_for_room: Dict[str, StateMap[str]],
state_delta_for_room: Dict[str, DeltaState],
new_forward_extremeties: Dict[str, List[str]],
backfilled: bool = False,
):
"""Persist a set of events alongside updates to the current state and
forward extremities tables.
Args:
events_and_contexts:
current_state_for_room: Map from room_id to the current state of
the room based on forward extremities
state_delta_for_room: Map from room_id to the delta to apply to
room state
new_forward_extremities: Map from room_id to list of event IDs
that are the new forward extremities of the room.
backfilled
Returns:
Deferred: resolves when the events have been persisted
"""
# We want to calculate the stream orderings as late as possible, as
# we only notify after all events with a lesser stream ordering have
# been persisted. I.e. if we spend 10s inside the with block then
# that will delay all subsequent events from being notified about.
# Hence why we do it down here rather than wrapping the entire
# function.
#
# Its safe to do this after calculating the state deltas etc as we
# only need to protect the *persistence* of the events. This is to
# ensure that queries of the form "fetch events since X" don't
# return events and stream positions after events that are still in
# flight, as otherwise subsequent requests "fetch event since Y"
# will not return those events.
#
# Note: Multiple instances of this function cannot be in flight at
# the same time for the same room.
if backfilled:
stream_ordering_manager = self._backfill_id_gen.get_next_mult(
len(events_and_contexts)
)
else:
stream_ordering_manager = self._stream_id_gen.get_next_mult(
len(events_and_contexts)
)
with stream_ordering_manager as stream_orderings:
for (event, context), stream in zip(events_and_contexts, stream_orderings):
event.internal_metadata.stream_ordering = stream
yield self.db_pool.runInteraction(
"persist_events",
self._persist_events_txn,
events_and_contexts=events_and_contexts,
backfilled=backfilled,
state_delta_for_room=state_delta_for_room,
new_forward_extremeties=new_forward_extremeties,
)
persist_event_counter.inc(len(events_and_contexts))
if not backfilled:
# backfilled events have negative stream orderings, so we don't
# want to set the event_persisted_position to that.
synapse.metrics.event_persisted_position.set(
events_and_contexts[-1][0].internal_metadata.stream_ordering
)
for event, context in events_and_contexts:
if context.app_service:
origin_type = "local"
origin_entity = context.app_service.id
elif self.hs.is_mine_id(event.sender):
origin_type = "local"
origin_entity = "*client*"
else:
origin_type = "remote"
origin_entity = get_domain_from_id(event.sender)
event_counter.labels(event.type, origin_type, origin_entity).inc()
for room_id, new_state in current_state_for_room.items():
self.store.get_current_state_ids.prefill((room_id,), new_state)
for room_id, latest_event_ids in new_forward_extremeties.items():
self.store.get_latest_event_ids_in_room.prefill(
(room_id,), list(latest_event_ids)
)
@defer.inlineCallbacks
def _get_events_which_are_prevs(self, event_ids):
"""Filter the supplied list of event_ids to get those which are prev_events of
existing (non-outlier/rejected) events.
Args:
event_ids (Iterable[str]): event ids to filter
Returns:
Deferred[List[str]]: filtered event ids
"""
results = []
def _get_events_which_are_prevs_txn(txn, batch):
sql = """
SELECT prev_event_id, internal_metadata
FROM event_edges
INNER JOIN events USING (event_id)
LEFT JOIN rejections USING (event_id)
LEFT JOIN event_json USING (event_id)
WHERE
NOT events.outlier
AND rejections.event_id IS NULL
AND
"""
clause, args = make_in_list_sql_clause(
self.database_engine, "prev_event_id", batch
)
txn.execute(sql + clause, args)
results.extend(r[0] for r in txn if not db_to_json(r[1]).get("soft_failed"))
for chunk in batch_iter(event_ids, 100):
yield self.db_pool.runInteraction(
"_get_events_which_are_prevs", _get_events_which_are_prevs_txn, chunk
)
return results
@defer.inlineCallbacks
def _get_prevs_before_rejected(self, event_ids):
"""Get soft-failed ancestors to remove from the extremities.
Given a set of events, find all those that have been soft-failed or
rejected. Returns those soft failed/rejected events and their prev
events (whether soft-failed/rejected or not), and recurses up the
prev-event graph until it finds no more soft-failed/rejected events.
This is used to find extremities that are ancestors of new events, but
are separated by soft failed events.
Args:
event_ids (Iterable[str]): Events to find prev events for. Note
that these must have already been persisted.
Returns:
Deferred[set[str]]
"""
# The set of event_ids to return. This includes all soft-failed events
# and their prev events.
existing_prevs = set()
def _get_prevs_before_rejected_txn(txn, batch):
to_recursively_check = batch
while to_recursively_check:
sql = """
SELECT
event_id, prev_event_id, internal_metadata,
rejections.event_id IS NOT NULL
FROM event_edges
INNER JOIN events USING (event_id)
LEFT JOIN rejections USING (event_id)
LEFT JOIN event_json USING (event_id)
WHERE
NOT events.outlier
AND
"""
clause, args = make_in_list_sql_clause(
self.database_engine, "event_id", to_recursively_check
)
txn.execute(sql + clause, args)
to_recursively_check = []
for event_id, prev_event_id, metadata, rejected in txn:
if prev_event_id in existing_prevs:
continue
soft_failed = db_to_json(metadata).get("soft_failed")
if soft_failed or rejected:
to_recursively_check.append(prev_event_id)
existing_prevs.add(prev_event_id)
for chunk in batch_iter(event_ids, 100):
yield self.db_pool.runInteraction(
"_get_prevs_before_rejected", _get_prevs_before_rejected_txn, chunk
)
return existing_prevs
@log_function
def _persist_events_txn(
self,
txn: LoggingTransaction,
events_and_contexts: List[Tuple[EventBase, EventContext]],
backfilled: bool,
state_delta_for_room: Dict[str, DeltaState] = {},
new_forward_extremeties: Dict[str, List[str]] = {},
):
"""Insert some number of room events into the necessary database tables.
Rejected events are only inserted into the events table, the events_json table,
and the rejections table. Things reading from those table will need to check
whether the event was rejected.
Args:
txn
events_and_contexts: events to persist
backfilled: True if the events were backfilled
state_delta_for_room: The current-state delta for each room.
new_forward_extremeties: The new forward extremities for each room.
For each room, a list of the event ids which are the forward
extremities.
"""
all_events_and_contexts = events_and_contexts
min_stream_order = events_and_contexts[0][0].internal_metadata.stream_ordering
max_stream_order = events_and_contexts[-1][0].internal_metadata.stream_ordering
self._update_forward_extremities_txn(
txn,
new_forward_extremities=new_forward_extremeties,
max_stream_order=max_stream_order,
)
# Ensure that we don't have the same event twice.
events_and_contexts = self._filter_events_and_contexts_for_duplicates(
events_and_contexts
)
self._update_room_depths_txn(
txn, events_and_contexts=events_and_contexts, backfilled=backfilled
)
# _update_outliers_txn filters out any events which have already been
# persisted, and returns the filtered list.
events_and_contexts = self._update_outliers_txn(
txn, events_and_contexts=events_and_contexts
)
# From this point onwards the events are only events that we haven't
# seen before.
self._store_event_txn(txn, events_and_contexts=events_and_contexts)
# Insert into event_to_state_groups.
self._store_event_state_mappings_txn(txn, events_and_contexts)
# We want to store event_auth mappings for rejected events, as they're
# used in state res v2.
# This is only necessary if the rejected event appears in an accepted
        # event's auth chain, but it's easier for now just to store them (and
# it doesn't take much storage compared to storing the entire event
# anyway).
self.db_pool.simple_insert_many_txn(
txn,
table="event_auth",
values=[
{
"event_id": event.event_id,
"room_id": event.room_id,
"auth_id": auth_id,
}
for event, _ in events_and_contexts
for auth_id in event.auth_event_ids()
if event.is_state()
],
)
# _store_rejected_events_txn filters out any events which were
# rejected, and returns the filtered list.
events_and_contexts = self._store_rejected_events_txn(
txn, events_and_contexts=events_and_contexts
)
# From this point onwards the events are only ones that weren't
# rejected.
self._update_metadata_tables_txn(
txn,
events_and_contexts=events_and_contexts,
all_events_and_contexts=all_events_and_contexts,
backfilled=backfilled,
)
# We call this last as it assumes we've inserted the events into
# room_memberships, where applicable.
self._update_current_state_txn(txn, state_delta_for_room, min_stream_order)
def _update_current_state_txn(
self,
txn: LoggingTransaction,
state_delta_by_room: Dict[str, DeltaState],
stream_id: int,
):
for room_id, delta_state in state_delta_by_room.items():
to_delete = delta_state.to_delete
to_insert = delta_state.to_insert
if delta_state.no_longer_in_room:
# Server is no longer in the room so we delete the room from
# current_state_events, being careful we've already updated the
# rooms.room_version column (which gets populated in a
# background task).
self._upsert_room_version_txn(txn, room_id)
# Before deleting we populate the current_state_delta_stream
# so that async background tasks get told what happened.
sql = """
INSERT INTO current_state_delta_stream
(stream_id, room_id, type, state_key, event_id, prev_event_id)
SELECT ?, room_id, type, state_key, null, event_id
FROM current_state_events
WHERE room_id = ?
"""
txn.execute(sql, (stream_id, room_id))
self.db_pool.simple_delete_txn(
txn, table="current_state_events", keyvalues={"room_id": room_id},
)
else:
# We're still in the room, so we update the current state as normal.
# First we add entries to the current_state_delta_stream. We
# do this before updating the current_state_events table so
# that we can use it to calculate the `prev_event_id`. (This
# allows us to not have to pull out the existing state
# unnecessarily).
#
# The stream_id for the update is chosen to be the minimum of the stream_ids
# for the batch of the events that we are persisting; that means we do not
# end up in a situation where workers see events before the
# current_state_delta updates.
#
sql = """
INSERT INTO current_state_delta_stream
(stream_id, room_id, type, state_key, event_id, prev_event_id)
SELECT ?, ?, ?, ?, ?, (
SELECT event_id FROM current_state_events
WHERE room_id = ? AND type = ? AND state_key = ?
)
"""
txn.executemany(
sql,
(
(
stream_id,
room_id,
etype,
state_key,
to_insert.get((etype, state_key)),
room_id,
etype,
state_key,
)
for etype, state_key in itertools.chain(to_delete, to_insert)
),
)
# Now we actually update the current_state_events table
txn.executemany(
"DELETE FROM current_state_events"
" WHERE room_id = ? AND type = ? AND state_key = ?",
(
(room_id, etype, state_key)
for etype, state_key in itertools.chain(to_delete, to_insert)
),
)
# We include the membership in the current state table, hence we do
# a lookup when we insert. This assumes that all events have already
# been inserted into room_memberships.
txn.executemany(
"""INSERT INTO current_state_events
(room_id, type, state_key, event_id, membership)
VALUES (?, ?, ?, ?, (SELECT membership FROM room_memberships WHERE event_id = ?))
""",
[
(room_id, key[0], key[1], ev_id, ev_id)
for key, ev_id in to_insert.items()
],
)
# We now update `local_current_membership`. We do this regardless
# of whether we're still in the room or not to handle the case where
# e.g. we just got banned (where we need to record that fact here).
# Note: Do we really want to delete rows here (that we do not
# subsequently reinsert below)? While technically correct it means
# we have no record of the fact the user *was* a member of the
# room but got, say, state reset out of it.
if to_delete or to_insert:
txn.executemany(
"DELETE FROM local_current_membership"
" WHERE room_id = ? AND user_id = ?",
(
(room_id, state_key)
for etype, state_key in itertools.chain(to_delete, to_insert)
if etype == EventTypes.Member and self.is_mine_id(state_key)
),
)
if to_insert:
txn.executemany(
"""INSERT INTO local_current_membership
(room_id, user_id, event_id, membership)
VALUES (?, ?, ?, (SELECT membership FROM room_memberships WHERE event_id = ?))
""",
[
(room_id, key[1], ev_id, ev_id)
for key, ev_id in to_insert.items()
if key[0] == EventTypes.Member and self.is_mine_id(key[1])
],
)
txn.call_after(
self.store._curr_state_delta_stream_cache.entity_has_changed,
room_id,
stream_id,
)
# Invalidate the various caches
# Figure out the changes of membership to invalidate the
# `get_rooms_for_user` cache.
# We find out which membership events we may have deleted
            # and which we have added, then we invalidate the caches for all
# those users.
members_changed = {
state_key
for ev_type, state_key in itertools.chain(to_delete, to_insert)
if ev_type == EventTypes.Member
}
for member in members_changed:
txn.call_after(
self.store.get_rooms_for_user_with_stream_ordering.invalidate,
(member,),
)
self.store._invalidate_state_caches_and_stream(
txn, room_id, members_changed
)
def _upsert_room_version_txn(self, txn: LoggingTransaction, room_id: str):
"""Update the room version in the database based off current state
events.
This is used when we're about to delete current state and we want to
ensure that the `rooms.room_version` column is up to date.
"""
sql = """
SELECT json FROM event_json
INNER JOIN current_state_events USING (room_id, event_id)
WHERE room_id = ? AND type = ? AND state_key = ?
"""
txn.execute(sql, (room_id, EventTypes.Create, ""))
row = txn.fetchone()
if row:
event_json = db_to_json(row[0])
content = event_json.get("content", {})
creator = content.get("creator")
room_version_id = content.get("room_version", RoomVersions.V1.identifier)
self.db_pool.simple_upsert_txn(
txn,
table="rooms",
keyvalues={"room_id": room_id},
values={"room_version": room_version_id},
insertion_values={"is_public": False, "creator": creator},
)
def _update_forward_extremities_txn(
self, txn, new_forward_extremities, max_stream_order
):
for room_id, new_extrem in new_forward_extremities.items():
self.db_pool.simple_delete_txn(
txn, table="event_forward_extremities", keyvalues={"room_id": room_id}
)
txn.call_after(
self.store.get_latest_event_ids_in_room.invalidate, (room_id,)
)
self.db_pool.simple_insert_many_txn(
txn,
table="event_forward_extremities",
values=[
{"event_id": ev_id, "room_id": room_id}
for room_id, new_extrem in new_forward_extremities.items()
for ev_id in new_extrem
],
)
# We now insert into stream_ordering_to_exterm a mapping from room_id,
        # new stream_ordering to new forward extremities in the room.
        # This allows us to later efficiently look up the forward extremities
# for a room before a given stream_ordering
self.db_pool.simple_insert_many_txn(
txn,
table="stream_ordering_to_exterm",
values=[
{
"room_id": room_id,
"event_id": event_id,
"stream_ordering": max_stream_order,
}
for room_id, new_extrem in new_forward_extremities.items()
for event_id in new_extrem
],
)
@classmethod
def _filter_events_and_contexts_for_duplicates(cls, events_and_contexts):
"""Ensure that we don't have the same event twice.
Pick the earliest non-outlier if there is one, else the earliest one.
Args:
events_and_contexts (list[(EventBase, EventContext)]):
Returns:
list[(EventBase, EventContext)]: filtered list
"""
new_events_and_contexts = OrderedDict()
for event, context in events_and_contexts:
prev_event_context = new_events_and_contexts.get(event.event_id)
if prev_event_context:
if not event.internal_metadata.is_outlier():
if prev_event_context[0].internal_metadata.is_outlier():
# To ensure correct ordering we pop, as OrderedDict is
# ordered by first insertion.
new_events_and_contexts.pop(event.event_id, None)
new_events_and_contexts[event.event_id] = (event, context)
else:
new_events_and_contexts[event.event_id] = (event, context)
return list(new_events_and_contexts.values())
def _update_room_depths_txn(self, txn, events_and_contexts, backfilled):
"""Update min_depth for each room
Args:
txn (twisted.enterprise.adbapi.Connection): db connection
events_and_contexts (list[(EventBase, EventContext)]): events
we are persisting
backfilled (bool): True if the events were backfilled
"""
depth_updates = {}
for event, context in events_and_contexts:
            # Remove any existing cache entries for the event_ids
txn.call_after(self.store._invalidate_get_event_cache, event.event_id)
if not backfilled:
txn.call_after(
self.store._events_stream_cache.entity_has_changed,
event.room_id,
event.internal_metadata.stream_ordering,
)
if not event.internal_metadata.is_outlier() and not context.rejected:
depth_updates[event.room_id] = max(
event.depth, depth_updates.get(event.room_id, event.depth)
)
for room_id, depth in depth_updates.items():
self._update_min_depth_for_room_txn(txn, room_id, depth)
def _update_outliers_txn(self, txn, events_and_contexts):
"""Update any outliers with new event info.
This turns outliers into ex-outliers (unless the new event was
rejected).
Args:
txn (twisted.enterprise.adbapi.Connection): db connection
events_and_contexts (list[(EventBase, EventContext)]): events
we are persisting
Returns:
list[(EventBase, EventContext)] new list, without events which
are already in the events table.
"""
txn.execute(
"SELECT event_id, outlier FROM events WHERE event_id in (%s)"
% (",".join(["?"] * len(events_and_contexts)),),
[event.event_id for event, _ in events_and_contexts],
)
have_persisted = {event_id: outlier for event_id, outlier in txn}
to_remove = set()
for event, context in events_and_contexts:
if event.event_id not in have_persisted:
continue
to_remove.add(event)
if context.rejected:
# If the event is rejected then we don't care if the event
# was an outlier or not.
continue
outlier_persisted = have_persisted[event.event_id]
if not event.internal_metadata.is_outlier() and outlier_persisted:
# We received a copy of an event that we had already stored as
                # an outlier in the database. We now have some state at that event,
# so we need to update the state_groups table with that state.
# insert into event_to_state_groups.
try:
self._store_event_state_mappings_txn(txn, ((event, context),))
except Exception:
logger.exception("")
raise
metadata_json = encode_json(event.internal_metadata.get_dict())
sql = "UPDATE event_json SET internal_metadata = ? WHERE event_id = ?"
txn.execute(sql, (metadata_json, event.event_id))
# Add an entry to the ex_outlier_stream table to replicate the
# change in outlier status to our workers.
stream_order = event.internal_metadata.stream_ordering
state_group_id = context.state_group
self.db_pool.simple_insert_txn(
txn,
table="ex_outlier_stream",
values={
"event_stream_ordering": stream_order,
"event_id": event.event_id,
"state_group": state_group_id,
},
)
sql = "UPDATE events SET outlier = ? WHERE event_id = ?"
txn.execute(sql, (False, event.event_id))
# Update the event_backward_extremities table now that this
# event isn't an outlier any more.
self._update_backward_extremeties(txn, [event])
return [ec for ec in events_and_contexts if ec[0] not in to_remove]
def _store_event_txn(self, txn, events_and_contexts):
"""Insert new events into the event and event_json tables
Args:
txn (twisted.enterprise.adbapi.Connection): db connection
events_and_contexts (list[(EventBase, EventContext)]): events
we are persisting
"""
if not events_and_contexts:
# nothing to do here
return
def event_dict(event):
d = event.get_dict()
d.pop("redacted", None)
d.pop("redacted_because", None)
return d
self.db_pool.simple_insert_many_txn(
txn,
table="event_json",
values=[
{
"event_id": event.event_id,
"room_id": event.room_id,
"internal_metadata": encode_json(
event.internal_metadata.get_dict()
),
"json": encode_json(event_dict(event)),
"format_version": event.format_version,
}
for event, _ in events_and_contexts
],
)
self.db_pool.simple_insert_many_txn(
txn,
table="events",
values=[
{
"stream_ordering": event.internal_metadata.stream_ordering,
"topological_ordering": event.depth,
"depth": event.depth,
"event_id": event.event_id,
"room_id": event.room_id,
"type": event.type,
"processed": True,
"outlier": event.internal_metadata.is_outlier(),
"origin_server_ts": int(event.origin_server_ts),
"received_ts": self._clock.time_msec(),
"sender": event.sender,
"contains_url": (
"url" in event.content and isinstance(event.content["url"], str)
),
}
for event, _ in events_and_contexts
],
)
for event, _ in events_and_contexts:
if not event.internal_metadata.is_redacted():
# If we're persisting an unredacted event we go and ensure
# that we mark any redactions that reference this event as
# requiring censoring.
self.db_pool.simple_update_txn(
txn,
table="redactions",
keyvalues={"redacts": event.event_id},
updatevalues={"have_censored": False},
)
def _store_rejected_events_txn(self, txn, events_and_contexts):
"""Add rows to the 'rejections' table for received events which were
rejected
Args:
txn (twisted.enterprise.adbapi.Connection): db connection
events_and_contexts (list[(EventBase, EventContext)]): events
we are persisting
Returns:
list[(EventBase, EventContext)] new list, without the rejected
events.
"""
# Remove the rejected events from the list now that we've added them
# to the events table and the events_json table.
to_remove = set()
for event, context in events_and_contexts:
if context.rejected:
# Insert the event_id into the rejections table
self._store_rejections_txn(txn, event.event_id, context.rejected)
to_remove.add(event)
return [ec for ec in events_and_contexts if ec[0] not in to_remove]
def _update_metadata_tables_txn(
self, txn, events_and_contexts, all_events_and_contexts, backfilled
):
"""Update all the miscellaneous tables for new events
Args:
txn (twisted.enterprise.adbapi.Connection): db connection
events_and_contexts (list[(EventBase, EventContext)]): events
we are persisting
all_events_and_contexts (list[(EventBase, EventContext)]): all
events that we were going to persist. This includes events
we've already persisted, etc, that wouldn't appear in
events_and_context.
backfilled (bool): True if the events were backfilled
"""
# Insert all the push actions into the event_push_actions table.
self._set_push_actions_for_event_and_users_txn(
txn,
events_and_contexts=events_and_contexts,
all_events_and_contexts=all_events_and_contexts,
)
if not events_and_contexts:
# nothing to do here
return
for event, context in events_and_contexts:
if event.type == EventTypes.Redaction and event.redacts is not None:
# Remove the entries in the event_push_actions table for the
# redacted event.
self._remove_push_actions_for_event_id_txn(
txn, event.room_id, event.redacts
)
# Remove from relations table.
self._handle_redaction(txn, event.redacts)
# Update the event_forward_extremities, event_backward_extremities and
# event_edges tables.
self._handle_mult_prev_events(
txn, events=[event for event, _ in events_and_contexts]
)
for event, _ in events_and_contexts:
if event.type == EventTypes.Name:
# Insert into the event_search table.
self._store_room_name_txn(txn, event)
elif event.type == EventTypes.Topic:
# Insert into the event_search table.
self._store_room_topic_txn(txn, event)
elif event.type == EventTypes.Message:
# Insert into the event_search table.
self._store_room_message_txn(txn, event)
elif event.type == EventTypes.Redaction and event.redacts is not None:
# Insert into the redactions table.
self._store_redaction(txn, event)
elif event.type == EventTypes.Retention:
# Update the room_retention table.
self._store_retention_policy_for_room_txn(txn, event)
self._handle_event_relations(txn, event)
# Store the labels for this event.
labels = event.content.get(EventContentFields.LABELS)
if labels:
self.insert_labels_for_event_txn(
txn, event.event_id, labels, event.room_id, event.depth
)
if self._ephemeral_messages_enabled:
# If there's an expiry timestamp on the event, store it.
expiry_ts = event.content.get(EventContentFields.SELF_DESTRUCT_AFTER)
if isinstance(expiry_ts, int) and not event.is_state():
self._insert_event_expiry_txn(txn, event.event_id, expiry_ts)
# Insert into the room_memberships table.
self._store_room_members_txn(
txn,
[
event
for event, _ in events_and_contexts
if event.type == EventTypes.Member
],
backfilled=backfilled,
)
# Insert event_reference_hashes table.
self._store_event_reference_hashes_txn(
txn, [event for event, _ in events_and_contexts]
)
state_events_and_contexts = [
ec for ec in events_and_contexts if ec[0].is_state()
]
state_values = []
for event, context in state_events_and_contexts:
vals = {
"event_id": event.event_id,
"room_id": event.room_id,
"type": event.type,
"state_key": event.state_key,
}
# TODO: How does this work with backfilling?
if hasattr(event, "replaces_state"):
vals["prev_state"] = event.replaces_state
state_values.append(vals)
self.db_pool.simple_insert_many_txn(
txn, table="state_events", values=state_values
)
# Prefill the event cache
self._add_to_cache(txn, events_and_contexts)
def _add_to_cache(self, txn, events_and_contexts):
to_prefill = []
rows = []
N = 200
for i in range(0, len(events_and_contexts), N):
ev_map = {e[0].event_id: e[0] for e in events_and_contexts[i : i + N]}
if not ev_map:
break
sql = (
"SELECT "
" e.event_id as event_id, "
" r.redacts as redacts,"
" rej.event_id as rejects "
" FROM events as e"
" LEFT JOIN rejections as rej USING (event_id)"
" LEFT JOIN redactions as r ON e.event_id = r.redacts"
" WHERE "
)
clause, args = make_in_list_sql_clause(
self.database_engine, "e.event_id", list(ev_map)
)
txn.execute(sql + clause, args)
rows = self.db_pool.cursor_to_dict(txn)
for row in rows:
event = ev_map[row["event_id"]]
if not row["rejects"] and not row["redacts"]:
to_prefill.append(
_EventCacheEntry(event=event, redacted_event=None)
)
def prefill():
for cache_entry in to_prefill:
self.store._get_event_cache.prefill(
(cache_entry[0].event_id,), cache_entry
)
txn.call_after(prefill)
def _store_redaction(self, txn, event):
# invalidate the cache for the redacted event
txn.call_after(self.store._invalidate_get_event_cache, event.redacts)
self.db_pool.simple_insert_txn(
txn,
table="redactions",
values={
"event_id": event.event_id,
"redacts": event.redacts,
"received_ts": self._clock.time_msec(),
},
)
def insert_labels_for_event_txn(
self, txn, event_id, labels, room_id, topological_ordering
):
"""Store the mapping between an event's ID and its labels, with one row per
(event_id, label) tuple.
Args:
txn (LoggingTransaction): The transaction to execute.
event_id (str): The event's ID.
labels (list[str]): A list of text labels.
room_id (str): The ID of the room the event was sent to.
topological_ordering (int): The position of the event in the room's topology.
"""
return self.db_pool.simple_insert_many_txn(
txn=txn,
table="event_labels",
values=[
{
"event_id": event_id,
"label": label,
"room_id": room_id,
"topological_ordering": topological_ordering,
}
for label in labels
],
)
def _insert_event_expiry_txn(self, txn, event_id, expiry_ts):
"""Save the expiry timestamp associated with a given event ID.
Args:
txn (LoggingTransaction): The database transaction to use.
event_id (str): The event ID the expiry timestamp is associated with.
expiry_ts (int): The timestamp at which to expire (delete) the event.
"""
return self.db_pool.simple_insert_txn(
txn=txn,
table="event_expiry",
values={"event_id": event_id, "expiry_ts": expiry_ts},
)
def _store_event_reference_hashes_txn(self, txn, events):
"""Store a hash for a PDU
Args:
txn (cursor):
events (list): list of Events.
"""
vals = []
for event in events:
ref_alg, ref_hash_bytes = compute_event_reference_hash(event)
vals.append(
{
"event_id": event.event_id,
"algorithm": ref_alg,
"hash": memoryview(ref_hash_bytes),
}
)
self.db_pool.simple_insert_many_txn(
txn, table="event_reference_hashes", values=vals
)
def _store_room_members_txn(self, txn, events, backfilled):
"""Store a room member in the database.
"""
self.db_pool.simple_insert_many_txn(
txn,
table="room_memberships",
values=[
{
"event_id": event.event_id,
"user_id": event.state_key,
"sender": event.user_id,
"room_id": event.room_id,
"membership": event.membership,
"display_name": event.content.get("displayname", None),
"avatar_url": event.content.get("avatar_url", None),
}
for event in events
],
)
for event in events:
txn.call_after(
self.store._membership_stream_cache.entity_has_changed,
event.state_key,
event.internal_metadata.stream_ordering,
)
txn.call_after(
self.store.get_invited_rooms_for_local_user.invalidate,
(event.state_key,),
)
# We update the local_current_membership table only if the event is
# "current", i.e., its something that has just happened.
#
# This will usually get updated by the `current_state_events` handling,
            # unless it's an outlier, and an outlier is only "current" if it's an "out of
# band membership", like a remote invite or a rejection of a remote invite.
if (
self.is_mine_id(event.state_key)
and not backfilled
and event.internal_metadata.is_outlier()
and event.internal_metadata.is_out_of_band_membership()
):
self.db_pool.simple_upsert_txn(
txn,
table="local_current_membership",
keyvalues={"room_id": event.room_id, "user_id": event.state_key},
values={
"event_id": event.event_id,
"membership": event.membership,
},
)
def _handle_event_relations(self, txn, event):
"""Handles inserting relation data during peristence of events
Args:
txn
event (EventBase)
"""
relation = event.content.get("m.relates_to")
if not relation:
# No relations
return
rel_type = relation.get("rel_type")
if rel_type not in (
RelationTypes.ANNOTATION,
RelationTypes.REFERENCE,
RelationTypes.REPLACE,
):
# Unknown relation type
return
parent_id = relation.get("event_id")
if not parent_id:
# Invalid relation
return
aggregation_key = relation.get("key")
self.db_pool.simple_insert_txn(
txn,
table="event_relations",
values={
"event_id": event.event_id,
"relates_to_id": parent_id,
"relation_type": rel_type,
"aggregation_key": aggregation_key,
},
)
txn.call_after(self.store.get_relations_for_event.invalidate_many, (parent_id,))
txn.call_after(
self.store.get_aggregation_groups_for_event.invalidate_many, (parent_id,)
)
if rel_type == RelationTypes.REPLACE:
txn.call_after(self.store.get_applicable_edit.invalidate, (parent_id,))
def _handle_redaction(self, txn, redacted_event_id):
"""Handles receiving a redaction and checking whether we need to remove
any redacted relations from the database.
Args:
txn
redacted_event_id (str): The event that was redacted.
"""
self.db_pool.simple_delete_txn(
txn, table="event_relations", keyvalues={"event_id": redacted_event_id}
)
def _store_room_topic_txn(self, txn, event):
if hasattr(event, "content") and "topic" in event.content:
self.store_event_search_txn(
txn, event, "content.topic", event.content["topic"]
)
def _store_room_name_txn(self, txn, event):
if hasattr(event, "content") and "name" in event.content:
self.store_event_search_txn(
txn, event, "content.name", event.content["name"]
)
def _store_room_message_txn(self, txn, event):
if hasattr(event, "content") and "body" in event.content:
self.store_event_search_txn(
txn, event, "content.body", event.content["body"]
)
def _store_retention_policy_for_room_txn(self, txn, event):
if hasattr(event, "content") and (
"min_lifetime" in event.content or "max_lifetime" in event.content
):
if (
"min_lifetime" in event.content
and not isinstance(event.content.get("min_lifetime"), int)
) or (
"max_lifetime" in event.content
and not isinstance(event.content.get("max_lifetime"), int)
):
                # Ignore the event if one of the values isn't an integer.
return
self.db_pool.simple_insert_txn(
txn=txn,
table="room_retention",
values={
"room_id": event.room_id,
"event_id": event.event_id,
"min_lifetime": event.content.get("min_lifetime"),
"max_lifetime": event.content.get("max_lifetime"),
},
)
self.store._invalidate_cache_and_stream(
txn, self.store.get_retention_policy_for_room, (event.room_id,)
)
def store_event_search_txn(self, txn, event, key, value):
"""Add event to the search table
Args:
txn (cursor):
event (EventBase):
key (str):
value (str):
"""
self.store.store_search_entries_txn(
txn,
(
SearchEntry(
key=key,
value=value,
event_id=event.event_id,
room_id=event.room_id,
stream_ordering=event.internal_metadata.stream_ordering,
origin_server_ts=event.origin_server_ts,
),
),
)
def _set_push_actions_for_event_and_users_txn(
self, txn, events_and_contexts, all_events_and_contexts
):
"""Handles moving push actions from staging table to main
event_push_actions table for all events in `events_and_contexts`.
Also ensures that all events in `all_events_and_contexts` are removed
from the push action staging area.
Args:
events_and_contexts (list[(EventBase, EventContext)]): events
we are persisting
all_events_and_contexts (list[(EventBase, EventContext)]): all
events that we were going to persist. This includes events
we've already persisted, etc, that wouldn't appear in
events_and_context.
"""
sql = """
INSERT INTO event_push_actions (
room_id, event_id, user_id, actions, stream_ordering,
topological_ordering, notif, highlight
)
SELECT ?, event_id, user_id, actions, ?, ?, notif, highlight
FROM event_push_actions_staging
WHERE event_id = ?
"""
if events_and_contexts:
txn.executemany(
sql,
(
(
event.room_id,
event.internal_metadata.stream_ordering,
event.depth,
event.event_id,
)
for event, _ in events_and_contexts
),
)
for event, _ in events_and_contexts:
user_ids = self.db_pool.simple_select_onecol_txn(
txn,
table="event_push_actions_staging",
keyvalues={"event_id": event.event_id},
retcol="user_id",
)
for uid in user_ids:
txn.call_after(
self.store.get_unread_event_push_actions_by_room_for_user.invalidate_many,
(event.room_id, uid),
)
# Now we delete the staging area for *all* events that were being
# persisted.
txn.executemany(
"DELETE FROM event_push_actions_staging WHERE event_id = ?",
((event.event_id,) for event, _ in all_events_and_contexts),
)
def _remove_push_actions_for_event_id_txn(self, txn, room_id, event_id):
# Sad that we have to blow away the cache for the whole room here
txn.call_after(
self.store.get_unread_event_push_actions_by_room_for_user.invalidate_many,
(room_id,),
)
txn.execute(
"DELETE FROM event_push_actions WHERE room_id = ? AND event_id = ?",
(room_id, event_id),
)
def _store_rejections_txn(self, txn, event_id, reason):
self.db_pool.simple_insert_txn(
txn,
table="rejections",
values={
"event_id": event_id,
"reason": reason,
"last_check": self._clock.time_msec(),
},
)
def _store_event_state_mappings_txn(
self, txn, events_and_contexts: Iterable[Tuple[EventBase, EventContext]]
):
state_groups = {}
for event, context in events_and_contexts:
if event.internal_metadata.is_outlier():
continue
# if the event was rejected, just give it the same state as its
# predecessor.
if context.rejected:
state_groups[event.event_id] = context.state_group_before_event
continue
state_groups[event.event_id] = context.state_group
self.db_pool.simple_insert_many_txn(
txn,
table="event_to_state_groups",
values=[
{"state_group": state_group_id, "event_id": event_id}
for event_id, state_group_id in state_groups.items()
],
)
for event_id, state_group_id in state_groups.items():
txn.call_after(
self.store._get_state_group_for_event.prefill,
(event_id,),
state_group_id,
)
def _update_min_depth_for_room_txn(self, txn, room_id, depth):
min_depth = self.store._get_min_depth_interaction(txn, room_id)
if min_depth is not None and depth >= min_depth:
return
self.db_pool.simple_upsert_txn(
txn,
table="room_depth",
keyvalues={"room_id": room_id},
values={"min_depth": depth},
)
def _handle_mult_prev_events(self, txn, events):
"""
        For the given events, update the event edges table and forward and
backward extremities tables.
"""
self.db_pool.simple_insert_many_txn(
txn,
table="event_edges",
values=[
{
"event_id": ev.event_id,
"prev_event_id": e_id,
"room_id": ev.room_id,
"is_state": False,
}
for ev in events
for e_id in ev.prev_event_ids()
],
)
self._update_backward_extremeties(txn, events)
def _update_backward_extremeties(self, txn, events):
"""Updates the event_backward_extremities tables based on the new/updated
events being persisted.
This is called for new events *and* for events that were outliers, but
are now being persisted as non-outliers.
Forward extremities are handled when we first start persisting the events.
"""
events_by_room = {}
for ev in events:
events_by_room.setdefault(ev.room_id, []).append(ev)
query = (
"INSERT INTO event_backward_extremities (event_id, room_id)"
" SELECT ?, ? WHERE NOT EXISTS ("
" SELECT 1 FROM event_backward_extremities"
" WHERE event_id = ? AND room_id = ?"
" )"
" AND NOT EXISTS ("
" SELECT 1 FROM events WHERE event_id = ? AND room_id = ? "
" AND outlier = ?"
" )"
)
txn.executemany(
query,
[
(e_id, ev.room_id, e_id, ev.room_id, e_id, ev.room_id, False)
for ev in events
for e_id in ev.prev_event_ids()
if not ev.internal_metadata.is_outlier()
],
)
query = (
"DELETE FROM event_backward_extremities"
" WHERE event_id = ? AND room_id = ?"
)
txn.executemany(
query,
[
(ev.event_id, ev.room_id)
for ev in events
if not ev.internal_metadata.is_outlier()
],
)
|
the-stack_0_21496 | # ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
from unittest.mock import Mock, patch
from urllib.parse import urlparse
from azure.core.exceptions import ClientAuthenticationError
from azure.core.pipeline.policies import SansIOHTTPPolicy
from azure.identity._constants import EnvironmentVariables
from azure.identity._internal.user_agent import USER_AGENT
from azure.identity.aio import AuthorizationCodeCredential
import msal
import pytest
from helpers import build_aad_response, mock_response, Request
from helpers_async import async_validating_transport, AsyncMockTransport
pytestmark = pytest.mark.asyncio
async def test_no_scopes():
"""The credential should raise ValueError when get_token is called with no scopes"""
credential = AuthorizationCodeCredential("tenant-id", "client-id", "auth-code", "http://localhost")
with pytest.raises(ValueError):
await credential.get_token()
async def test_policies_configurable():
policy = Mock(spec_set=SansIOHTTPPolicy, on_request=Mock())
async def send(*_, **__):
return mock_response(json_payload=build_aad_response(access_token="**"))
credential = AuthorizationCodeCredential(
"tenant-id", "client-id", "auth-code", "http://localhost", policies=[policy], transport=Mock(send=send)
)
await credential.get_token("scope")
assert policy.on_request.called
async def test_close():
transport = AsyncMockTransport()
credential = AuthorizationCodeCredential(
"tenant-id", "client-id", "auth-code", "http://localhost", transport=transport
)
await credential.close()
assert transport.__aexit__.call_count == 1
async def test_context_manager():
transport = AsyncMockTransport()
credential = AuthorizationCodeCredential(
"tenant-id", "client-id", "auth-code", "http://localhost", transport=transport
)
async with credential:
assert transport.__aenter__.call_count == 1
assert transport.__aenter__.call_count == 1
assert transport.__aexit__.call_count == 1
async def test_user_agent():
transport = async_validating_transport(
requests=[Request(required_headers={"User-Agent": USER_AGENT})],
responses=[mock_response(json_payload=build_aad_response(access_token="**"))],
)
credential = AuthorizationCodeCredential(
"tenant-id", "client-id", "auth-code", "http://localhost", transport=transport
)
await credential.get_token("scope")
async def test_tenant_id():
transport = async_validating_transport(
requests=[Request(required_headers={"User-Agent": USER_AGENT})],
responses=[mock_response(json_payload=build_aad_response(access_token="**"))],
)
credential = AuthorizationCodeCredential(
"tenant-id", "client-id", "auth-code", "http://localhost", transport=transport
)
await credential.get_token("scope", tenant_id="tenant_id")
async def test_auth_code_credential():
client_id = "client id"
tenant_id = "tenant"
expected_code = "auth code"
redirect_uri = "https://localhost"
expected_access_token = "access"
expected_refresh_token = "refresh"
expected_scope = "scope"
auth_response = build_aad_response(access_token=expected_access_token, refresh_token=expected_refresh_token)
transport = async_validating_transport(
requests=[
Request( # first call should redeem the auth code
url_substring=tenant_id,
required_data={
"client_id": client_id,
"code": expected_code,
"grant_type": "authorization_code",
"redirect_uri": redirect_uri,
"scope": expected_scope,
},
),
Request( # third call should redeem the refresh token
url_substring=tenant_id,
required_data={
"client_id": client_id,
"grant_type": "refresh_token",
"refresh_token": expected_refresh_token,
"scope": expected_scope,
},
),
],
responses=[mock_response(json_payload=auth_response)] * 2,
)
cache = msal.TokenCache()
credential = AuthorizationCodeCredential(
client_id=client_id,
tenant_id=tenant_id,
authorization_code=expected_code,
redirect_uri=redirect_uri,
transport=transport,
cache=cache,
)
# first call should redeem the auth code
token = await credential.get_token(expected_scope)
assert token.token == expected_access_token
assert transport.send.call_count == 1
# no auth code -> credential should return cached token
token = await credential.get_token(expected_scope)
assert token.token == expected_access_token
assert transport.send.call_count == 1
# no auth code, no cached token -> credential should redeem refresh token
cached_access_token = cache.find(cache.CredentialType.ACCESS_TOKEN)[0]
cache.remove_at(cached_access_token)
token = await credential.get_token(expected_scope)
assert token.token == expected_access_token
assert transport.send.call_count == 2
async def test_multitenant_authentication():
first_tenant = "first-tenant"
first_token = "***"
second_tenant = "second-tenant"
second_token = first_token * 2
async def send(request, **_):
parsed = urlparse(request.url)
tenant = parsed.path.split("/")[1]
assert tenant in (first_tenant, second_tenant), 'unexpected tenant "{}"'.format(tenant)
token = first_token if tenant == first_tenant else second_token
return mock_response(json_payload=build_aad_response(access_token=token, refresh_token="**"))
credential = AuthorizationCodeCredential(
first_tenant,
"client-id",
"authcode",
"https://localhost",
transport=Mock(send=send),
)
token = await credential.get_token("scope")
assert token.token == first_token
token = await credential.get_token("scope", tenant_id=first_tenant)
assert token.token == first_token
token = await credential.get_token("scope", tenant_id=second_tenant)
assert token.token == second_token
# should still default to the first tenant
token = await credential.get_token("scope")
assert token.token == first_token
async def test_multitenant_authentication_not_allowed():
expected_tenant = "expected-tenant"
expected_token = "***"
async def send(request, **_):
parsed = urlparse(request.url)
tenant = parsed.path.split("/")[1]
token = expected_token if tenant == expected_tenant else expected_token * 2
return mock_response(json_payload=build_aad_response(access_token=token, refresh_token="**"))
credential = AuthorizationCodeCredential(
expected_tenant, "client-id", "authcode", "https://localhost", transport=Mock(send=send)
)
token = await credential.get_token("scope")
assert token.token == expected_token
token = await credential.get_token("scope", tenant_id=expected_tenant)
assert token.token == expected_token
token = await credential.get_token("scope", tenant_id="un" + expected_tenant)
assert token.token == expected_token * 2
with patch.dict("os.environ", {EnvironmentVariables.AZURE_IDENTITY_DISABLE_MULTITENANTAUTH: "true"}):
token = await credential.get_token("scope", tenant_id="un" + expected_tenant)
assert token.token == expected_token
|
the-stack_0_21498 | import time
import copy
import base64
import logging
import redis
class HippoDataSource(object):
def __init__(self, hippo_queue, working_count, task_class, hippo_redis, namespace='', inputs=None):
self.hippo_queue = hippo_queue
self.hippo_redis = hippo_redis
self.working_count = working_count
self.definition = copy.copy(self.hippo_queue.definition)
self.last_run_tstamp = self.definition['queue'].get('last_run_tstamp')
self.last_task_queued_tstamp = self.definition['queue'].get('last_task_queued_tstamp')
self.frequency_seconds = self.definition['queue'].get('frequency_seconds',60)
self.max_concurrent = self.definition['queue'].get('max_concurrent',self.definition.get('max_concurrent',10000))
self.batch_size = self.definition['queue'].get('batch_size',1)
self.batch_separator = self.definition['queue'].get('batch_separator','|')
self.new_task_limit = self.max_concurrent * self.batch_size - working_count
self.task_class = task_class
if inputs:
for input in inputs:
ns = self.definition['queue'].get(namespace,{})
default = inputs[input].get('default')
setattr(self,input,ns.get(input,default))
def too_soon(self):
cur_tstamp = int(time.time())
if self.last_run_tstamp and cur_tstamp < self.last_run_tstamp + self.frequency_seconds:
return True
return False
def process(self):
# stub, this should be implemented by child classes
pass
def process_source(self):
# update last_run_tstamp before we process in case processing takes a few seconds
self.hippo_queue.definition['queue']['last_run_tstamp'] = int(time.time())
try:
self.hippo_queue.save()
except redis.exceptions.ConnectionError:
logging.warning('Redis Connection Error in Queue Worker Thread')
try:
self.process()
except Exception as e:
logging.warning('Error processing queue data source')
logging.warning(e)
# save queue again in case the processing updated any variables
try:
self.hippo_queue.save()
except redis.exceptions.ConnectionError:
logging.warning('Redis Connection Error in Queue Worker Thread')
def create_task_tuples(self, item_tuples):
        # Tuples are (str, timestamp) pairs. Combined they form a unique key for a
        # processed item, and this function prevents duplicate processing of the
        # same item within a 24-hour window.
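        # Dedup keys look roughly like (illustrative):
        #   hippo:tasktuple:<queue_id>_<base64(item) without '=' padding>_<timestamp>
        # and expire after 24 hours via the ex=86400 below.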
ok_items = []
for item, tstamp in item_tuples:
key = 'hippo:tasktuple:' + str(self.hippo_queue.id) + '_' + base64.b64encode(item.encode()).decode().replace('=','') + '_' + str(tstamp)
val = self.hippo_redis.get(key)
if val:
logging.warning('Skipping task creation because ' + item + ' ' + str(tstamp) + ' has already been processed')
else:
ok_items.append(item)
self.hippo_redis.set(key,'processed',ex=86400)
if ok_items:
self.create_tasks(ok_items)
def create_tasks(self, items):
if items:
self.hippo_queue.definition['queue']['last_task_queued_tstamp'] = int(time.time())
chunks = [items[i:i + self.batch_size] for i in range(0, len(items), self.batch_size)]
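            # e.g. (illustrative) items [a, b, c, d, e] with batch_size=2 are
            # split into batches [a, b], [c, d], [e].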
for batch in chunks:
data = self.batch_separator.join([s.decode() if not isinstance(s,str) else s for s in batch])
b64data = base64.b64encode(data.encode()).decode()
task_def = copy.deepcopy(self.definition)
del task_def['queue']
task_def['max_concurrent'] = self.max_concurrent
task_def['cmd'] = task_def['cmd'].replace('$HIPPO_DATA_BASE64',b64data).replace('$HIPPO_DATA',data)
task_def.setdefault('env',{})
for env_name in task_def['env']:
task_def['env'][env_name] = task_def['env'][env_name].replace('$HIPPO_DATA_BASE64',b64data).replace('$HIPPO_DATA',data)
task_def['env']['HIPPO_DATA'] = data
task_def['env']['HIPPO_DATA_BASE64'] = b64data
task = self.task_class(definition=task_def, redis_client=self.hippo_redis)
task.queue()
|
the-stack_0_21499 |
"""
Module for rendering axes.
"""
from __future__ import absolute_import
from __future__ import unicode_literals
import vtk
class Axes(object):
"""
Axes object.
"""
def __init__(self, renWinInteract):
# create axes
self._axes = vtk.vtkAxesActor()
self._axes.SetShaftTypeToCylinder()
self._axes.GetXAxisCaptionActor2D().GetCaptionTextProperty().SetColor(1, 0, 0)
self._axes.GetXAxisCaptionActor2D().GetCaptionTextProperty().SetFontFamilyToArial()
self._axes.GetXAxisCaptionActor2D().GetCaptionTextProperty().ShadowOff()
self._axes.GetYAxisCaptionActor2D().GetCaptionTextProperty().SetColor(0, 1, 0)
self._axes.GetYAxisCaptionActor2D().GetCaptionTextProperty().SetFontFamilyToArial()
self._axes.GetYAxisCaptionActor2D().GetCaptionTextProperty().ShadowOff()
self._axes.GetZAxisCaptionActor2D().GetCaptionTextProperty().SetColor(0, 0, 1)
self._axes.GetZAxisCaptionActor2D().GetCaptionTextProperty().SetFontFamilyToArial()
self._axes.GetZAxisCaptionActor2D().GetCaptionTextProperty().ShadowOff()
# create axes marker
self._marker = vtk.vtkOrientationMarkerWidget()
self._marker.SetInteractor(renWinInteract)
self._marker.SetOrientationMarker(self._axes)
self._marker.SetViewport(0, 0, 0.25, 0.25)
self._marker.SetEnabled(0)
self._enabled = False
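        # Typical usage (illustrative): construct Axes(render_window_interactor)
        # and call toggle() to show or hide the orientation marker.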
def isEnabled(self):
"""Returns True if the axes is enabled."""
return self._enabled
def toggle(self):
"""Toggle axes visibilty."""
if self.isEnabled():
self.remove()
else:
self.add()
def add(self):
"""Add the axis label."""
if not self.isEnabled():
self._marker.SetEnabled(1)
self._enabled = True
def remove(self):
"""Remove the axis label."""
if self.isEnabled():
self._marker.SetEnabled(0)
self._enabled = False
|
the-stack_0_21500 | from functools import partial
from typing import Any, List, Optional, Union
import torch
from torchvision.prototype.transforms import ImageNetEval
from torchvision.transforms.functional import InterpolationMode
from ....models.quantization.mobilenetv3 import (
InvertedResidualConfig,
QuantizableInvertedResidual,
QuantizableMobileNetV3,
_replace_relu,
)
from .._api import WeightsEnum, Weights
from .._meta import _IMAGENET_CATEGORIES
from .._utils import handle_legacy_interface, _ovewrite_named_param
from ..mobilenetv3 import MobileNet_V3_Large_Weights, _mobilenet_v3_conf
__all__ = [
"QuantizableMobileNetV3",
"MobileNet_V3_Large_QuantizedWeights",
"mobilenet_v3_large",
]
def _mobilenet_v3_model(
inverted_residual_setting: List[InvertedResidualConfig],
last_channel: int,
weights: Optional[WeightsEnum],
progress: bool,
quantize: bool,
**kwargs: Any,
) -> QuantizableMobileNetV3:
if weights is not None:
_ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
if "backend" in weights.meta:
_ovewrite_named_param(kwargs, "backend", weights.meta["backend"])
backend = kwargs.pop("backend", "qnnpack")
model = QuantizableMobileNetV3(inverted_residual_setting, last_channel, block=QuantizableInvertedResidual, **kwargs)
_replace_relu(model)
if quantize:
model.fuse_model()
model.qconfig = torch.quantization.get_default_qat_qconfig(backend)
torch.quantization.prepare_qat(model, inplace=True)
if weights is not None:
model.load_state_dict(weights.get_state_dict(progress=progress))
if quantize:
torch.quantization.convert(model, inplace=True)
model.eval()
return model
class MobileNet_V3_Large_QuantizedWeights(WeightsEnum):
ImageNet1K_QNNPACK_V1 = Weights(
url="https://download.pytorch.org/models/quantized/mobilenet_v3_large_qnnpack-5bcacf28.pth",
transforms=partial(ImageNetEval, crop_size=224),
meta={
"size": (224, 224),
"categories": _IMAGENET_CATEGORIES,
"interpolation": InterpolationMode.BILINEAR,
"backend": "qnnpack",
"quantization": "qat",
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#qat-mobilenetv3",
"unquantized": MobileNet_V3_Large_Weights.ImageNet1K_V1,
"acc@1": 73.004,
"acc@5": 90.858,
},
)
default = ImageNet1K_QNNPACK_V1
@handle_legacy_interface(
weights=(
"pretrained",
lambda kwargs: MobileNet_V3_Large_QuantizedWeights.ImageNet1K_QNNPACK_V1
if kwargs.get("quantize", False)
else MobileNet_V3_Large_Weights.ImageNet1K_V1,
)
)
def mobilenet_v3_large(
*,
weights: Optional[Union[MobileNet_V3_Large_QuantizedWeights, MobileNet_V3_Large_Weights]] = None,
progress: bool = True,
quantize: bool = False,
**kwargs: Any,
) -> QuantizableMobileNetV3:
weights = (MobileNet_V3_Large_QuantizedWeights if quantize else MobileNet_V3_Large_Weights).verify(weights)
inverted_residual_setting, last_channel = _mobilenet_v3_conf("mobilenet_v3_large", **kwargs)
return _mobilenet_v3_model(inverted_residual_setting, last_channel, weights, progress, quantize, **kwargs)
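# Example usage (illustrative):
#   model = mobilenet_v3_large(
#       weights=MobileNet_V3_Large_QuantizedWeights.ImageNet1K_QNNPACK_V1,
#       quantize=True,
#   )
#   model.eval()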
|
the-stack_0_21502 | #!/usr/bin/python
from basis import msg
def examples():
"""Prints examples of using the script to the console using colored output.
"""
script = "BASIS: 1D Quantum Potential Solver via Basis Expansion"
explain = ("For simple 1D potentials such as Kronig-Penny, infinite "
"square well, harmonic oscillator, etc. this code produces "
"a numerical solution via basis expansion.")
contents = [(("Solve the potential in `bump.cfg` using 200 basis functions."),
"basis_solve.py 200 -potential bump.cfg",
"This saves the solution to the default 'output.dat' "
"file in the current directory."),
(("Solve the potential `sho.cfg`, save the solution to "
"`mysol.out`."),
"basis_solve.py 1000 -potential sho.cfg -outfile mysol.out","")]
required = ("REQUIRED: potential config file `pot.cfg`.")
output = ("RETURNS: plot window if `-plot` is specified; solution "
"output is written to file.")
details = ("The plotting uses `matplotlib` with the default configured "
"backend. If you want a different backend, set the rc config "
"for `matplotlib` using online documentation.")
outputfmt = ("")
msg.example(script, explain, contents, required, output, outputfmt, details)
script_options = {
"N": dict(default=100, type=int,
help=("Specifies how many basis functions to use in the expansion "
"solution.")),
"-plot": dict(action="store_true",
help=("Plots the solution.")),
"-potential": dict(help="Path to the file that has the potential "
"parameters."),
"-outfile": dict(default="output.dat",
help="Override the default output file name.")
}
"""dict: default command-line arguments and their
:meth:`argparse.ArgumentParser.add_argument` keyword arguments.
"""
def _parser_options():
"""Parses the options and arguments from the command line."""
#We have two options: get some of the details from the config file,
import argparse
from basis import base
pdescr = "1D Quantum Potential Solver."
parser = argparse.ArgumentParser(parents=[base.bparser], description=pdescr)
for arg, options in script_options.items():
parser.add_argument(arg, **options)
args = base.exhandler(examples, parser)
if args is None:
return
return args
def run(args):
return 0
if __name__ == '__main__':
run(_parser_options())
|
the-stack_0_21503 | #
# Copyright (c) 2018 ISP RAS (http://www.ispras.ru)
# Ivannikov Institute for System Programming of the Russian Academy of Sciences
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
from operator import attrgetter
from core.vtg.emg.common.process import Subprocess, Receive, Dispatch
from core.vtg.emg.common.c import Variable
class FSA:
"""
    Class intended for representing a finite state machine (FSA) generated from a process of an intermediate model.
    Translation is based on extracting semantics from the ASTs given within the Process objects.
"""
def __init__(self, process):
"""
Import Process object and generate finite state machine on base of it.
:param process: Process object
"""
self.process = process
self.states = set()
self.__id_cnt = 0
# Generate AST states
sp_asts = dict()
sp_processed = set()
asts = list()
def generate_nodes(process, pr_ast):
"""
            Generates states from the AST nodes but does not assign any edges. It explores the AST node
            dictionary, extracting all atomic actions (Dispatches, Calls, etc.), and attaches each generated
            state to the corresponding node in the AST.
:param process:
:param pr_ast: AST node dictionary.
:return: Initial states of the process.
"""
asts = [[pr_ast, True]]
initial_states = set()
while len(asts) > 0:
ast, initflag = asts.pop()
# Unwind AST nodes with operators and atomic actions
if ast['type'] == 'choice':
for action in ast['actions']:
asts.append([action, initflag])
elif ast['type'] == 'concatenation':
for action in ast['actions']:
if initflag:
asts.append([action, initflag])
initflag = False
else:
asts.append([action, initflag])
else:
# Generate State for atomic action
node = State(ast, self.__yield_id())
if ast['name'] not in process.actions:
raise KeyError("Process {!r} does not have action description {!r}".
format(process.name, ast['name']))
node.action = process.actions[ast['name']]
if isinstance(process.actions[ast['name']], Receive):
node.action.replicative = node.desc['replicative']
if isinstance(process.actions[ast['name']], Dispatch):
node.action.broadcast = node.desc['broadcast']
# Save State in AST
self.states.add(node)
ast['node'] = node
if initflag:
initial_states.add(node)
return initial_states
# Generate nodes for subprocesses first
for name in [name for name in process.actions.keys() if isinstance(process.actions[name], Subprocess)]:
            # Make a copy of the original AST to allow making changes there for more convenient exploration
ast = copy.copy(process.actions[name].process_ast)
generate_nodes(process, ast)
sp_asts[name] = ast
# Copy main process AST to allow changes introducing
p_ast = copy.copy(process.process_ast)
# Generates states exploring the AST of the process itself
generate_nodes(process, p_ast)
asts.append([p_ast, None])
def resolve_last(pr_ast):
"""
            Get the AST (tree or subtree) and try to determine which actions from this AST are the latest. It
            unwinds choices and sequences until it reaches an atomic action like Dispatch, Call, etc.
:param pr_ast: AST dictionary.
:return: Set of State objects.
"""
if not pr_ast:
return set()
asts = [pr_ast]
last = set()
while len(asts) > 0:
ast = asts.pop()
if ast['type'] == 'choice':
for action in ast['actions']:
asts.append(action)
elif ast['type'] == 'concatenation':
asts.append(ast['actions'][-1])
else:
last.add(ast['node'])
return last
        # Explore the AST and determine the order of action invocation. Exploration goes from the latest action
        # to the first one(s). Order is set up by adding successors and predecessors to each State.
while len(asts) > 0:
ast, prev = asts.pop()
# Unwind AST nodes
if ast['type'] == 'choice':
for action in ast['actions']:
asts.append([action, prev])
elif ast['type'] == 'concatenation':
for action in ast['actions']:
asts.append([action, prev])
prev = action
else:
if ast['type'] == 'subprocess':
pair = "{} {}".format(ast['name'], str(prev))
if pair not in sp_processed:
# Mark processed state
sp_processed.add(pair)
asts.append([sp_asts[ast['name']], ast])
# Determine particular predecessors
last = resolve_last(prev)
if len(last) > 0 and prev['type'] != "subprocess":
# Filter out subprocesses if there are
last = [s for s in last if not isinstance(s.action, Subprocess)]
for pre_state in last:
ast['node'].insert_predecessor(pre_state)
return
@property
def initial_states(self):
"""
Returns initial states of the process.
:return: Sorted list with starting process State objects.
"""
initial_states = sorted([s for s in self.states if len(s.predecessors) == 0], key=attrgetter('identifier'))
if len(initial_states) == 0:
            raise ValueError('FSA generated for process {!r} and category {!r} has no entry states'.
format(self.process.name, self.process.category))
return initial_states
def resolve_state(self, identifier):
"""
Resolve and returns process State object by its identifier.
:param identifier: Int identifier
:return: State object.
"""
for state in (s for s in self.states if s.identifier == identifier):
return state
raise KeyError("State '{}' does not exist in process '{}' of category '{}'".
format(identifier, self.process.name, self.process.category))
def clone_state(self, node):
"""
Copy given State object, assign new identifier and place it as an alternative action with the same successors
and predecessors in FSA.
:param node: State object to copy
:return: New State object
"""
new_desc = copy.copy(node.desc)
new_id = self.__yield_id()
new_state = State(new_desc, new_id)
new_state.action = node.action
for pred in node.predecessors:
new_state.insert_predecessor(pred)
for succ in node.successors:
new_state.insert_successor(succ)
self.states.add(new_state)
return new_state
def add_new_predecessor(self, node, action):
"""
Add new predecessor State creating it from the action object (Condition, Dispatch, etc.)
:param node: State object to which new predecessor should be attached.
:param action: action object (Condition, Dispatch, etc.).
:return: New State object.
"""
new = self.new_state(action)
for pred in node.predecessors:
pred.replace_successor(node, new)
node.insert_predecessor(new)
return new
def add_new_successor(self, node, action):
"""
Add new successor State creating it from the action object (Condition, Dispatch, etc.)
:param node: State object to which new successor should be attached.
:param action: action object (Condition, Dispatch, etc.).
:return: New State object.
"""
new = self.new_state(action)
for succ in node.successors:
succ.replace_predecessor(node, new)
node.insert_successor(new)
return new
def new_state(self, action):
"""
Generates new State object for given action. Action can be None to create artificial states in FSA.
:param action: None or process action (Condition, Dispatch, etc.) object.
:return: New State object.
"""
if action:
desc = {
'label': '<{}>'.format(action.name)
}
else:
desc = {
'label': 'Artificial state'
}
new = State(desc, self.__yield_id())
new.action = action
self.states.add(new)
return new
def __yield_id(self):
self.__id_cnt += 1
return self.__id_cnt
class State:
"""Represent action node in FSA generated by process AST."""
def __init__(self, desc, identifier):
self.identifier = identifier
self.desc = desc
self._predecessors = set()
self._successors = set()
self.action = None
self.code = None
@property
def successors(self):
"""
Returns deterministically list with all next states.
:return: List with State objects.
"""
return sorted(self._successors, key=attrgetter('identifier'))
@property
def predecessors(self):
"""
Returns deterministically list with all previous states.
:return: List with State objects.
"""
return sorted(self._predecessors, key=attrgetter('identifier'))
def insert_successor(self, new):
"""
Link given State object to be a successor of this state.
:param new: New next State object.
:return: None
"""
self.add_successor(new)
new.add_predecessor(self)
def insert_predecessor(self, new):
"""
Link given State object to be a predecessor of this state.
:param new: New previous State object.
:return: None
"""
self.add_predecessor(new)
new.add_successor(self)
def replace_successor(self, old, new):
"""
Replace given successor State object with a new State object.
:param old: Old next State object.
:param new: New next State object.
:return: None
"""
self.remove_successor(old)
old.remove_predecessor(self)
self.add_successor(new)
new.add_predecessor(self)
def replace_predecessor(self, old, new):
"""
Replace given predecessor State object with a new State object.
:param old: Old predecessor State object.
:param new: New predecessor State object.
:return: None
"""
self.remove_predecessor(old)
old.remove_successor(self)
self.add_predecessor(new)
new.add_successor(self)
def add_successor(self, new):
"""
Link given State object to be a successor.
:param new: New next State object.
:return: None
"""
self._successors.add(new)
def add_predecessor(self, new):
"""
Link given State object to be a predecessor.
:param new: New previous State object.
:return: None
"""
self._predecessors.add(new)
def remove_successor(self, old):
"""
Unlink given State object and remove it from successors.
:param old: State object.
:return: None
"""
if old in self._successors:
self._successors.remove(old)
def remove_predecessor(self, old):
"""
Unlink given State object and remove it from predecessors.
:param old: State object.
:return: None
"""
if old in self._predecessors:
self._predecessors.remove(old)
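# A minimal usage sketch of the linking helpers above (the states below are hypothetical and
# only illustrate the intended call pattern):
#
#     a, b, c = State({'label': 'A'}, 1), State({'label': 'B'}, 2), State({'label': 'C'}, 3)
#     a.insert_successor(b)      # a -> b; b also records a as a predecessor
#     a.replace_successor(b, c)  # a -> c; both directions of the a <-> b edge are removed
#
# Every helper updates both endpoints of an edge, which keeps the predecessor and successor
# sets consistent without extra bookkeeping in the callers.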
class Automaton:
"""
    This is a more abstract representation of an FSA. It contains both the FSA object generated for the process
    object and the process object itself. It also contains variables generated for the labels of the process and
    simplifies work with them.
"""
def __init__(self, process, identifier):
# Set default values
self.__label_variables = {}
self.__file = None
# Set given values
self.process = process
self.identifier = identifier
self.self_parallelism = True
# Generate FSA itself
self.fsa = FSA(self.process)
self.variables()
def variables(self, only_used=False):
"""
        Generate a variable for each process label, or just return the already generated list of variables.
        :param only_used: If True, return only variables that are used at least once.
        :return: List with Variable objects.
"""
variables = []
# Generate variable for each label
for label in [self.process.labels[name] for name in self.process.labels.keys()]:
var = self.determine_variable(label, shadow_use=True)
if var:
                variables.append(var)
if only_used:
variables = [v for v in variables if v.use > 0]
return variables
def determine_variable(self, label, shadow_use=False):
"""
        Take a Label object and generate a variable for it, or just return the existing Variable object. Also
        increases the usage counter of the variable unless shadow_use is set.
:param label: Label object.
:param shadow_use: Do not increase the counter of usages of the variable if True.
:return: Variable object which corresponds to the label.
"""
if label.name in self.__label_variables and "default" in self.__label_variables[label.name]:
if not shadow_use:
self.__label_variables[label.name]["default"].use += 1
return self.__label_variables[label.name]["default"]
else:
if label.declaration:
var = Variable("ldv_{}_{}".format(self.identifier, label.name), label.declaration)
if label.value:
var.value = label.value
if label.name not in self.__label_variables:
self.__label_variables[label.name] = {}
self.__label_variables[label.name]["default"] = var
if not shadow_use:
self.__label_variables[label.name]["default"].use += 1
return self.__label_variables[label.name]["default"]
else:
return None
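# Usage sketch for the label-variable helpers above (assumes a process object with labels, as
# constructed elsewhere in this module; the identifier value is arbitrary):
#
#     automaton = Automaton(process, identifier=1)
#     used = automaton.variables(only_used=True)               # Variable objects with use > 0
#     var = automaton.determine_variable(process.labels['x'])  # increments var.use
#
# Passing shadow_use=True looks the variable up without counting it as an additional usage.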
|
the-stack_0_21504 | """
Base settings to build other settings files upon.
"""
import environ
from corsheaders.defaults import default_headers as default_cors_headers
ROOT_DIR = environ.Path(__file__) - 3 # (safe_transaction_service/config/settings/base.py - 3 = safe-transaction-service/)
APPS_DIR = ROOT_DIR.path('safe_transaction_service')
env = environ.Env()
READ_DOT_ENV_FILE = env.bool('DJANGO_READ_DOT_ENV_FILE', default=False)
DOT_ENV_FILE = env('DJANGO_DOT_ENV_FILE', default=None)
if READ_DOT_ENV_FILE or DOT_ENV_FILE:
DOT_ENV_FILE = DOT_ENV_FILE or '.env'
# OS environment variables take precedence over variables from .env
env.read_env(str(ROOT_DIR.path(DOT_ENV_FILE)))
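# Example .env contents (illustrative values only; the keys are ones read in this file):
#
#     DJANGO_DEBUG=True
#     DATABASE_URL=psql://user:password@localhost:5432/safe-transaction-service
#     CELERY_BROKER_URL=redis://localhost:6379/0
#     ETHEREUM_NODE_URL=http://localhost:8545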
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool('DJANGO_DEBUG', False)
# Local time zone. Choices are
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# though not all of them may be available with every OS.
# In Windows, this must be set to your system time zone.
TIME_ZONE = 'UTC'
# https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# DATABASES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': env.db('DATABASE_URL'),
}
DATABASES['default']['ATOMIC_REQUESTS'] = False
DATABASES['default']['ENGINE'] = 'django_db_geventpool.backends.postgresql_psycopg2'
DATABASES['default']['OPTIONS'] = {
'MAX_CONNS': 20,
'REUSE_CONNS': 10
}
# URLS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#root-urlconf
ROOT_URLCONF = 'config.urls'
# https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'
# APPS
# ------------------------------------------------------------------------------
DJANGO_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# 'django.contrib.humanize', # Handy template tags
]
THIRD_PARTY_APPS = [
'corsheaders',
'rest_framework',
'drf_yasg',
]
LOCAL_APPS = [
'safe_transaction_service.contracts.apps.ContractsConfig',
'safe_transaction_service.history.apps.HistoryConfig',
'safe_transaction_service.notifications.apps.NotificationsConfig',
'safe_transaction_service.tokens.apps.TokensConfig',
]
# https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIDDLEWARE
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#middleware
MIDDLEWARE = [
'safe_transaction_service.history.utils.LoggingMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
# STATIC
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = env('STATIC_ROOT', default=str(ROOT_DIR('staticfiles')))
# https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = [
str(APPS_DIR.path('static')),
]
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]
# MEDIA
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR('media'))
# https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
'DIRS': [
str(APPS_DIR.path('templates')),
],
'OPTIONS': {
# https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
'debug': DEBUG,
# https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
# https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
],
},
},
]
# CORS
CORS_ORIGIN_ALLOW_ALL = True
CORS_ALLOW_HEADERS = list(default_cors_headers) + ['if-match', 'if-modified-since', 'if-none-match']
CORS_EXPOSE_HEADERS = ['etag']
# FIXTURES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#fixture-dirs
FIXTURE_DIRS = (
str(APPS_DIR.path('fixtures')),
)
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')
# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL regex.
ADMIN_URL = r'^admin/'
# https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = [
("""Gnosis""", '[email protected]'),
]
# https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# Celery
# ------------------------------------------------------------------------------
INSTALLED_APPS += [
'safe_transaction_service.taskapp.celery.CeleryConfig',
'django_celery_beat',
]
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-broker_url
CELERY_BROKER_URL = env('CELERY_BROKER_URL', default='django://')
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-result_backend
if CELERY_BROKER_URL == 'django://':
CELERY_RESULT_BACKEND = 'redis://'
else:
CELERY_RESULT_BACKEND = CELERY_BROKER_URL
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-accept_content
CELERY_ACCEPT_CONTENT = ['json']
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-task_serializer
CELERY_TASK_SERIALIZER = 'json'
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-result_serializer
CELERY_RESULT_SERIALIZER = 'json'
# We are not interested in keeping results of tasks
CELERY_IGNORE_RESULT = True
# Django REST Framework
# ------------------------------------------------------------------------------
REST_FRAMEWORK = {
'PAGE_SIZE': 10,
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',
'DEFAULT_PERMISSION_CLASSES': ('rest_framework.permissions.AllowAny',),
'DEFAULT_RENDERER_CLASSES': (
'djangorestframework_camel_case.render.CamelCaseJSONRenderer',
),
'DEFAULT_PARSER_CLASSES': (
'djangorestframework_camel_case.parser.CamelCaseJSONParser',
),
'DEFAULT_VERSIONING_CLASS': 'rest_framework.versioning.NamespaceVersioning',
'EXCEPTION_HANDLER': 'safe_transaction_service.history.exceptions.custom_exception_handler',
}
# LOGGING
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See https://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
},
'ignore_succeeded_none': {
'()': 'safe_transaction_service.taskapp.celery.IgnoreSucceededNone'
},
},
'formatters': {
'short': {
'format': '%(asctime)s %(message)s'
},
'verbose': {
'format': '%(asctime)s [%(levelname)s] [%(processName)s] %(message)s'
},
'celery_verbose': {
'class': 'safe_transaction_service.taskapp.celery.PatchedCeleryFormatter',
'format': '%(asctime)s [%(levelname)s] [%(task_id)s/%(task_name)s] %(message)s',
# 'format': '%(asctime)s [%(levelname)s] [%(processName)s] [%(task_id)s/%(task_name)s] %(message)s'
},
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose',
},
'console_short': {
'class': 'logging.StreamHandler',
'formatter': 'short',
},
'celery_console': {
'level': 'DEBUG',
'filters': ['ignore_succeeded_none'],
'class': 'logging.StreamHandler',
'formatter': 'celery_verbose',
},
},
'loggers': {
'': {
'handlers': ['console'],
'level': 'INFO',
},
'LoggingMiddleware': {
'handlers': ['console_short'],
'level': 'INFO',
'propagate': False,
},
'safe_transaction_service.history.indexers.internal_tx_indexer': {
'level': 'INFO',
},
'safe_transaction_service.history.indexers.erc20_events_indexer': {
'level': 'INFO',
},
'safe_transaction_service.history.indexers.tx_processor': {
'level': 'INFO',
},
'safe_transaction_service.history.services.balance_service': {
'level': 'WARNING',
},
'safe_transaction_service.history.services.collectibles_service': {
'level': 'WARNING',
},
'celery': {
'handlers': ['celery_console'],
'level': 'DEBUG' if DEBUG else 'INFO',
            'propagate': False,  # Otherwise the messages would also be emitted by the root logger
},
'celery.worker.strategy': { # All the "Received task..."
'handlers': ['console'],
'level': 'INFO' if DEBUG else 'WARNING',
            'propagate': False,  # Otherwise the messages would also be emitted by the root logger
},
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True
},
'django.security.DisallowedHost': {
'level': 'ERROR',
'handlers': ['console', 'mail_admins'],
'propagate': True
}
}
}
REDIS_URL = env('REDIS_URL', default='redis://localhost:6379/0')
# Ethereum
# ------------------------------------------------------------------------------
ETHEREUM_NODE_URL = env('ETHEREUM_NODE_URL', default=None)
ETHEREUM_TRACING_NODE_URL = env('ETHEREUM_TRACING_NODE_URL', default=None)
ETH_INTERNAL_TXS_BLOCK_PROCESS_LIMIT = env('ETH_INTERNAL_TXS_BLOCK_PROCESS_LIMIT', default=10000)
ETH_INTERNAL_NO_FILTER = env.bool('ETH_INTERNAL_NO_FILTER', default=False)
# Safe
# ------------------------------------------------------------------------------
# Number of blocks from the current block number needed to consider a transaction valid/stable
ETH_REORG_BLOCKS = env.int('ETH_REORG_BLOCKS', default=10)
# Oracles
ETH_UNISWAP_FACTORY_ADDRESS = env('ETH_UNISWAP_FACTORY_ADDRESS',
default='0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95')
ETH_KYBER_NETWORK_PROXY_ADDRESS = env('ETH_KYBER_NETWORK_PROXY_ADDRESS',
default='0x818E6FECD516Ecc3849DAf6845e3EC868087B755')
# Tokens
TOKENS_LOGO_BASE_URI = env('TOKENS_LOGO_BASE_URI', default='https://gnosis-safe-token-logos.s3.amazonaws.com/')
TOKENS_LOGO_EXTENSION = env('TOKENS_LOGO_EXTENSION', default='.png')
# Slack notifications
SLACK_API_WEBHOOK = env('SLACK_API_WEBHOOK', default=None)
# Notifications
NOTIFICATIONS_FIREBASE_CREDENTIALS_PATH = env('NOTIFICATIONS_FIREBASE_CREDENTIALS_PATH', default=None)
if NOTIFICATIONS_FIREBASE_CREDENTIALS_PATH:
import json
NOTIFICATIONS_FIREBASE_AUTH_CREDENTIALS = json.load(
environ.Path(NOTIFICATIONS_FIREBASE_CREDENTIALS_PATH).file('firebase-credentials.json')
)
# AWS S3 https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html
AWS_ACCESS_KEY_ID = env('AWS_ACCESS_KEY_ID', default=None)
AWS_QUERYSTRING_AUTH = False # Remove query parameter authentication from generated URLs
AWS_S3_CUSTOM_DOMAIN = env('AWS_S3_CUSTOM_DOMAIN', default=None) # Set custom domain for file urls (like cloudfront)
AWS_SECRET_ACCESS_KEY = env('AWS_SECRET_ACCESS_KEY', default=None)
AWS_STORAGE_BUCKET_NAME = env('AWS_STORAGE_BUCKET_NAME', default=None)
AWS_CONFIGURED = bool(AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY and AWS_STORAGE_BUCKET_NAME)
ETHERSCAN_API_KEY = env('ETHERSCAN_API_KEY', default=None)
|
the-stack_0_21505 | # coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pretrain GPT2"""
import torch
from megatron import get_args
from megatron import print_rank_0
from megatron import get_timers
from megatron import get_tokenizer
from megatron import mpu
from megatron.data.gpt2_dataset import build_train_valid_test_datasets
from megatron.model import GPT2Model, GPT2ModelPipe
from megatron.training import pretrain
from megatron.utils import get_ltor_masks_and_position_ids
from megatron.utils import reduce_losses
from megatron.fp16 import fp32_to_fp16
# pretend this is a great DeepSpeed change too
def model_provider():
"""Build the model."""
args = get_args()
print_rank_0('building GPT2 model ...')
if args.pipe_parallel_size == 0:
model = GPT2Model(num_tokentypes=0, parallel_output=True)
else:
model = GPT2ModelPipe(num_tokentypes=0, parallel_output=True, topology=mpu.get_topology())
model._megatron_batch_fn = get_batch_pipe
model._input_grad = [True, False]
model._input_type = ['float', 'bool']
model._input_pipe_partitioned = [True, False]
return model
def get_batch(data_iterator):
"""Generate a batch"""
args = get_args()
tokenizer = get_tokenizer()
# Items and their type.
keys = ['text']
datatype = torch.int64
# Broadcast data.
if data_iterator is not None:
data = next(data_iterator)
else:
data = None
data_b = mpu.broadcast_data(keys, data, datatype)
# Unpack.
tokens_ = data_b['text'].long()
labels = tokens_[:, 1:].contiguous()
tokens = tokens_[:, :-1].contiguous()
    # Get the masks and position ids.
attention_mask, loss_mask, position_ids = get_ltor_masks_and_position_ids(
tokens,
tokenizer.eod,
args.reset_position_ids,
args.reset_attention_mask,
args.eod_mask_loss)
return tokens, labels, loss_mask, attention_mask, position_ids
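# Worked example of the shift above (illustrative): if a row of data_b['text'] is
# [t0, t1, t2, t3], then tokens = [t0, t1, t2] and labels = [t1, t2, t3], so position i of
# `tokens` is trained to predict position i of `labels` (standard next-token prediction).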
def get_batch_pipe(data):
"""A modification of get_batch() to work with the latest batch instead of an iterator. """
args = get_args()
tokenizer = get_tokenizer()
# Items and their type.
keys = ['text']
datatype = torch.int64
# Broadcast data.
data_b = mpu.broadcast_data(keys, data, datatype)
# Unpack.
tokens_ = data_b['text'].long()
labels = tokens_[:, 1:].contiguous()
tokens = tokens_[:, :-1].contiguous()
    # Get the masks and position ids.
attention_mask, loss_mask, position_ids = get_ltor_masks_and_position_ids(
tokens,
tokenizer.eod,
args.reset_position_ids,
args.reset_attention_mask,
args.eod_mask_loss)
# unpack data
if args.fp16:
# cast to fp16 because pipeline parallelism skips the FP16 wrapper.
return fp32_to_fp16((tokens, position_ids, attention_mask)), fp32_to_fp16((labels, loss_mask))
else:
return (tokens, position_ids, attention_mask), (labels, loss_mask)
def forward_step(data_iterator, model):
"""Forward step."""
args = get_args()
timers = get_timers()
# Get the batch.
timers('batch generator').start()
tokens, labels, loss_mask, attention_mask, position_ids = get_batch(data_iterator)
timers('batch generator').stop()
# Forward model.
losses = model(tokens, position_ids, attention_mask, labels=labels)
loss_mask = loss_mask.view(-1)
loss = torch.sum(losses.view(-1) * loss_mask) / loss_mask.sum()
# Reduce loss for logging.
reduced_loss = reduce_losses([loss])
return loss, {'lm loss': reduced_loss[0]}
def train_valid_test_datasets_provider(train_val_test_num_samples):
"""Build train, valid, and test datasets."""
args = get_args()
print_rank_0('> building train, validation, and test datasets '
'for GPT2 ...')
train_ds, valid_ds, test_ds = build_train_valid_test_datasets(
data_prefix=args.data_path,
data_impl=args.data_impl,
splits_string=args.split,
train_valid_test_num_samples=train_val_test_num_samples,
seq_length=args.seq_length,
seed=args.seed,
skip_warmup=(not args.mmap_warmup))
print_rank_0("> finished creating GPT2 datasets ...")
return train_ds, valid_ds, test_ds
if __name__ == "__main__":
pretrain(train_valid_test_datasets_provider, model_provider, forward_step,
args_defaults={'tokenizer_type': 'GPT2BPETokenizer'})
|
the-stack_0_21507 | import re
import json
import sys
import os
args = sys.argv
if (len(args) < 2):
sys.exit(1)
path = args[1]
if(path[-1:] == "/"):
path = path[:-1]
result_filedata_list = []
registry_info = {}
target_filepath_list = []
target_filepath_list.append('/1/stdout.txt')
target_filepath_list.append('/3/stdout.txt')
for target_filepath in target_filepath_list:
filepath = path + '/command' + target_filepath
if os.path.isfile(filepath) and os.path.getsize(filepath) > 0:
with open(filepath) as file_object:
reader = json.load(file_object)
if isinstance(reader, list):
rows = reader
else:
rows = []
rows.append(reader)
for row in rows:
child_name = ''
for path_key, path_value in row.items():
filedata_table = {}
for param_key, param_value in path_value.items():
if param_key == 'logFileName':
filedata_table['LogPath'] = param_value
elif param_key == 'retention':
if param_value.lower() == 'true':
filedata_table['Retention'] = True
else:
filedata_table['Retention'] = False
elif param_key == 'autoBackup':
if param_value.lower() == 'true':
filedata_table['AutoBackup'] = True
else:
filedata_table['AutoBackup'] = False
else:
filedata_table[param_key] = param_value
if len(filedata_table) > 0:
registry_info[path_key] = filedata_table
target_filepath_list = []
target_filepath_list.append('/0/stdout.txt')
target_filepath_list.append('/2/stdout.txt')
for target_filepath in target_filepath_list:
filepath = path + '/command' + target_filepath
if os.path.isfile(filepath) and os.path.getsize(filepath) > 0:
with open(filepath) as file_object:
reader = json.load(file_object)
if isinstance(reader, list):
rows = reader
else:
rows = []
rows.append(reader)
for row in rows:
filedata_table = {}
for param_key, param_value in row.items():
if param_key == 'OverflowAction':
if param_value == -1:
filedata_table[param_key] = 'DoNotOverwrite'
elif param_value == 0:
filedata_table[param_key] = 'OverwriteAsNeeded'
elif param_value == 1:
filedata_table[param_key] = 'OverwriteOlder'
elif param_key == 'LogName':
filedata_table['Log'] = param_value
elif param_key == 'MaximumSizeInBytes':
filedata_table['MaximumKilobytes'] = int(param_value / 1024)
else:
filedata_table[param_key] = param_value
if param_key == 'Log' or param_key == 'LogName':
if param_value in registry_info:
filedata_table.update(registry_info[param_value])
if len(filedata_table) > 0:
result_filedata_list.append(filedata_table)
result = {}
target_parameter_root_key = 'VAR_WIN_EventLog'
result[target_parameter_root_key] = result_filedata_list
print(json.dumps(result))
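# Example output shape (illustrative values; the keys follow the mappings built above):
#
#     {"VAR_WIN_EventLog": [{"Log": "Application", "MaximumKilobytes": 20480,
#                            "OverflowAction": "OverwriteOlder", "Retention": false,
#                            "LogPath": "...", "AutoBackup": false}]}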
|
the-stack_0_21510 | from wtforms import fields
from peewee import (CharField, DateTimeField, DateField, TimeField,
PrimaryKeyField, ForeignKeyField, BaseModel)
from wtfpeewee.orm import ModelConverter, model_form
from flask_admin import form
from flask_admin._compat import iteritems, itervalues
from flask_admin.model.form import InlineFormAdmin, InlineModelConverterBase
from flask_admin.model.fields import InlineModelFormField, InlineFieldList, AjaxSelectField
from .tools import get_primary_key
from .ajax import create_ajax_loader
class InlineModelFormList(InlineFieldList):
"""
Customized inline model form list field.
"""
form_field_type = InlineModelFormField
"""
Form field type. Override to use custom field for each inline form
"""
def __init__(self, form, model, prop, inline_view, **kwargs):
self.form = form
self.model = model
self.prop = prop
self.inline_view = inline_view
self._pk = get_primary_key(model)
super(InlineModelFormList, self).__init__(self.form_field_type(form, self._pk), **kwargs)
def display_row_controls(self, field):
return field.get_pk() is not None
# *** bryhoyt removed def process() entirely, because I believe it was buggy
# (but worked because another part of the code had a complimentary bug)
# and I'm not sure why it was necessary anyway.
# If we want it back in, we need to fix the following bogus query:
# self.model.select().where(attr == data).execute() # `data` is not an ID, and only happened to be so because we patched it in in .contribute() below
#
# For reference:
# .process() introduced in https://github.com/flask-admin/flask-admin/commit/2845e4b28cb40b25e2bf544b327f6202dc7e5709
# Fixed, brokenly I think, in https://github.com/flask-admin/flask-admin/commit/4383eef3ce7eb01878f086928f8773adb9de79f8#diff-f87e7cd76fb9bc48c8681b24f238fb13R30
def populate_obj(self, obj, name):
pass
def save_related(self, obj):
model_id = getattr(obj, self._pk)
attr = getattr(self.model, self.prop)
values = self.model.select().where(attr == model_id).execute()
pk_map = dict((str(getattr(v, self._pk)), v) for v in values)
# Handle request data
for field in self.entries:
field_id = field.get_pk()
if field_id in pk_map:
model = pk_map[field_id]
if self.should_delete(field):
model.delete_instance(recursive=True)
continue
else:
model = self.model()
field.populate_obj(model, None)
# Force relation
setattr(model, self.prop, model_id)
self.inline_view.on_model_change(field, model)
model.save()
class CustomModelConverter(ModelConverter):
def __init__(self, view, additional=None):
super(CustomModelConverter, self).__init__(additional)
self.view = view
# @todo: This really should be done within wtfpeewee
self.defaults[CharField] = fields.StringField
self.converters[PrimaryKeyField] = self.handle_pk
self.converters[DateTimeField] = self.handle_datetime
self.converters[DateField] = self.handle_date
self.converters[TimeField] = self.handle_time
self.overrides = getattr(self.view, 'form_overrides', None) or {}
def handle_foreign_key(self, model, field, **kwargs):
loader = getattr(self.view, '_form_ajax_refs', {}).get(field.name)
if loader:
if field.null:
kwargs['allow_blank'] = True
return field.name, AjaxSelectField(loader, **kwargs)
return super(CustomModelConverter, self).handle_foreign_key(model, field, **kwargs)
def handle_pk(self, model, field, **kwargs):
kwargs['validators'] = []
return field.name, fields.HiddenField(**kwargs)
def handle_date(self, model, field, **kwargs):
kwargs['widget'] = form.DatePickerWidget()
return field.name, fields.DateField(**kwargs)
def handle_datetime(self, model, field, **kwargs):
kwargs['widget'] = form.DateTimePickerWidget()
return field.name, fields.DateTimeField(**kwargs)
def handle_time(self, model, field, **kwargs):
return field.name, form.TimeField(**kwargs)
def get_form(model, converter,
base_class=form.BaseForm,
only=None,
exclude=None,
field_args=None,
allow_pk=False,
extra_fields=None):
"""
Create form from peewee model and contribute extra fields, if necessary
"""
result = model_form(model,
base_class=base_class,
only=only,
exclude=exclude,
field_args=field_args,
allow_pk=allow_pk,
converter=converter)
if extra_fields:
for name, field in iteritems(extra_fields):
setattr(result, name, form.recreate_field(field))
return result
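# Illustrative call (hypothetical model and view names; in practice flask-admin builds the
# converter and form from the ModelView configuration):
#
#     converter = CustomModelConverter(my_model_view)
#     PostForm = get_form(Post, converter,
#                         only=('title', 'body'),
#                         extra_fields={'notify': fields.BooleanField('Notify subscribers')})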
class InlineModelConverter(InlineModelConverterBase):
"""
Inline model form helper.
"""
inline_field_list_type = InlineModelFormList
"""
Used field list type.
If you want to do some custom rendering of inline field lists,
you can create your own wtforms field and use it instead
"""
def get_info(self, p):
info = super(InlineModelConverter, self).get_info(p)
if info is None:
if isinstance(p, BaseModel):
info = InlineFormAdmin(p)
else:
model = getattr(p, 'model', None)
if model is None:
raise Exception('Unknown inline model admin: %s' % repr(p))
attrs = dict()
for attr in dir(p):
if not attr.startswith('_') and attr != 'model':
attrs[attr] = getattr(p, attr)
info = InlineFormAdmin(model, **attrs)
# Resolve AJAX FKs
info._form_ajax_refs = self.process_ajax_refs(info)
return info
def process_ajax_refs(self, info):
refs = getattr(info, 'form_ajax_refs', None)
result = {}
if refs:
for name, opts in iteritems(refs):
new_name = '%s.%s' % (info.model.__name__.lower(), name)
loader = None
if isinstance(opts, (list, tuple)):
loader = create_ajax_loader(info.model, new_name, name, opts)
else:
loader = opts
result[name] = loader
self.view._form_ajax_refs[new_name] = loader
return result
def contribute(self, converter, model, form_class, inline_model):
# Find property from target model to current model
reverse_field = None
info = self.get_info(inline_model)
for field in info.model._meta.get_fields():
field_type = type(field)
if field_type == ForeignKeyField:
if field.rel_model == model:
reverse_field = field
break
else:
raise Exception('Cannot find reverse relation for model %s' % info.model)
# Remove reverse property from the list
ignore = [reverse_field.name]
if info.form_excluded_columns:
exclude = ignore + info.form_excluded_columns
else:
exclude = ignore
# Create field
child_form = info.get_form()
if child_form is None:
child_form = model_form(info.model,
base_class=form.BaseForm,
only=info.form_columns,
exclude=exclude,
field_args=info.form_args,
allow_pk=True,
converter=converter)
prop_name = reverse_field.related_name
label = self.get_label(info, prop_name)
setattr(form_class,
prop_name,
self.inline_field_list_type(child_form,
info.model,
reverse_field.name,
info,
label=label or info.model.__name__))
return form_class
def save_inline(form, model):
for f in itervalues(form._fields):
if f.type == 'InlineModelFormList':
f.save_related(model)
|
the-stack_0_21511 | import sys
import ctypes
from PyQt5 import QtWidgets
from src.ui.mainwindow import MainWindow
class Application:
def __init__(self):
if sys.platform.startswith('win'):
Application.set_app_user_model_id()
Application.override_exception_hook()
self.app = QtWidgets.QApplication(sys.argv)
self.window = MainWindow()
self.window.show()
self.window.center()
sys.exit(self.app.exec_())
@staticmethod
def set_app_user_model_id():
app_user_model_id = "ca.ulaval.gaul.basestation" # must be a unicode string
ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(app_user_model_id)
@staticmethod
def override_exception_hook():
"""
Back up the reference to the exception hook and set the exception hook to our wrapping function.
This is required to prevent Qt from crashing without printing the stacktrace.
"""
initial_exception_hook = sys.excepthook
def exception_hook_wrapper(exception_type, value, traceback):
# Print the error and traceback
print(exception_type, value, traceback)
# Call the normal Exception hook after
initial_exception_hook(exception_type, value, traceback)
sys.exit(1)
sys.excepthook = exception_hook_wrapper
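# Typical entry point (assumed; the real launcher may live in a separate script):
#
#     if __name__ == '__main__':
#         Application()
#
# Application.__init__ starts the Qt event loop and calls sys.exit() when the loop finishes,
# so constructing the object is enough to run the program.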
|
the-stack_0_21512 | # Michal Lyskawinski
# Date: 11/28/2016
# Vasicek Model
from math import *
from scipy.stats import norm
# interest rate parameters:
rzero = 0.04
rbar = 0.05
a = 1.5
sigma = 0.01
# Option parameters: European Call
OM = 2 # Option Maturity in years
OBM = 6 # Option's Bond Maturity in years
K = 0.73
# Bond parameters:
BM = 2 # Bond Maturity in years
def PDB(r,M):
P = exp(-r*M)
return P
def Pbond(a,M,sigma,rbar):
Bts = (1/a)*(1-PDB(a,M))
Rinf = rbar - 0.5*((sigma**2)/(a**2))
lnA = (Rinf/a)*(1-PDB(a,M)) - M*Rinf - ((sigma**2)/(4*a**3))*(1-PDB(a,M))**2
P = (PDB(-lnA,1))*(PDB(rbar,Bts)) # Price of the bond
return P
print('Price of the bond =',Pbond(a,BM,sigma,rbar))
# Pricing the option
sigmaR = (sigma/(a*(OM)))*(1-PDB(a,OM)) # Spot rate volatility (computed for reference; not used below)
POB = Pbond(a,OBM,sigma,rbar)
PO = Pbond(a,OM,sigma,rbar)
v = sqrt(((sigma**2)*(1-PDB(2*a,OM)))/(2*a))
sigmaP =(v*(1-PDB(a, OBM-OM)))/(a)
d1 = ((log(POB/(K*PO)))/(sigmaP))+(sigmaP/2)
d2 = d1 - sigmaP
c = POB*norm.cdf(d1) - K*PO*norm.cdf(d2)
print('Price of the European Call =',c) |
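# Added sanity check (a sketch, not part of the original script): price the matching European
# put directly and via put-call parity for bond options, p = c - POB + K*PO. Both values
# should agree closely if the call price above is consistent.
p = K*PO*norm.cdf(-d2) - POB*norm.cdf(-d1)
print('Price of the European Put =', p, '| via parity =', c - POB + K*PO)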
the-stack_0_21514 | #!/usr/bin/env python
#
# File Name : bleu.py
#
# Description : Wrapper for BLEU scorer.
#
# Creation Date : 06-01-2015
# Last Modified : Thu 19 Mar 2015 09:13:28 PM PDT
# Authors : Hao Fang <[email protected]> and Tsung-Yi Lin <[email protected]>
from typing import List, Tuple
import numpy as np
from .bleu_scorer import BleuScorer
class Bleu:
"""Compute BLEU score for a set of candidate sentences."""
def __init__(self, n: int = 4) -> None:
        # by default, compute BLEU score up to 4
self._n = n
self._hypo_for_image = {}
self.ref_for_image = {}
def compute_score(
self, reference: List[List[str]], hypothesis: List[List[str]]
) -> Tuple[List[float], List[List[float]]]:
"""
        Compute BLEU score given a set of reference and candidate sentences
for the dataset.
Parameters
----------
reference : List[List[str]] ([[ref1a, ref1b, ref1c], ..., [refna, refnb]])
Reference sentences
hypothesis : List[List[str]] ([[hypo1], [hypo2], ..., [hypon]])
Predicted sentences
Returns
-------
average_score : List[float]
Mean BLEU-1 to BLEU-4 score computed by averaging scores for all the images
scores : List[List[float]]
BLEU-1 to BLEU-4 scores computed for each image
"""
assert len(reference) == len(hypothesis)
bleu_scorer = BleuScorer(n = self._n)
for id, hypo in enumerate(hypothesis):
hypo = hypo
ref = reference[id]
# sanity check
assert(type(hypo) is list)
assert(len(hypo) >= 1)
assert(type(ref) is list)
assert(len(ref) > 0)
bleu_scorer += (hypo[0], ref)
# score, scores = bleu_scorer.compute_score(option='shortest')
score, scores = bleu_scorer.compute_score(option='closest', verbose=0)
# score, scores = bleu_scorer.compute_score(option='average', verbose=1)
return score, scores
def method(self) -> str:
return "Bleu"
|
the-stack_0_21515 | from __future__ import print_function
import argparse
import random
import numpy as np
import time
from collections import OrderedDict
from pybullet_tools.utils import TURTLEBOT_URDF, joints_from_names, \
set_joint_positions, get_bodies, sample_placement, pairwise_collision, \
set_point, Point, create_box, stable_z, TAN, GREY, connect, PI, wait_if_gui, dump_body, set_all_color, BLUE, \
link_from_name, draw_pose, pose_from_pose2d, \
set_random_seed, set_numpy_seed, joint_from_name, safe_zip, draw_base_limits, BodySaver, WorldSaver, LockRenderer, \
elapsed_time, disconnect, flatten, \
INF, wait_for_duration, draw_aabb, DEFAULT_AABB_BUFFER, get_joint_positions, \
get_pairs, get_distance_fn, step_simulation, get_bodies_in_region, \
AABB, Profiler, pairwise_link_collision, BASE_LINK, get_collision_data, draw_pose2d, \
CIRCULAR_LIMITS, wrap_interval, rescale_interval, adjust_path, \
contact_collision, timer, get_aabb, Pose, get_all_links, can_collide, DEFAULT_RESOLUTION, \
load_pybullet, get_collision_fn, get_limits_fn, \
get_joint_velocities, control_joint, get_time_step, remove_handles, Interval, get_distance, \
get_duration_fn, velocity_control_joint, get_max_velocities, plan_base_joint_motion, \
UNBOUNDED_LIMITS, BLACK, GREEN, RED, add_line, \
point_from_pose, retime_path, smooth_path, get_acceleration_fn, discretize_curve
from motion_planners.trajectory.smooth import smooth_curve
from motion_planners.trajectory.limits import check_spline
from motion_planners.utils import default_selector, irange
#from motion_planners.tkinter.samplers import get_cost_fn
from motion_planners.lazy_prm import ROADMAPS
from pybullet_tools.retime import sample_curve
BASE_LINK_NAME = 'base_link'
BASE_JOINTS = ['x', 'y', 'theta']
DRAW_Z = 1e-1
DRAW_LENGTH = 0.5
MIN_AABB_VOLUME = DEFAULT_AABB_BUFFER**3
MAX_VELOCITIES = np.array([1., 1., PI / 4])
MAX_ACCELERATIONS = MAX_VELOCITIES / 0.25
#MAX_VELOCITIES *= INF
#MAX_ACCELERATIONS *= INF
MIN_PROXIMITY = 1e-2
N_DIGITS = 5
##################################################
def create_custom_base_limits(robot, base_limits):
return {joint_from_name(robot, joint): limits
for joint, limits in safe_zip(BASE_JOINTS[:2], zip(*base_limits))}
def sample_placements(body_surfaces, obstacles=None, savers=[], min_distances={}):
if obstacles is None:
obstacles = set(get_bodies()) - set(body_surfaces)
savers = list(savers) + [BodySaver(obstacle) for obstacle in obstacles]
if not isinstance(min_distances, dict):
min_distances = {body: min_distances for body in body_surfaces}
# TODO: max attempts here
for body, surface in body_surfaces.items(): # TODO: shuffle
min_distance = min_distances.get(body, 0.)
while True:
pose = sample_placement(body, surface)
if pose is None:
for saver in savers:
saver.restore()
return False
for saver in savers:
obstacle = saver.body
if obstacle in [body, surface]:
continue
saver.restore()
if pairwise_collision(body, obstacle, max_distance=min_distance):
break
else:
savers.append(BodySaver(body))
break
for saver in savers:
saver.restore()
return True
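# Illustrative call (mirrors the use in problem1 below): sample poses for each pillar on the
# floor while keeping a minimum clearance from the robot saved at its goal configuration:
#
#     sample_placements({pillar: floor for pillar in pillars}, obstacles=[robot] + walls,
#                       savers=[BodySaver(robot, joints=base_joints, positions=goal_conf)],
#                       min_distances=10e-2)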
##################################################
def draw_waypoint(conf, z=DRAW_Z):
return draw_pose(pose_from_pose2d(conf, z=z), length=DRAW_LENGTH)
def draw_conf(pose2d, interval, base_z=1., z_interval=Interval(-0.5, 0.5), **kwargs):
return draw_pose2d(pose2d, z=base_z + rescale_interval(
wrap_interval(pose2d[2], interval=interval), old_interval=interval, new_interval=z_interval), **kwargs)
def draw_path(path2d, z=DRAW_Z, base_z=1., **kwargs):
if path2d is None:
return []
#return list(flatten(draw_pose(pose_from_pose2d(pose2d, z=z), **kwargs) for pose2d in path2d))
#return list(flatten(draw_pose2d(pose2d, z=z, **kwargs) for pose2d in path2d))
start = path2d[0]
mid_yaw = start[2]
#mid_yaw = wrap_interval(mid_yaw)
interval = (mid_yaw - PI, mid_yaw + PI)
#interval = CIRCULAR_LIMITS
draw_pose(pose_from_pose2d(start, z=base_z), length=1, **kwargs)
# TODO: draw the current pose
# TODO: line between orientations when there is a jump
return list(flatten(draw_conf(pose2d, interval, base_z, **kwargs) for pose2d in path2d))
def extract_full_path(robot, path_joints, path, all_joints):
with BodySaver(robot):
new_path = []
for conf in path:
set_joint_positions(robot, path_joints, conf)
new_path.append(get_joint_positions(robot, all_joints)) # TODO: do without assigning
return new_path
def draw_last_roadmap(robot, joints, only_checked=False, linear=True, down_sample=None, **kwargs):
q0 = get_joint_positions(robot, joints)
handles = []
if not ROADMAPS:
return handles
roadmap = ROADMAPS[-1]
for q in roadmap.samples:
q = q if len(q) == 3 else np.append(q[:2], q0[2:]) # TODO: make a function
handles.extend(draw_pose2d(q, z=DRAW_Z))
for v1, v2 in roadmap.edges:
color = BLACK
if roadmap.is_colliding(v1, v2):
color = RED
elif roadmap.is_safe(v1, v2):
color = GREEN
elif only_checked:
continue
if linear:
path = [roadmap.samples[v1], roadmap.samples[v2]]
else:
path = roadmap.get_path(v1, v2)
if down_sample is not None:
path = path[::down_sample] + [path[-1]]
#handles.extend(draw_path(path, **kwargs))
points = list(map(point_from_pose, [pose_from_pose2d(
q if len(q) == 3 else np.append(q[:2], q0[2:]), z=DRAW_Z) for q in path]))
handles.extend(add_line(p1, p2, color=color) for p1, p2 in get_pairs(points))
return handles
##################################################
def problem1(n_obstacles=10, wall_side=0.1, obst_width=0.25, obst_height=0.5):
floor_extent = 5.0
base_limits = (-floor_extent/2.*np.ones(2),
+floor_extent/2.*np.ones(2))
floor_height = 0.001
floor = create_box(floor_extent, floor_extent, floor_height, color=TAN)
set_point(floor, Point(z=-floor_height/2.))
wall1 = create_box(floor_extent + wall_side, wall_side, wall_side, color=GREY)
set_point(wall1, Point(y=floor_extent/2., z=wall_side/2.))
wall2 = create_box(floor_extent + wall_side, wall_side, wall_side, color=GREY)
set_point(wall2, Point(y=-floor_extent/2., z=wall_side/2.))
wall3 = create_box(wall_side, floor_extent + wall_side, wall_side, color=GREY)
set_point(wall3, Point(x=floor_extent/2., z=wall_side/2.))
wall4 = create_box(wall_side, floor_extent + wall_side, wall_side, color=GREY)
set_point(wall4, Point(x=-floor_extent/2., z=wall_side/2.))
wall5 = create_box(obst_width, obst_width, obst_height, color=GREY)
set_point(wall5, Point(z=obst_height / 2.))
walls = [wall1, wall2, wall3, wall4, wall5]
initial_surfaces = OrderedDict()
for _ in range(n_obstacles - 1):
body = create_box(obst_width, obst_width, obst_height, color=GREY)
initial_surfaces[body] = floor
pillars = list(initial_surfaces)
obstacles = walls + pillars
initial_conf = np.array([+floor_extent/3, -floor_extent/3, 3*PI/4])
goal_conf = -initial_conf
robot = load_pybullet(TURTLEBOT_URDF, fixed_base=True, merge=True, sat=False)
base_joints = joints_from_names(robot, BASE_JOINTS)
# base_link = child_link_from_joint(base_joints[-1])
base_link = link_from_name(robot, BASE_LINK_NAME)
set_all_color(robot, BLUE)
dump_body(robot)
set_point(robot, Point(z=stable_z(robot, floor)))
#set_point(robot, Point(z=base_aligned_z(robot)))
draw_pose(Pose(), parent=robot, parent_link=base_link, length=0.5)
set_joint_positions(robot, base_joints, initial_conf)
sample_placements(initial_surfaces, obstacles=[robot] + walls,
savers=[BodySaver(robot, joints=base_joints, positions=goal_conf)],
min_distances=10e-2)
# The first calls appear to be the slowest
# times = []
# for body1, body2 in combinations(pillars, r=2):
# start_time = time.time()
# colliding = pairwise_collision(body1, body2)
# runtime = elapsed_time(start_time)
# print(colliding, runtime)
# times.append(runtime)
# print(times)
return robot, base_limits, goal_conf, obstacles
##################################################
def compute_cost(robot, joints, path, resolutions=None):
if path is None:
return INF
distance_fn = get_distance_fn(robot, joints, weights=resolutions) # TODO: get_duration_fn
return sum(distance_fn(*pair) for pair in get_pairs(path))
def get_curve_collision_fn(robot, joints, custom_limits={}, resolutions=None, v_max=None, a_max=None, **kwargs):
collision_fn = get_collision_fn(robot, joints, custom_limits=custom_limits, **kwargs)
limits_fn = get_limits_fn(robot, joints, custom_limits)
def curve_collision_fn(curve, t0, t1):
if curve is None:
return True
# TODO: can exactly compute limit violations
# if not check_spline(curve, v_max=max_velocities, a_max=None, verbose=False,
# #start_t=t0, end_t=t1,
# ):
# return True
_, samples = time_discretize_curve(curve, verbose=False,
#start_t=t0, end_t=t1,
resolution=resolutions,
#max_velocities=v_max,
)
if any(map(limits_fn, samples)):
return True
if any(map(collision_fn, default_selector(samples))):
return True
return False
return curve_collision_fn
##################################################
def mpc(x0, v0, curve, dt_max=0.5, max_time=INF, max_iterations=INF, v_max=None, **kwargs):
assert (max_time < INF) or (max_iterations < INF)
from scipy.interpolate import CubicHermiteSpline
start_time = time.time()
best_cost, best_spline = INF, None
for iteration in irange(max_iterations):
if elapsed_time(start_time) >= max_time:
break
t1 = random.uniform(curve.x[0], curve.x[-1])
future = (curve.x[-1] - t1) # TODO: weighted
if future >= best_cost:
continue
x1 = curve(t1)
if (v_max is not None) and (max((x1 - x0) / v_max) > dt_max):
continue
# if quickest_inf_accel(x0, x1, v_max=v_max) > dt_max:
# continue
v1 = curve(t1, nu=1)
#dt = dt_max
dt = random.uniform(0, dt_max)
times = [0., dt]
positions = [x0, x1]
velocities = [v0, v1]
spline = CubicHermiteSpline(times, positions, dydx=velocities)
if not check_spline(spline, **kwargs):
continue
# TODO: optimize to find the closest on the path within a range
cost = future + (spline.x[-1] - spline.x[0])
if cost < best_cost:
best_cost, best_spline = cost, spline
print('Iteration: {} | Cost: {:.3f} | T: {:.3f} | Time: {:.3f}'.format(
iteration, cost, t1, elapsed_time(start_time)))
print(best_cost, t1, elapsed_time(start_time))
return best_cost, best_spline
def find_closest(x0, curve, t_range=None, max_time=INF, max_iterations=INF, distance_fn=None, verbose=False):
assert (max_time < INF) or (max_iterations < INF)
if t_range is None:
t_range = Interval(curve.x[0], curve.x[-1])
t_range = Interval(max(t_range[0], curve.x[0]), min(curve.x[-1], t_range[-1]))
if distance_fn is None:
distance_fn = get_distance
start_time = time.time()
closest_dist, closest_t = INF, None
for iteration in irange(max_iterations):
if elapsed_time(start_time) >= max_time:
break
t = random.uniform(*t_range) # TODO: halton
x = curve(t)
dist = distance_fn(x0, x)
if dist < closest_dist:
closest_dist, closest_t = dist, t
if verbose:
print('Iteration: {} | Dist: {:.3f} | T: {:.3f} | Time: {:.3f}'.format(
iteration, closest_dist, t, elapsed_time(start_time)))
return closest_dist, closest_t
##################################################
def max_velocity_control_joints(robot, joints, positions=None, velocities=None, max_velocities=None):
if velocities is None:
velocities = np.zeros(len(joints))
if max_velocities is None:
max_velocities = get_max_velocities(robot, joints)
for idx, joint in enumerate(joints):
if positions is not None:
control_joint(robot, joint=joint, position=positions[idx],
# velocity=0.,
velocity=velocities[idx], # if abs(velocities[idx]) > 1e-3 else 0,
# max_velocity=abs(velocities[idx]),
max_velocity=abs(max_velocities[idx]), # TODO: max_velocity and velocity==0 cause issues
position_gain=10, velocity_scale=None, max_force=None)
else:
velocity_control_joint(robot, joint=joint, velocity=velocities[idx],
max_velocity=abs(max_velocities[idx]),
position_gain=None, velocity_scale=None, max_force=None)
def control_state(robot, joints, target_positions, target_velocities=None, position_tol=INF, velocity_tol=INF,
max_velocities=None, verbose=True): # TODO: max_accelerations
if target_velocities is None:
target_velocities = np.zeros(len(joints))
if max_velocities is None:
max_velocities = get_max_velocities(robot, joints)
assert (max_velocities > 0).all()
max_velocity_control_joints(robot, joints, positions=target_positions, velocities=target_velocities,
max_velocities=max_velocities)
for i in irange(INF):
current_positions = np.array(get_joint_positions(robot, joints))
position_error = get_distance(current_positions, target_positions, norm=INF)
current_velocities = np.array(get_joint_velocities(robot, joints))
velocity_error = get_distance(current_velocities, target_velocities, norm=INF)
if verbose:
# print('Positions: {} | Target positions: {}'.format(current_positions.round(N_DIGITS), target_positions.round(N_DIGITS)))
# print('Velocities: {} | Target velocities: {}'.format(current_velocities.round(N_DIGITS), target_velocities.round(N_DIGITS)))
print('Step: {} | Position error: {:.3f}/{:.3f} | Velocity error: {:.3f}/{:.3f}'.format(
i, position_error, position_tol, velocity_error, velocity_tol))
# TODO: draw the tolerance interval
if (position_error <= position_tol) and (velocity_error <= velocity_tol):
return # TODO: declare success or failure by yielding or throwing an exception
yield i
def follow_curve(robot, joints, curve, goal_t=None, time_step=None, max_velocities=MAX_VELOCITIES, **kwargs):
if goal_t is None:
goal_t = curve.x[-1]
if time_step is None:
time_step = 10*get_time_step()
#distance_fn = get_distance_fn(robot, joints, weights=None, norm=2)
distance_fn = get_duration_fn(robot, joints, velocities=max_velocities, norm=INF) # get_distance
positions = np.array(get_joint_positions(robot, joints))
closest_dist, closest_t = find_closest(positions, curve, t_range=(curve.x[0], goal_t), max_time=1e-1,
max_iterations=INF, distance_fn=distance_fn, verbose=True)
print('Closest dist: {:.3f} | Closest time: {:.3f}'.format(closest_dist, closest_t))
target_t = closest_t
# TODO: condition based on closest_dist
while True:
print('\nTarget time: {:.3f} | Goal time: {}'.format(target_t, goal_t))
target_positions = curve(target_t)
target_velocities = curve(target_t, nu=1) # TODO: draw the velocity
#print('Positions: {} | Velocities: {}'.format(target_positions, target_velocities))
handles = draw_waypoint(target_positions)
is_goal = (target_t == goal_t)
position_tol = 1e-2 if is_goal else 1e-2
for output in control_state(robot, joints, target_positions=target_positions, target_velocities=target_velocities,
position_tol=position_tol, velocity_tol=INF, max_velocities=max_velocities, **kwargs):
yield output
remove_handles(handles)
target_t = min(goal_t, target_t + time_step)
if is_goal:
break
##################################################
def follow_curve_old(robot, joints, curve, goal_t=None):
# TODO: unify with /Users/caelan/Programs/open-world-tamp/open_world/simulation/control.py
if goal_t is None:
goal_t = curve.x[-1]
time_step = get_time_step()
target_step = 10*time_step
#distance_fn = get_distance_fn(robot, joints, weights=None, norm=2)
distance_fn = get_duration_fn(robot, joints, velocities=MAX_VELOCITIES, norm=INF)
for i in irange(INF):
# if (i % 10) != 0:
# continue
current_p = np.array(get_joint_positions(robot, joints))
current_v = np.array(get_joint_velocities(robot, joints))
goal_dist = distance_fn(current_p, curve(goal_t))
print('Positions: {} | Velocities: {} | Goal distance: {:.3f}'.format(
current_p.round(N_DIGITS), current_v.round(N_DIGITS), goal_dist))
if goal_dist < 1e-2:
return True
# _, connection = mpc(current_p, current_v, curve, v_max=MAX_VELOCITIES, a_max=MAX_ACCELERATIONS,
# dt_max=1e-1, max_time=1e-1)
# assert connection is not None
# target_t = 0.5*connection.x[-1]
# target_p = connection(target_t)
# target_v = connection(target_t, nu=1)
# #print(target_p)
closest_dist, closest_t = find_closest(current_p, curve, t_range=None, max_time=1e-2,
max_iterations=INF, distance_fn=distance_fn, verbose=True)
target_t = min(closest_t + target_step, curve.x[-1])
target_p = curve(target_t)
#target_v = curve(target_t, nu=1)
target_v = curve(closest_t, nu=1)
#target_v = MAX_VELOCITIES
#target_v = INF*np.zeros(len(joints))
handles = draw_waypoint(target_p)
#times, confs = time_discretize_curve(curve, verbose=False, resolution=resolutions) # max_velocities=v_max,
# set_joint_positions(robot, joints, target_p)
max_velocity_control_joints(robot, joints,
positions=target_p,
velocities=target_v,
max_velocities=MAX_VELOCITIES)
#next_t = closest_t + time_step
#next_p = current_p + current_v*time_step
yield target_t
actual_p = np.array(get_joint_positions(robot, joints))
actual_v = np.array(get_joint_velocities(robot, joints))
next_p = current_p + actual_v*time_step
print('Predicted: {} | Actual: {}'.format(next_p.round(N_DIGITS), actual_p.round(N_DIGITS)))
remove_handles(handles)
##################################################
def simulate_curve(robot, joints, curve):
#set_joint_positions(robot, joints, curve(random.uniform(curve.x[0], curve.x[-1])))
wait_if_gui(message='Begin?')
#controller = follow_curve_old(robot, joints, curve)
controller = follow_curve(robot, joints, curve)
for _ in controller:
step_simulation()
#wait_if_gui()
#wait_for_duration(duration=time_step)
#time.sleep(time_step)
wait_if_gui(message='Finish?')
def step_curve(robot, joints, path, step_size=None):
wait_if_gui(message='Begin?')
for i, conf in enumerate(path):
set_joint_positions(robot, joints, conf)
if step_size is None:
wait_if_gui(message='{}/{} Continue?'.format(i, len(path)))
else:
wait_for_duration(duration=step_size)
wait_if_gui(message='Finish?')
##################################################
def iterate_path(robot, joints, path, simulate=False, step_size=None, resolutions=None, smooth=False, **kwargs): # 1e-2 | None
if path is None:
return
saver = WorldSaver()
path = adjust_path(robot, joints, path)
with LockRenderer():
handles = draw_path(path)
waypoints = path
#waypoints = waypoints_from_path(path)
#curve = interpolate_path(robot, joints, waypoints, k=1, velocity_fraction=1) # TODO: no velocities in the URDF
if not smooth:
curve = retime_path(robot, joints, path, max_velocities=MAX_VELOCITIES, max_accelerations=MAX_ACCELERATIONS)
else:
curve = smooth_path(robot, joints, path, resolutions=resolutions,
max_velocities=MAX_VELOCITIES, max_accelerations=MAX_ACCELERATIONS, **kwargs)
path = discretize_curve(robot, joints, curve, resolutions=resolutions)
print('Steps: {} | Start: {:.3f} | End: {:.3f} | Knots: {}'.format(
len(path), curve.x[0], curve.x[-1], len(curve.x)))
with LockRenderer():
handles = draw_path(path)
if False:
# TODO: handle circular joints
#curve_collision_fn = lambda *args, **kwargs: False
curve_collision_fn = get_curve_collision_fn(robot, joints, resolutions=resolutions, **kwargs)
with LockRenderer():
with Profiler():
curve = smooth_curve(curve, MAX_VELOCITIES, MAX_ACCELERATIONS,
curve_collision_fn, max_time=5) #, curve_collision_fn=[])
saver.restore()
path = [conf for t, conf in sample_curve(curve, time_step=step_size)]
print('Steps: {} | Start: {:.3f} | End: {:.3f} | Knots: {}'.format(
len(path), curve.x[0], curve.x[-1], len(curve.x)))
with LockRenderer():
handles = draw_path(path)
if simulate:
simulate_curve(robot, joints, curve)
else:
path = [conf for t, conf in sample_curve(curve, time_step=step_size)]
step_curve(robot, joints, path, step_size=step_size)
##################################################
def test_aabb(robot):
base_link = link_from_name(robot, BASE_LINK_NAME)
region_aabb = AABB(lower=-np.ones(3), upper=+np.ones(3))
draw_aabb(region_aabb)
# bodies = get_bodies_in_region(region_aabb)
# print(len(bodies), bodies)
# for body in get_bodies():
# set_pose(body, Pose())
#step_simulation() # Need to call before get_bodies_in_region
#update_scene()
for i in range(3):
with timer(message='{:f}'):
bodies = get_bodies_in_region(region_aabb) # This does cache some info
print(i, len(bodies), bodies)
# https://github.com/bulletphysics/bullet3/search?q=broadphase
# https://github.com/bulletphysics/bullet3/search?p=1&q=getCachedOverlappingObjects&type=&utf8=%E2%9C%93
# https://andysomogyi.github.io/mechanica/bullet.html
# http://www.cs.kent.edu/~ruttan/GameEngines/lectures/Bullet_User_Manual
aabb = get_aabb(robot)
# aabb = get_subtree_aabb(robot, base_link)
print(aabb)
draw_aabb(aabb)
for link in [BASE_LINK, base_link]:
print(link, get_collision_data(robot, link), pairwise_link_collision(robot, link, robot, link))
def test_caching(robot, obstacles):
with timer(message='{:f}'):
#update_scene() # 5.19752502441e-05
step_simulation() # 0.000210046768188
with timer(message='{:f}'):
#print(get_aabb(robot, link=None, only_collision=True))
print(contact_collision()) # 2.50339508057e-05
for _ in range(3):
with timer(message='{:f}'):
#print(get_aabb(robot, link=None, only_collision=True)) # Recomputes each time
print(contact_collision()) # 1.69277191162e-05
print()
obstacle = obstacles[-1]
#for link in get_all_links(robot):
# set_collision_pair_mask(robot, obstacle, link1=link, enable=False) # Doesn't seem to affect pairwise_collision
with timer('{:f}'):
print(pairwise_collision(robot, obstacle)) # 0.000031
links = get_all_links(robot)
links = [link for link in get_all_links(robot) if can_collide(robot, link)]
#links = randomize(links)
with timer('{:f}'):
print(any(pairwise_collision(robot, obstacle, link1=link) for link in links # 0.000179
))
#if aabb_overlap(get_aabb(robot, link), get_aabb(obstacles[-1]))))
#if can_collide(robot, link)))
with timer('{:f}'):
print(pairwise_collision(robot, obstacle))
##################################################
def main():
np.set_printoptions(precision=N_DIGITS, suppress=True, threshold=3) # , edgeitems=1) #, linewidth=1000)
parser = argparse.ArgumentParser()
parser.add_argument('-a', '--algorithm', default='rrt_connect', # choices=[],
help='The motion planning algorithm to use.')
parser.add_argument('-c', '--cfree', action='store_true',
help='When enabled, disables collision checking.')
# parser.add_argument('-p', '--problem', default='test_pour', choices=sorted(problem_fn_from_name),
# help='The name of the problem to solve.')
parser.add_argument('--holonomic', action='store_true', # '-h',
help='')
parser.add_argument('-l', '--lock', action='store_false',
help='')
parser.add_argument('-r', '--reversible', action='store_true',
help='')
parser.add_argument('-s', '--seed', default=None, type=int, # None | 1
help='The random seed to use.')
parser.add_argument('-n', '--num', default=10, type=int,
help='The number of obstacles.')
parser.add_argument('-o', '--orientation', action='store_true',
help='')
parser.add_argument('-v', '--viewer', action='store_false',
help='')
args = parser.parse_args()
connect(use_gui=args.viewer)
#set_aabb_buffer(buffer=1e-3)
#set_separating_axis_collisions()
#seed = 0
#seed = time.time()
seed = args.seed
if seed is None:
seed = random.randint(0, 10**3-1)
print('Seed:', seed)
set_random_seed(seed=seed) # None: 2147483648 = 2**31
set_numpy_seed(seed=seed)
#print('Random seed:', get_random_seed(), random.random())
#print('Numpy seed:', get_numpy_seed(), np.random.random())
#########################
robot, base_limits, goal_conf, obstacles = problem1(n_obstacles=args.num)
custom_limits = create_custom_base_limits(robot, base_limits)
base_joints = joints_from_names(robot, BASE_JOINTS)
draw_base_limits(base_limits)
# draw_pose(get_link_pose(robot, base_link), length=0.5)
start_conf = get_joint_positions(robot, base_joints)
for conf in [start_conf, goal_conf]:
draw_waypoint(conf)
#resolutions = None
#resolutions = np.array([0.05, 0.05, math.radians(10)])
plan_joints = base_joints[:2] if not args.orientation else base_joints
d = len(plan_joints)
holonomic = args.holonomic or (d != 3)
resolutions = 1.*DEFAULT_RESOLUTION*np.ones(d) # TODO: default resolutions, velocities, accelerations fns
#weights = np.reciprocal(resolutions)
weights = np.array([1, 1, 1e-3])[:d]
cost_fn = get_acceleration_fn(robot, plan_joints, max_velocities=MAX_VELOCITIES[:d],
max_accelerations=MAX_ACCELERATIONS[:d])
# TODO: check that the shortest turning direction is taken (does reversible affect this?)
if args.cfree:
obstacles = []
# for obstacle in obstacles:
# draw_aabb(get_aabb(obstacle)) # Updates automatically
#set_all_static() # Doesn't seem to affect
#test_aabb(robot)
#test_caching(robot, obstacles)
#return
#########################
# TODO: filter if straight-line path is feasible
saver = WorldSaver()
wait_for_duration(duration=1e-2)
start_time = time.time()
with LockRenderer(lock=args.lock):
with Profiler(field='cumtime', num=25): # tottime | cumtime | None
# TODO: draw the search tree
path = plan_base_joint_motion(
robot, plan_joints, goal_conf[:d],
holonomic=holonomic, reversible=args.reversible,
obstacles=obstacles, self_collisions=False, custom_limits=custom_limits,
use_aabb=True, cache=True, max_distance=MIN_PROXIMITY,
resolutions=resolutions, weights=weights, # TODO: use KDTrees
circular={2: UNBOUNDED_LIMITS if holonomic else CIRCULAR_LIMITS},
cost_fn=cost_fn,
algorithm=args.algorithm, verbose=True,
restarts=5, max_iterations=50,
smooth=None if holonomic else 100, smooth_time=1, # None | 100 | INF
)
saver.restore()
# TODO: draw ROADMAPS
#wait_for_duration(duration=1e-3)
#########################
solved = path is not None
length = INF if path is None else len(path)
cost = compute_cost(robot, base_joints, path, resolutions=resolutions[:len(plan_joints)])
print('Solved: {} | Length: {} | Cost: {:.3f} | Runtime: {:.3f} sec'.format(
solved, length, cost, elapsed_time(start_time)))
if path is None:
wait_if_gui()
disconnect()
return
# for i, conf in enumerate(path):
# set_joint_positions(robot, plan_joints, conf)
# wait_if_gui('{}/{}) Continue?'.format(i + 1, len(path)))
path = extract_full_path(robot, plan_joints, path, base_joints)
with LockRenderer():
draw_last_roadmap(robot, base_joints)
# for i, conf in enumerate(path):
# set_joint_positions(robot, base_joints, conf)
# wait_if_gui('{}/{}) Continue?'.format(i+1, len(path)))
iterate_path(robot, base_joints, path, step_size=2e-2, smooth=holonomic, custom_limits=custom_limits,
resolutions=DEFAULT_RESOLUTION*np.ones(3), # resolutions
obstacles=obstacles, self_collisions=False, max_distance=MIN_PROXIMITY)
disconnect()
if __name__ == '__main__':
main()
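# Illustrative invocation (a sketch; the script filename is an assumption, the flags come from the parser in main()):
#   python test_motion.py -a rrt_connect -n 20 -s 0 --orientation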
|
the-stack_0_21516 | #!/usr/bin/env python3
"""
Rules for building C/API module with f2py2e.
Copyright 1999,2000 Pearu Peterson all rights reserved,
Pearu Peterson <[email protected]>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy License.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Date: 2004/11/26 11:13:06 $
Pearu Peterson
"""
__version__ = "$Revision: 1.16 $"[10:-1]
f2py_version = 'See `f2py -v`'
import copy
from f2py_skel.stds.auxfuncs import (
getfortranname, isexternal, isfunction, isfunction_wrap, isintent_in,
isintent_out, islogicalfunction, ismoduleroutine, isscalar,
issubroutine, issubroutine_wrap, outmess, show
)
def var2fixfortran(vars, a, fa=None, f90mode=None):
if fa is None:
fa = a
if a not in vars:
show(vars)
outmess('var2fixfortran: No definition for argument "%s".\n' % a)
return ''
if 'typespec' not in vars[a]:
show(vars[a])
outmess('var2fixfortran: No typespec for argument "%s".\n' % a)
return ''
vardef = vars[a]['typespec']
if vardef == 'type' and 'typename' in vars[a]:
vardef = '%s(%s)' % (vardef, vars[a]['typename'])
selector = {}
lk = ''
if 'kindselector' in vars[a]:
selector = vars[a]['kindselector']
lk = 'kind'
elif 'charselector' in vars[a]:
selector = vars[a]['charselector']
lk = 'len'
if '*' in selector:
if f90mode:
if selector['*'] in ['*', ':', '(*)']:
vardef = '%s(len=*)' % (vardef)
else:
vardef = '%s(%s=%s)' % (vardef, lk, selector['*'])
else:
if selector['*'] in ['*', ':']:
vardef = '%s*(%s)' % (vardef, selector['*'])
else:
vardef = '%s*%s' % (vardef, selector['*'])
else:
if 'len' in selector:
vardef = '%s(len=%s' % (vardef, selector['len'])
if 'kind' in selector:
vardef = '%s,kind=%s)' % (vardef, selector['kind'])
else:
vardef = '%s)' % (vardef)
elif 'kind' in selector:
vardef = '%s(kind=%s)' % (vardef, selector['kind'])
vardef = '%s %s' % (vardef, fa)
if 'dimension' in vars[a]:
vardef = '%s(%s)' % (vardef, ','.join(vars[a]['dimension']))
return vardef
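# Illustrative sketch (not part of the original module): for a hypothetical entry
#   vars = {'x': {'typespec': 'real', 'kindselector': {'kind': '8'}, 'dimension': ['n']}}
# var2fixfortran(vars, 'x') builds 'real(kind=8)' from the kind selector, appends the argument
# name and dimension, and returns the declaration 'real(kind=8) x(n)'.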
def createfuncwrapper(rout, signature=0):
assert isfunction(rout)
extra_args = []
vars = rout['vars']
for a in rout['args']:
v = rout['vars'][a]
for i, d in enumerate(v.get('dimension', [])):
if d == ':':
dn = 'f2py_%s_d%s' % (a, i)
dv = dict(typespec='integer', intent=['hide'])
dv['='] = 'shape(%s, %s)' % (a, i)
extra_args.append(dn)
vars[dn] = dv
v['dimension'][i] = dn
rout['args'].extend(extra_args)
need_interface = bool(extra_args)
ret = ['']
def add(line, ret=ret):
ret[0] = '%s\n %s' % (ret[0], line)
name = rout['name']
fortranname = getfortranname(rout)
f90mode = ismoduleroutine(rout)
newname = '%sf2pywrap' % (name)
if newname not in vars:
vars[newname] = vars[name]
args = [newname] + rout['args'][1:]
else:
args = [newname] + rout['args']
l = var2fixfortran(vars, name, newname, f90mode)
if l[:13] == 'character*(*)':
if f90mode:
l = 'character(len=10)' + l[13:]
else:
l = 'character*10' + l[13:]
charselect = vars[name]['charselector']
if charselect.get('*', '') == '(*)':
charselect['*'] = '10'
sargs = ', '.join(args)
if f90mode:
add('subroutine f2pywrap_%s_%s (%s)' %
(rout['modulename'], name, sargs))
if not signature:
add('use %s, only : %s' % (rout['modulename'], fortranname))
else:
add('subroutine f2pywrap%s (%s)' % (name, sargs))
if not need_interface:
add('external %s' % (fortranname))
l = l + ', ' + fortranname
if need_interface:
for line in rout['saved_interface'].split('\n'):
if line.lstrip().startswith('use ') and '__user__' not in line:
add(line)
args = args[1:]
dumped_args = []
for a in args:
if isexternal(vars[a]):
add('external %s' % (a))
dumped_args.append(a)
for a in args:
if a in dumped_args:
continue
if isscalar(vars[a]):
add(var2fixfortran(vars, a, f90mode=f90mode))
dumped_args.append(a)
for a in args:
if a in dumped_args:
continue
if isintent_in(vars[a]):
add(var2fixfortran(vars, a, f90mode=f90mode))
dumped_args.append(a)
for a in args:
if a in dumped_args:
continue
add(var2fixfortran(vars, a, f90mode=f90mode))
add(l)
if need_interface:
if f90mode:
# f90 module already defines needed interface
pass
else:
add('interface')
add(rout['saved_interface'].lstrip())
add('end interface')
sargs = ', '.join([a for a in args if a not in extra_args])
if not signature:
if islogicalfunction(rout):
add('%s = .not.(.not.%s(%s))' % (newname, fortranname, sargs))
else:
add('%s = %s(%s)' % (newname, fortranname, sargs))
if f90mode:
add('end subroutine f2pywrap_%s_%s' % (rout['modulename'], name))
else:
add('end')
return ret[0]
def createsubrwrapper(rout, signature=0):
assert issubroutine(rout)
extra_args = []
vars = rout['vars']
for a in rout['args']:
v = rout['vars'][a]
for i, d in enumerate(v.get('dimension', [])):
if d == ':':
dn = 'f2py_%s_d%s' % (a, i)
dv = dict(typespec='integer', intent=['hide'])
dv['='] = 'shape(%s, %s)' % (a, i)
extra_args.append(dn)
vars[dn] = dv
v['dimension'][i] = dn
rout['args'].extend(extra_args)
need_interface = bool(extra_args)
ret = ['']
def add(line, ret=ret):
ret[0] = '%s\n %s' % (ret[0], line)
name = rout['name']
fortranname = getfortranname(rout)
f90mode = ismoduleroutine(rout)
args = rout['args']
sargs = ', '.join(args)
if f90mode:
add('subroutine f2pywrap_%s_%s (%s)' %
(rout['modulename'], name, sargs))
if not signature:
add('use %s, only : %s' % (rout['modulename'], fortranname))
else:
add('subroutine f2pywrap%s (%s)' % (name, sargs))
if not need_interface:
add('external %s' % (fortranname))
if need_interface:
for line in rout['saved_interface'].split('\n'):
if line.lstrip().startswith('use ') and '__user__' not in line:
add(line)
dumped_args = []
for a in args:
if isexternal(vars[a]):
add('external %s' % (a))
dumped_args.append(a)
for a in args:
if a in dumped_args:
continue
if isscalar(vars[a]):
add(var2fixfortran(vars, a, f90mode=f90mode))
dumped_args.append(a)
for a in args:
if a in dumped_args:
continue
add(var2fixfortran(vars, a, f90mode=f90mode))
if need_interface:
if f90mode:
# f90 module already defines needed interface
pass
else:
add('interface')
for line in rout['saved_interface'].split('\n'):
if line.lstrip().startswith('use ') and '__user__' in line:
continue
add(line)
add('end interface')
sargs = ', '.join([a for a in args if a not in extra_args])
if not signature:
add('call %s(%s)' % (fortranname, sargs))
if f90mode:
add('end subroutine f2pywrap_%s_%s' % (rout['modulename'], name))
else:
add('end')
return ret[0]
def assubr(rout):
if isfunction_wrap(rout):
fortranname = getfortranname(rout)
name = rout['name']
outmess('\t\tCreating wrapper for Fortran function "%s"("%s")...\n' % (
name, fortranname))
rout = copy.copy(rout)
fname = name
rname = fname
if 'result' in rout:
rname = rout['result']
rout['vars'][fname] = rout['vars'][rname]
fvar = rout['vars'][fname]
if not isintent_out(fvar):
if 'intent' not in fvar:
fvar['intent'] = []
fvar['intent'].append('out')
flag = 1
for i in fvar['intent']:
if i.startswith('out='):
flag = 0
break
if flag:
fvar['intent'].append('out=%s' % (rname))
rout['args'][:] = [fname] + rout['args']
return rout, createfuncwrapper(rout)
if issubroutine_wrap(rout):
fortranname = getfortranname(rout)
name = rout['name']
outmess('\t\tCreating wrapper for Fortran subroutine "%s"("%s")...\n' % (
name, fortranname))
rout = copy.copy(rout)
return rout, createsubrwrapper(rout)
return rout, ''
|
the-stack_0_21517 | import argparse
import logging
import shtab
from dvc.cli.command import CmdBaseNoRepo
from dvc.cli.utils import append_doc_link
from dvc.ui import ui
logger = logging.getLogger(__name__)
FILE = shtab.FILE
DIR = shtab.DIRECTORY
PREAMBLE = {
"bash": """
# $1=COMP_WORDS[1]
_dvc_compgen_DVCFiles() {
compgen -d -S '/' -- $1 # recurse into subdirs
compgen -f -X '!*?.dvc' -- $1
compgen -f -X '!*Dvcfile' -- $1
compgen -f -X '!*dvc.yaml' -- $1
}
_dvc_compgen_stages() {
local _dvc_stages=($(dvc stage list -q --names-only))
compgen -W "${_dvc_stages[*]}" -- $1
}
_dvc_compgen_stages_and_files() {
_dvc_compgen_DVCFiles $1
_dvc_compgen_stages $1
}
_dvc_compgen_exps() {
local _dvc_exps=($(dvc exp list -q --all --names-only))
compgen -W "${_dvc_exps[*]}" -- $1
}
""",
"zsh": """
_dvc_compadd_DVCFiles() {
_files -g '(*?.dvc|Dvcfile|dvc.yaml)'
}
_dvc_compadd_stages() {
# this will also show up the description of the stages
_describe 'stages' "($(_dvc_stages_output))"
}
_dvc_stages_output() {
dvc stage list -q | awk '{
# escape possible `:` on the stage name
sub(/:/, "\\\\\\\\:", $1);
# read all of the columns except the first
# reading `out` from $2, so as not to have a leading whitespace
out=$2; for(i=3;i<=NF;i++){out=out" "$i};
# print key, ":" and then single-quote the description
# colon is a delimiter used by `_describe` to separate field/description
print $1":""\\047"out"\\047"
# single quote -> \\047
}'
}
_dvc_compadd_stages_and_files() {
_dvc_compadd_DVCFiles
_dvc_compadd_stages
}
_dvc_compadd_exps() {
_describe 'experiments' "($(dvc exp list -q -a --names-only))"
}
""",
}
DVC_FILE = {"bash": "_dvc_compgen_DVCFiles", "zsh": "_dvc_compadd_DVCFiles"}
STAGE = {"bash": "_dvc_compgen_stages", "zsh": "_dvc_compadd_stages"}
DVCFILES_AND_STAGE = {
"bash": "_dvc_compgen_stages_and_files",
"zsh": "_dvc_compadd_stages_and_files",
}
EXPERIMENT = {"bash": "_dvc_compgen_exps", "zsh": "_dvc_compadd_exps"}
class CmdCompletion(CmdBaseNoRepo):
def run(self):
from dvc.cli.parser import get_main_parser
parser = get_main_parser()
shell = self.args.shell
script = shtab.complete(parser, shell=shell, preamble=PREAMBLE)
ui.write(script, force=True)
return 0
def add_parser(subparsers, parent_parser):
COMPLETION_HELP = "Generate shell tab completion."
COMPLETION_DESCRIPTION = "Prints out shell tab completion scripts."
completion_parser = subparsers.add_parser(
"completion",
parents=[parent_parser],
description=append_doc_link(COMPLETION_DESCRIPTION, "completion"),
help=COMPLETION_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
completion_parser.add_argument(
"-s",
"--shell",
help="Shell syntax for completions.",
default="bash",
choices=["bash", "zsh"],
)
completion_parser.set_defaults(func=CmdCompletion)
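# Example shell usage (illustrative; the output locations are assumptions, the -s flag and its
# choices come from the parser above):
#   dvc completion -s bash >> ~/.bash_completion
#   dvc completion -s zsh > ~/.zsh/completions/_dvc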
|
the-stack_0_21518 | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import os
import shutil
from pathlib import Path
import jsonlines
import numpy as np
import paddle
import yaml
from paddle import DataParallel
from paddle import distributed as dist
from paddle.io import DataLoader
from paddle.io import DistributedBatchSampler
from paddle.optimizer import Adam
from yacs.config import CfgNode
from paddlespeech.t2s.datasets.am_batch_fn import vits_single_spk_batch_fn
from paddlespeech.t2s.datasets.data_table import DataTable
from paddlespeech.t2s.models.vits import VITS
from paddlespeech.t2s.models.vits import VITSEvaluator
from paddlespeech.t2s.models.vits import VITSUpdater
from paddlespeech.t2s.modules.losses import DiscriminatorAdversarialLoss
from paddlespeech.t2s.modules.losses import FeatureMatchLoss
from paddlespeech.t2s.modules.losses import GeneratorAdversarialLoss
from paddlespeech.t2s.modules.losses import KLDivergenceLoss
from paddlespeech.t2s.modules.losses import MelSpectrogramLoss
from paddlespeech.t2s.training.extensions.snapshot import Snapshot
from paddlespeech.t2s.training.extensions.visualizer import VisualDL
from paddlespeech.t2s.training.optimizer import scheduler_classes
from paddlespeech.t2s.training.seeding import seed_everything
from paddlespeech.t2s.training.trainer import Trainer
def train_sp(args, config):
# decides device type and whether to run in parallel
# setup running environment correctly
world_size = paddle.distributed.get_world_size()
if (not paddle.is_compiled_with_cuda()) or args.ngpu == 0:
paddle.set_device("cpu")
else:
paddle.set_device("gpu")
if world_size > 1:
paddle.distributed.init_parallel_env()
# set the random seed, it is a must for multiprocess training
seed_everything(config.seed)
print(
f"rank: {dist.get_rank()}, pid: {os.getpid()}, parent_pid: {os.getppid()}",
)
# dataloader has been too verbose
logging.getLogger("DataLoader").disabled = True
fields = ["text", "text_lengths", "feats", "feats_lengths", "wave"]
converters = {
"wave": np.load,
"feats": np.load,
}
# construct dataset for training and validation
with jsonlines.open(args.train_metadata, 'r') as reader:
train_metadata = list(reader)
train_dataset = DataTable(
data=train_metadata,
fields=fields,
converters=converters, )
with jsonlines.open(args.dev_metadata, 'r') as reader:
dev_metadata = list(reader)
dev_dataset = DataTable(
data=dev_metadata,
fields=fields,
converters=converters, )
# collate function and dataloader
train_sampler = DistributedBatchSampler(
train_dataset,
batch_size=config.batch_size,
shuffle=True,
drop_last=True)
dev_sampler = DistributedBatchSampler(
dev_dataset,
batch_size=config.batch_size,
shuffle=False,
drop_last=False)
print("samplers done!")
train_batch_fn = vits_single_spk_batch_fn
train_dataloader = DataLoader(
train_dataset,
batch_sampler=train_sampler,
collate_fn=train_batch_fn,
num_workers=config.num_workers)
dev_dataloader = DataLoader(
dev_dataset,
batch_sampler=dev_sampler,
collate_fn=train_batch_fn,
num_workers=config.num_workers)
print("dataloaders done!")
with open(args.phones_dict, "r") as f:
phn_id = [line.strip().split() for line in f.readlines()]
vocab_size = len(phn_id)
print("vocab_size:", vocab_size)
odim = config.n_fft // 2 + 1
model = VITS(idim=vocab_size, odim=odim, **config["model"])
gen_parameters = model.generator.parameters()
dis_parameters = model.discriminator.parameters()
if world_size > 1:
model = DataParallel(model)
gen_parameters = model._layers.generator.parameters()
dis_parameters = model._layers.discriminator.parameters()
print("model done!")
# loss
criterion_mel = MelSpectrogramLoss(
**config["mel_loss_params"], )
criterion_feat_match = FeatureMatchLoss(
**config["feat_match_loss_params"], )
criterion_gen_adv = GeneratorAdversarialLoss(
**config["generator_adv_loss_params"], )
criterion_dis_adv = DiscriminatorAdversarialLoss(
**config["discriminator_adv_loss_params"], )
criterion_kl = KLDivergenceLoss()
print("criterions done!")
lr_schedule_g = scheduler_classes[config["generator_scheduler"]](
**config["generator_scheduler_params"])
optimizer_g = Adam(
learning_rate=lr_schedule_g,
parameters=gen_parameters,
**config["generator_optimizer_params"])
lr_schedule_d = scheduler_classes[config["discriminator_scheduler"]](
**config["discriminator_scheduler_params"])
optimizer_d = Adam(
learning_rate=lr_schedule_d,
parameters=dis_parameters,
**config["discriminator_optimizer_params"])
print("optimizers done!")
output_dir = Path(args.output_dir)
output_dir.mkdir(parents=True, exist_ok=True)
if dist.get_rank() == 0:
config_name = args.config.split("/")[-1]
# copy conf to output_dir
shutil.copyfile(args.config, output_dir / config_name)
updater = VITSUpdater(
model=model,
optimizers={
"generator": optimizer_g,
"discriminator": optimizer_d,
},
criterions={
"mel": criterion_mel,
"feat_match": criterion_feat_match,
"gen_adv": criterion_gen_adv,
"dis_adv": criterion_dis_adv,
"kl": criterion_kl,
},
schedulers={
"generator": lr_schedule_g,
"discriminator": lr_schedule_d,
},
dataloader=train_dataloader,
lambda_adv=config.lambda_adv,
lambda_mel=config.lambda_mel,
lambda_kl=config.lambda_kl,
lambda_feat_match=config.lambda_feat_match,
lambda_dur=config.lambda_dur,
generator_first=config.generator_first,
output_dir=output_dir)
evaluator = VITSEvaluator(
model=model,
criterions={
"mel": criterion_mel,
"feat_match": criterion_feat_match,
"gen_adv": criterion_gen_adv,
"dis_adv": criterion_dis_adv,
"kl": criterion_kl,
},
dataloader=dev_dataloader,
lambda_adv=config.lambda_adv,
lambda_mel=config.lambda_mel,
lambda_kl=config.lambda_kl,
lambda_feat_match=config.lambda_feat_match,
lambda_dur=config.lambda_dur,
generator_first=config.generator_first,
output_dir=output_dir)
trainer = Trainer(
updater,
stop_trigger=(config.train_max_steps, "iteration"),
out=output_dir)
if dist.get_rank() == 0:
trainer.extend(
evaluator, trigger=(config.eval_interval_steps, 'iteration'))
trainer.extend(VisualDL(output_dir), trigger=(1, 'iteration'))
trainer.extend(
Snapshot(max_size=config.num_snapshots),
trigger=(config.save_interval_steps, 'iteration'))
print("Trainer Done!")
trainer.run()
def main():
# parse args and config and redirect to train_sp
parser = argparse.ArgumentParser(description="Train a VITS model.")
parser.add_argument("--config", type=str, help="VITS config file")
parser.add_argument("--train-metadata", type=str, help="training data.")
parser.add_argument("--dev-metadata", type=str, help="dev data.")
parser.add_argument("--output-dir", type=str, help="output dir.")
parser.add_argument(
"--ngpu", type=int, default=1, help="if ngpu == 0, use cpu.")
parser.add_argument(
"--phones-dict", type=str, default=None, help="phone vocabulary file.")
args = parser.parse_args()
with open(args.config, 'rt') as f:
config = CfgNode(yaml.safe_load(f))
print("========Args========")
print(yaml.safe_dump(vars(args)))
print("========Config========")
print(config)
print(
f"master sees the world size: {dist.get_world_size()}, from pid: {os.getpid()}"
)
# dispatch
if args.ngpu > 1:
dist.spawn(train_sp, (args, config), nprocs=args.ngpu)
else:
train_sp(args, config)
if __name__ == "__main__":
main()
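# Illustrative invocation (a sketch; the file paths are assumptions, the flags come from the argparse setup above):
#   python train.py --config vits.yaml --train-metadata dump/train/metadata.jsonl \
#       --dev-metadata dump/dev/metadata.jsonl --output-dir exp/vits --phones-dict phone_id_map.txt --ngpu 1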
|
the-stack_0_21520 | """Module to map a blockchain event to a state change.
All functions that map an event to a state change must be side-effect free. If
any additional data is necessary, either from the database or the blockchain
itself, a utility should be added to `raiden.blockchain.state`, and then
called by `blockchainevent_to_statechange`.
"""
from dataclasses import dataclass
import structlog
from raiden.blockchain.events import DecodedEvent
from raiden.blockchain.state import (
ChannelSettleState,
NewChannelDetails,
get_contractreceivechannelbatchunlock_data_from_event,
get_contractreceivechannelclosed_data_from_event,
get_contractreceivechannelnew_data_from_event,
get_contractreceivechannelsettled_data_from_event,
get_contractreceiveupdatetransfer_data_from_event,
)
from raiden.constants import EMPTY_LOCKSROOT, LOCKSROOT_OF_NO_LOCKS
from raiden.network.proxies.proxy_manager import ProxyManager
from raiden.settings import MediationFeeConfig, RaidenConfig
from raiden.storage.sqlite import SerializedSQLiteStorage
from raiden.transfer.architecture import StateChange
from raiden.transfer.identifiers import CanonicalIdentifier
from raiden.transfer.state import (
ChainState,
FeeScheduleState,
NettingChannelEndState,
NettingChannelState,
SuccessfulTransactionState,
TokenNetworkState,
TransactionChannelDeposit,
TransactionExecutionStatus,
)
from raiden.transfer.state_change import (
ContractReceiveChannelBatchUnlock,
ContractReceiveChannelClosed,
ContractReceiveChannelDeposit,
ContractReceiveChannelNew,
ContractReceiveChannelSettled,
ContractReceiveChannelWithdraw,
ContractReceiveNewTokenNetwork,
ContractReceiveRouteNew,
ContractReceiveSecretReveal,
ContractReceiveUpdateTransfer,
UpdateServicesAddressesStateChange,
)
from raiden.utils.typing import (
Balance,
BlockNumber,
BlockTimeout,
Optional,
SecretRegistryAddress,
TokenAddress,
TokenNetworkAddress,
TokenNetworkRegistryAddress,
)
from raiden_contracts.constants import (
EVENT_REGISTERED_SERVICE,
EVENT_SECRET_REVEALED,
EVENT_TOKEN_NETWORK_CREATED,
ChannelEvent,
)
log = structlog.get_logger(__name__)
@dataclass(frozen=True)
class ChannelConfig:
"""Configuration options for a new channel."""
reveal_timeout: BlockTimeout
fee_schedule: FeeScheduleState
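# Illustrative construction (a sketch; the values are assumptions, the keyword names mirror the
# FeeScheduleState usage in blockchainevent_to_statechange below):
#   ChannelConfig(
#       reveal_timeout=BlockTimeout(50),
#       fee_schedule=FeeScheduleState(cap_fees=True, flat=0, proportional=0),
#   )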
def contractreceivenewtokennetwork_from_event(
event: DecodedEvent,
) -> ContractReceiveNewTokenNetwork:
data = event.event_data
args = data["args"]
token_network_address = args["token_network_address"]
token_address = TokenAddress(args["token_address"])
token_network_registry_address = TokenNetworkRegistryAddress(event.originating_contract)
return ContractReceiveNewTokenNetwork(
token_network_registry_address=token_network_registry_address,
token_network=TokenNetworkState(
address=token_network_address,
token_address=token_address,
),
transaction_hash=event.transaction_hash,
block_number=event.block_number,
block_hash=event.block_hash,
)
def contractreceiveroutenew_from_event(event: DecodedEvent) -> ContractReceiveRouteNew:
data = event.event_data
args = data["args"]
return ContractReceiveRouteNew(
canonical_identifier=CanonicalIdentifier(
chain_identifier=event.chain_id,
token_network_address=TokenNetworkAddress(event.originating_contract),
channel_identifier=args["channel_identifier"],
),
participant1=args["participant1"],
participant2=args["participant2"],
transaction_hash=event.transaction_hash,
block_number=event.block_number,
block_hash=event.block_hash,
)
def contractreceivechannelnew_from_event(
new_channel_details: NewChannelDetails, channel_config: ChannelConfig, event: DecodedEvent
) -> ContractReceiveChannelNew:
data = event.event_data
args = data["args"]
settle_timeout = args["settle_timeout"]
block_number = event.block_number
identifier = args["channel_identifier"]
token_network_address = TokenNetworkAddress(event.originating_contract)
our_state = NettingChannelEndState(new_channel_details.our_address, Balance(0))
partner_state = NettingChannelEndState(new_channel_details.partner_address, Balance(0))
open_transaction = SuccessfulTransactionState(block_number, None)
# If the node was offline for a long period, the channel may have been
# closed already, if that is the case during initialization the node will
# process the other events and update the channel's state
close_transaction: Optional[TransactionExecutionStatus] = None
settle_transaction: Optional[TransactionExecutionStatus] = None
channel_state = NettingChannelState(
canonical_identifier=CanonicalIdentifier(
chain_identifier=new_channel_details.chain_id,
token_network_address=token_network_address,
channel_identifier=identifier,
),
token_address=new_channel_details.token_address,
token_network_registry_address=new_channel_details.token_network_registry_address,
reveal_timeout=channel_config.reveal_timeout,
settle_timeout=settle_timeout,
fee_schedule=channel_config.fee_schedule,
our_state=our_state,
partner_state=partner_state,
open_transaction=open_transaction,
close_transaction=close_transaction,
settle_transaction=settle_transaction,
)
return ContractReceiveChannelNew(
channel_state=channel_state,
transaction_hash=event.transaction_hash,
block_number=block_number,
block_hash=event.block_hash,
)
def contractreceivechanneldeposit_from_event(
event: DecodedEvent, fee_config: MediationFeeConfig
) -> ContractReceiveChannelDeposit:
data = event.event_data
args = data["args"]
block_number = event.block_number
return ContractReceiveChannelDeposit(
canonical_identifier=CanonicalIdentifier(
chain_identifier=event.chain_id,
token_network_address=TokenNetworkAddress(event.originating_contract),
channel_identifier=args["channel_identifier"],
),
deposit_transaction=TransactionChannelDeposit(
args["participant"], args["total_deposit"], block_number
),
transaction_hash=event.transaction_hash,
block_number=block_number,
block_hash=event.block_hash,
fee_config=fee_config,
)
def contractreceivechannelwithdraw_from_event(
event: DecodedEvent, fee_config: MediationFeeConfig
) -> ContractReceiveChannelWithdraw:
data = event.event_data
args = data["args"]
channel_identifier = args["channel_identifier"]
participant = args["participant"]
total_withdraw = args["total_withdraw"]
return ContractReceiveChannelWithdraw(
canonical_identifier=CanonicalIdentifier(
chain_identifier=event.chain_id,
token_network_address=TokenNetworkAddress(event.originating_contract),
channel_identifier=channel_identifier,
),
total_withdraw=total_withdraw,
participant=participant,
transaction_hash=event.transaction_hash,
block_number=event.block_number,
block_hash=event.block_hash,
fee_config=fee_config,
)
def contractreceivechannelclosed_from_event(
canonical_identifier: CanonicalIdentifier, event: DecodedEvent
) -> ContractReceiveChannelClosed:
data = event.event_data
args = data["args"]
# The from address is included in the ChannelClosed event as the
# closing_participant field
return ContractReceiveChannelClosed(
transaction_from=args["closing_participant"],
canonical_identifier=canonical_identifier,
transaction_hash=event.transaction_hash,
block_number=event.block_number,
block_hash=event.block_hash,
)
def contractreceiveupdatetransfer_from_event(
channel_state: NettingChannelState, event: DecodedEvent
) -> ContractReceiveUpdateTransfer:
data = event.event_data
args = data["args"]
return ContractReceiveUpdateTransfer(
canonical_identifier=channel_state.canonical_identifier,
nonce=args["nonce"],
transaction_hash=event.transaction_hash,
block_number=event.block_number,
block_hash=event.block_hash,
)
def contractreceivechannelsettled_from_event(
channel_settle_state: ChannelSettleState, event: DecodedEvent
) -> ContractReceiveChannelSettled:
data = event.event_data
block_number = data["block_number"]
block_hash = data["block_hash"]
transaction_hash = data["transaction_hash"]
# For saving gas, LOCKSROOT_OF_NO_LOCKS is stored as EMPTY_HASH onchain
if channel_settle_state.our_locksroot == EMPTY_LOCKSROOT:
our_locksroot = LOCKSROOT_OF_NO_LOCKS
else:
our_locksroot = channel_settle_state.our_locksroot
if channel_settle_state.partner_locksroot == EMPTY_LOCKSROOT:
partner_locksroot = LOCKSROOT_OF_NO_LOCKS
else:
partner_locksroot = channel_settle_state.partner_locksroot
return ContractReceiveChannelSettled(
transaction_hash=transaction_hash,
canonical_identifier=channel_settle_state.canonical_identifier,
our_onchain_locksroot=our_locksroot,
partner_onchain_locksroot=partner_locksroot,
block_number=block_number,
block_hash=block_hash,
)
def contractreceivesecretreveal_from_event(event: DecodedEvent) -> ContractReceiveSecretReveal:
secret_registry_address = event.originating_contract
data = event.event_data
args = data["args"]
return ContractReceiveSecretReveal(
secret_registry_address=SecretRegistryAddress(secret_registry_address),
secrethash=args["secrethash"],
secret=args["secret"],
transaction_hash=event.transaction_hash,
block_number=event.block_number,
block_hash=event.block_hash,
)
def contractreceivechannelbatchunlock_from_event(
canonical_identifier: CanonicalIdentifier, event: DecodedEvent
) -> ContractReceiveChannelBatchUnlock:
data = event.event_data
args = data["args"]
return ContractReceiveChannelBatchUnlock(
canonical_identifier=canonical_identifier,
receiver=args["receiver"],
sender=args["sender"],
locksroot=args["locksroot"],
unlocked_amount=args["unlocked_amount"],
returned_tokens=args["returned_tokens"],
transaction_hash=event.transaction_hash,
block_number=event.block_number,
block_hash=event.block_hash,
)
def update_service_addresses_from_event(event: DecodedEvent) -> UpdateServicesAddressesStateChange:
data = event.event_data
args = data["args"]
return UpdateServicesAddressesStateChange(
service=args["service_address"], valid_till=args["valid_till"]
)
def blockchainevent_to_statechange(
raiden_config: RaidenConfig,
proxy_manager: ProxyManager,
raiden_storage: SerializedSQLiteStorage,
chain_state: ChainState,
event: DecodedEvent,
current_confirmed_head: BlockNumber,
) -> Optional[StateChange]: # pragma: no unittest
event_name = event.event_data["event"]
if event_name == EVENT_TOKEN_NETWORK_CREATED:
return contractreceivenewtokennetwork_from_event(event)
elif event_name == ChannelEvent.OPENED:
new_channel_details = get_contractreceivechannelnew_data_from_event(chain_state, event)
if new_channel_details is not None:
fee_config = raiden_config.mediation_fees
channel_config = ChannelConfig(
reveal_timeout=raiden_config.reveal_timeout,
fee_schedule=FeeScheduleState(
cap_fees=fee_config.cap_meditation_fees,
flat=fee_config.get_flat_fee(new_channel_details.token_address),
proportional=fee_config.get_proportional_fee(new_channel_details.token_address)
# no need to set the imbalance fee here, will be set during deposit
),
)
return contractreceivechannelnew_from_event(new_channel_details, channel_config, event)
else:
return contractreceiveroutenew_from_event(event)
elif event_name == ChannelEvent.DEPOSIT:
return contractreceivechanneldeposit_from_event(event, raiden_config.mediation_fees)
elif event_name == ChannelEvent.WITHDRAW:
return contractreceivechannelwithdraw_from_event(event, raiden_config.mediation_fees)
elif event_name == ChannelEvent.BALANCE_PROOF_UPDATED:
channel_state = get_contractreceiveupdatetransfer_data_from_event(chain_state, event)
if channel_state:
return contractreceiveupdatetransfer_from_event(channel_state, event)
elif event_name == ChannelEvent.CLOSED:
canonical_identifier = get_contractreceivechannelclosed_data_from_event(chain_state, event)
if canonical_identifier is not None:
return contractreceivechannelclosed_from_event(canonical_identifier, event)
elif event_name == ChannelEvent.SETTLED:
channel_settle_state = get_contractreceivechannelsettled_data_from_event(
proxy_manager=proxy_manager,
chain_state=chain_state,
event=event,
current_confirmed_head=current_confirmed_head,
)
if channel_settle_state:
return contractreceivechannelsettled_from_event(channel_settle_state, event)
else:
log.debug("Discarding settle event, we're not part of it", raiden_event=event)
elif event_name == EVENT_SECRET_REVEALED:
return contractreceivesecretreveal_from_event(event)
elif event_name == ChannelEvent.UNLOCKED:
canonical_identifier = get_contractreceivechannelbatchunlock_data_from_event(
chain_state, raiden_storage, event
)
if canonical_identifier is not None:
return contractreceivechannelbatchunlock_from_event(canonical_identifier, event)
elif event_name == EVENT_REGISTERED_SERVICE:
return update_service_addresses_from_event(event)
else:
log.error("Unknown event type", raiden_event=event)
return None
|
the-stack_0_21521 |
import torch
import os
import time
import json
import numpy as np
from collections import defaultdict
from speaker import Speaker
from utils import read_vocab,write_vocab,build_vocab,Tokenizer,padding_idx,timeSince, read_img_features
import utils
from env import R2RBatch
from agent import Seq2SeqAgent
from eval import Evaluation
from param import args
import warnings
warnings.filterwarnings("ignore")
from tensorboardX import SummaryWriter
log_dir = 'snap/%s' % args.name
if not os.path.exists(log_dir):
os.makedirs(log_dir)
TRAIN_VOCAB = 'tasks/R2R/data/train_vocab.txt'
TRAINVAL_VOCAB = 'tasks/R2R/data/trainval_vocab.txt'
IMAGENET_FEATURES = 'img_features/ResNet-152-imagenet.tsv'
PLACE365_FEATURES = 'img_features/ResNet-152-places365.tsv'
if args.features == 'imagenet':
features = IMAGENET_FEATURES
if args.fast_train:
name, ext = os.path.splitext(features)
features = name + "-fast" + ext
feedback_method = args.feedback # teacher or sample
print(args)
def train_speaker(train_env, tok, n_iters, log_every=500, val_envs={}):
writer = SummaryWriter(logdir=log_dir)
listner = Seq2SeqAgent(train_env, "", tok, args.maxAction)
speaker = Speaker(train_env, listner, tok)
if args.fast_train:
log_every = 40
best_bleu = defaultdict(lambda: 0)
best_loss = defaultdict(lambda: 1232)
for idx in range(0, n_iters, log_every):
interval = min(log_every, n_iters - idx)
# Train for log_every interval
speaker.env = train_env
speaker.train(interval) # Train interval iters
print()
print("Iter: %d" % idx)
# Evaluation
for env_name, (env, evaluator) in val_envs.items():
if 'train' in env_name: # Ignore the large training set for the efficiency
continue
print("............ Evaluating %s ............." % env_name)
speaker.env = env
path2inst, loss, word_accu, sent_accu = speaker.valid()
path_id = next(iter(path2inst.keys()))
print("Inference: ", tok.decode_sentence(path2inst[path_id]))
print("GT: ", evaluator.gt[str(path_id)]['instructions'])
bleu_score, precisions = evaluator.bleu_score(path2inst)
# Tensorboard log
writer.add_scalar("bleu/%s" % (env_name), bleu_score, idx)
writer.add_scalar("loss/%s" % (env_name), loss, idx)
writer.add_scalar("word_accu/%s" % (env_name), word_accu, idx)
writer.add_scalar("sent_accu/%s" % (env_name), sent_accu, idx)
writer.add_scalar("bleu4/%s" % (env_name), precisions[3], idx)
# Save the model according to the bleu score
if bleu_score > best_bleu[env_name]:
best_bleu[env_name] = bleu_score
print('Save the model with %s BEST env bleu %0.4f' % (env_name, bleu_score))
speaker.save(idx, os.path.join(log_dir, 'state_dict', 'best_%s_bleu' % env_name))
if loss < best_loss[env_name]:
best_loss[env_name] = loss
print('Save the model with %s BEST env loss %0.4f' % (env_name, loss))
speaker.save(idx, os.path.join(log_dir, 'state_dict', 'best_%s_loss' % env_name))
# Screen print out
print("Bleu 1: %0.4f Bleu 2: %0.4f, Bleu 3 :%0.4f, Bleu 4: %0.4f" % tuple(precisions))
def train(train_env, tok, n_iters, log_every=100, val_envs={}, aug_env=None):
writer = SummaryWriter(logdir=log_dir)
listner = Seq2SeqAgent(train_env, "", tok, args.maxAction)
speaker = None
if args.self_train:
speaker = Speaker(train_env, listner, tok)
if args.speaker is not None:
print("Load the speaker from %s." % args.speaker)
speaker.load(args.speaker)
start_iter = 0
if args.load is not None:
print("LOAD THE listener from %s" % args.load)
start_iter = listner.load(os.path.join(args.load))
start = time.time()
best_val = {'val_seen': {"accu": 0., "state":"", 'update':False},
'val_unseen': {"accu": 0., "state":"", 'update':False}}
if args.fast_train:
log_every = 40
for idx in range(start_iter, start_iter+n_iters, log_every):
listner.logs = defaultdict(list)
interval = min(log_every, n_iters-idx)
iter = idx + interval
# Train for log_every interval
if aug_env is None: # The default training process
listner.env = train_env
listner.train(interval, feedback=feedback_method) # Train interval iters
else:
if args.accumulate_grad:
for _ in range(interval // 2):
listner.zero_grad()
listner.env = train_env
# Train with GT data
args.ml_weight = 0.2
listner.accumulate_gradient(feedback_method)
listner.env = aug_env
# Train with Back Translation
args.ml_weight = 0.6 # Sem-Configuration
listner.accumulate_gradient(feedback_method, speaker=speaker)
listner.optim_step()
else:
for _ in range(interval // 2):
# Train with GT data
listner.env = train_env
args.ml_weight = 0.2
listner.train(1, feedback=feedback_method)
# Train with Back Translation
listner.env = aug_env
args.ml_weight = 0.6
listner.train(1, feedback=feedback_method, speaker=speaker)
# Log the training stats to tensorboard
total = max(sum(listner.logs['total']), 1)
length = max(len(listner.logs['critic_loss']), 1)
critic_loss = sum(listner.logs['critic_loss']) / total #/ length / args.batchSize
entropy = sum(listner.logs['entropy']) / total #/ length / args.batchSize
predict_loss = sum(listner.logs['us_loss']) / max(len(listner.logs['us_loss']), 1)
writer.add_scalar("loss/critic", critic_loss, idx)
writer.add_scalar("policy_entropy", entropy, idx)
writer.add_scalar("loss/unsupervised", predict_loss, idx)
writer.add_scalar("total_actions", total, idx)
writer.add_scalar("max_length", length, idx)
print("total_actions", total)
print("max_length", length)
# Run validation
loss_str = ""
for env_name, (env, evaluator) in val_envs.items():
listner.env = env
# Get validation loss under the same conditions as training
iters = None if args.fast_train or env_name != 'train' else 20 # 20 * 64 = 1280
# Get validation distance from goal under test evaluation conditions
listner.test(use_dropout=False, feedback='argmax', iters=iters)
result = listner.get_results()
score_summary, _ = evaluator.score(result)
loss_str += ", %s " % env_name
for metric,val in score_summary.items():
if metric in ['success_rate']:
writer.add_scalar("accuracy/%s" % env_name, val, idx)
if env_name in best_val:
if val > best_val[env_name]['accu']:
best_val[env_name]['accu'] = val
best_val[env_name]['update'] = True
loss_str += ', %s: %.3f' % (metric, val)
for env_name in best_val:
if best_val[env_name]['update']:
best_val[env_name]['state'] = 'Iter %d %s' % (iter, loss_str)
best_val[env_name]['update'] = False
listner.save(idx, os.path.join("snap", args.name, "state_dict", "best_%s" % (env_name)))
print(('%s (%d %d%%) %s' % (timeSince(start, float(iter)/n_iters),
iter, float(iter)/n_iters*100, loss_str)))
if iter % 1000 == 0:
print("BEST RESULT TILL NOW")
for env_name in best_val:
print(env_name, best_val[env_name]['state'])
if iter % 50000 == 0:
listner.save(idx, os.path.join("snap", args.name, "state_dict", "Iter_%06d" % (iter)))
listner.save(idx, os.path.join("snap", args.name, "state_dict", "LAST_iter%d" % (idx)))
def valid(train_env, tok, val_envs={}):
agent = Seq2SeqAgent(train_env, "", tok, args.maxAction)
print("Loaded the listener model at iter %d from %s" % (agent.load(args.load), args.load))
for env_name, (env, evaluator) in val_envs.items():
agent.logs = defaultdict(list)
agent.env = env
iters = None
agent.test(use_dropout=False, feedback='argmax', iters=iters)
result = agent.get_results()
if env_name != '':
score_summary, _ = evaluator.score(result)
loss_str = "Env name: %s" % env_name
for metric,val in score_summary.items():
loss_str += ', %s: %.4f' % (metric, val)
print(loss_str)
if args.submit:
json.dump(
result,
open(os.path.join(log_dir, "submit_%s.json" % env_name), 'w'),
sort_keys=True, indent=4, separators=(',', ': ')
)
def beam_valid(train_env, tok, val_envs={}):
listener = Seq2SeqAgent(train_env, "", tok, args.maxAction)
speaker = Speaker(train_env, listener, tok)
if args.speaker is not None:
print("Load the speaker from %s." % args.speaker)
speaker.load(args.speaker)
print("Loaded the listener model at iter % d" % listener.load(args.load))
final_log = ""
for env_name, (env, evaluator) in val_envs.items():
listener.logs = defaultdict(list)
listener.env = env
listener.beam_search_test(speaker)
results = listener.results
def cal_score(x, alpha, avg_speaker, avg_listener):
speaker_score = sum(x["speaker_scores"]) * alpha
if avg_speaker:
speaker_score /= len(x["speaker_scores"])
# normalizer = sum(math.log(k) for k in x['listener_actions'])
normalizer = 0.
listener_score = (sum(x["listener_scores"]) + normalizer) * (1-alpha)
if avg_listener:
listener_score /= len(x["listener_scores"])
return speaker_score + listener_score
if args.param_search:
# Search for the best speaker / listener ratio
interval = 0.01
logs = []
for avg_speaker in [False, True]:
for avg_listener in [False, True]:
for alpha in np.arange(0, 1 + interval, interval):
result_for_eval = []
for key in results:
result_for_eval.append({
"instr_id": key,
"trajectory": max(results[key]['paths'],
key=lambda x: cal_score(x, alpha, avg_speaker, avg_listener)
)['trajectory']
})
score_summary, _ = evaluator.score(result_for_eval)
for metric,val in score_summary.items():
if metric in ['success_rate']:
print("Avg speaker %s, Avg listener %s, For the speaker weight %0.4f, the result is %0.4f" %
(avg_speaker, avg_listener, alpha, val))
logs.append((avg_speaker, avg_listener, alpha, val))
tmp_result = "Env Name %s\n" % (env_name) + \
"Avg speaker %s, Avg listener %s, For the speaker weight %0.4f, the result is %0.4f\n" % max(logs, key=lambda x: x[3])
print(tmp_result)
# print("Env Name %s" % (env_name))
# print("Avg speaker %s, Avg listener %s, For the speaker weight %0.4f, the result is %0.4f" %
# max(logs, key=lambda x: x[3]))
final_log += tmp_result
print()
else:
avg_speaker = True
avg_listener = True
alpha = args.alpha
result_for_eval = []
for key in results:
result_for_eval.append({
"instr_id": key,
"trajectory": [(vp, 0, 0) for vp in results[key]['dijk_path']] + \
max(results[key]['paths'],
key=lambda x: cal_score(x, alpha, avg_speaker, avg_listener)
)['trajectory']
})
# result_for_eval = utils.add_exploration(result_for_eval)
score_summary, _ = evaluator.score(result_for_eval)
if env_name != 'test':
loss_str = "Env Name: %s" % env_name
for metric, val in score_summary.items():
if metric in ['success_rate']:
print("Avg speaker %s, Avg listener %s, For the speaker weight %0.4f, the result is %0.4f" %
(avg_speaker, avg_listener, alpha, val))
loss_str += ",%s: %0.4f " % (metric, val)
print(loss_str)
print()
if args.submit:
json.dump(
result_for_eval,
open(os.path.join(log_dir, "submit_%s.json" % env_name), 'w'),
sort_keys=True, indent=4, separators=(',', ': ')
)
print(final_log)
def setup():
torch.manual_seed(1)
torch.cuda.manual_seed(1)
# Check for vocabs
if not os.path.exists(TRAIN_VOCAB):
write_vocab(build_vocab(splits=['train']), TRAIN_VOCAB)
if not os.path.exists(TRAINVAL_VOCAB):
write_vocab(build_vocab(splits=['train','val_seen','val_unseen']), TRAINVAL_VOCAB)
def train_val():
''' Train on the training set, and validate on seen and unseen splits. '''
# args.fast_train = True
setup()
# Create a batch training environment that will also preprocess text
vocab = read_vocab(TRAIN_VOCAB)
tok = Tokenizer(vocab=vocab, encoding_length=args.maxInput)
feat_dict = read_img_features(features)
featurized_scans = set([key.split("_")[0] for key in list(feat_dict.keys())])
train_env = R2RBatch(feat_dict, batch_size=args.batchSize, splits=['train'], tokenizer=tok)
from collections import OrderedDict
val_env_names = ['val_unseen', 'val_seen']
if args.submit:
val_env_names.append('test')
else:
pass
#val_env_names.append('train')
if not args.beam:
val_env_names.append("train")
val_envs = OrderedDict(
((split,
(R2RBatch(feat_dict, batch_size=args.batchSize, splits=[split], tokenizer=tok),
Evaluation([split], featurized_scans, tok))
)
for split in val_env_names
)
)
if args.train == 'listener':
train(train_env, tok, args.iters, val_envs=val_envs)
elif args.train == 'validlistener':
if args.beam:
beam_valid(train_env, tok, val_envs=val_envs)
else:
valid(train_env, tok, val_envs=val_envs)
elif args.train == 'speaker':
train_speaker(train_env, tok, args.iters, val_envs=val_envs)
elif args.train == 'validspeaker':
valid_speaker(tok, val_envs)
else:
assert False
def valid_speaker(tok, val_envs):
import tqdm
listner = Seq2SeqAgent(None, "", tok, args.maxAction)
speaker = Speaker(None, listner, tok)
speaker.load(args.load)
for env_name, (env, evaluator) in val_envs.items():
if env_name == 'train':
continue
print("............ Evaluating %s ............." % env_name)
speaker.env = env
path2inst, loss, word_accu, sent_accu = speaker.valid(wrapper=tqdm.tqdm)
path_id = next(iter(path2inst.keys()))
print("Inference: ", tok.decode_sentence(path2inst[path_id]))
print("GT: ", evaluator.gt[path_id]['instructions'])
pathXinst = list(path2inst.items())
name2score = evaluator.lang_eval(pathXinst, no_metrics={'METEOR'})
score_string = " "
for score_name, score in name2score.items():
score_string += "%s_%s: %0.4f " % (env_name, score_name, score)
print("For env %s" % env_name)
print(score_string)
print("Average Length %0.4f" % utils.average_length(path2inst))
def train_val_augment():
"""
Train the listener with the augmented data
"""
setup()
# Create a batch training environment that will also preprocess text
vocab = read_vocab(TRAIN_VOCAB)
tok = Tokenizer(vocab=vocab, encoding_length=args.maxInput)
# Load the env img features
feat_dict = read_img_features(features)
featurized_scans = set([key.split("_")[0] for key in list(feat_dict.keys())])
# Load the augmentation data
aug_path = args.aug
# Create the training environment
train_env = R2RBatch(feat_dict, batch_size=args.batchSize,
splits=['train'], tokenizer=tok)
aug_env = R2RBatch(feat_dict, batch_size=args.batchSize,
splits=[aug_path], tokenizer=tok, name='aug')
# Printing out the statistics of the dataset
stats = train_env.get_statistics()
print("The training data size is: %d" % train_env.size())
print("The average instruction length of the dataset is %0.4f." % (stats['length']))
print("The average action length of the dataset is %0.4f." % (stats['path']))
stats = aug_env.get_statistics()
print("The augmentation data size is %d" % aug_env.size())
print("The average instruction length of the dataset is %0.4f." % (stats['length']))
print("The average action length of the dataset is %0.4f." % (stats['path']))
# Setup the validation data
val_envs = {split: (R2RBatch(feat_dict, batch_size=args.batchSize, splits=[split],
tokenizer=tok), Evaluation([split], featurized_scans, tok))
for split in ['train', 'val_seen', 'val_unseen']}
# Start training
train(train_env, tok, args.iters, val_envs=val_envs, aug_env=aug_env)
if __name__ == "__main__":
if args.train in ['speaker', 'rlspeaker', 'validspeaker',
'listener', 'validlistener']:
train_val()
elif args.train == 'auglistener':
train_val_augment()
else:
assert False
|
the-stack_0_21522 | import random
import copy
import numpy as np
# Basic parameters of a 117M GPT2 network
gpt2_small = {
"n_head": 12,
"encoder_path": "gs://openwebtext/stuff/encoder",
"n_vocab": 50257,
"embed_dropout": 0.1,
"lr": 0.00025,
"warmup_steps": 2000,
"beta1": 0.9,
"beta2": 0.98,
"epsilon": 1e-9,
"opt_name": "adam",
"weight_decay": 0.01,
"train_batch_size": 32,
"attn_dropout": 0.1,
"train_steps": 10000,
"eval_steps": 10,
"max_steps": 500000,
"data_path": "gs://connors-datasets/openwebtext/",
"scale": 0.2886751345948129,
"res_dropout": 0.1,
"predict_batch_size": 1,
"eval_batch_size": 32,
"iterations": 500,
"n_embd": 768,
"input": "openwebtext",
"model": "GPT2",
"model_path": "gs://connors-models/GPT2-117M",
"n_ctx": 1024,
"predict_path": "logs/predictions.txt",
"n_layer": 12
}
# Running parameters: each experiment needs a unique name, whether the TPU should be preemptible,
# what type of TPU to use (GPU support TODO), and the actual model parameters
experiment_base = {
"name": "gpt2_small",
"preemptible": True,
"accelerator_type": "v2-8",
"model_params": gpt2_small,
}
# A class defining a hyperparameter to vary
class HyperParameter(object):
def __init__(self, name, distribution, model=True, dtype=None, values=None):
self.name = name # Name of the parameter to vary
self.distribution = distribution # Which distribution should be used to generate the values, one of grid, sample, uniform or geometric
self.values = values # Values to use by the distribution
self.dtype = dtype # Whether to generate floats or ints, if necessary
self.model = model # Whether the parameter belongs in the model_params or not
if distribution == "grid" or distribution == "sample":
assert type(values) == type([])
self.values = values
self.index = 0
elif distribution == "uniform":
assert type(values) == type([])
assert len(values) == 2
elif distribution == "geometric":
assert type(values) == type([])
assert len(values) == 2
def get_value(self):
# Simply iterate over a list of values
if self.distribution == "grid":
if self.index < len(self.values):
val = self.values[self.index]
self.index += 1
return val
else:
raise RuntimeError("{} ran out of values!".format(self.name))
# Sample randomly from a list of values
elif self.distribution == "sample":
return random.choice(self.values) # random.sample requires a sample size; choice picks a single value
# Sample from a uniform distribution
elif self.distribution == "uniform":
if self.dtype == "float":
return random.uniform(self.values[0], self.values[1])
else:
return int(random.uniform(self.values[0], self.values[1]))
# Sample from a "geometric" distribution
# A sample is drawn from the uniform distribution from log(A) to log(B) and then exponentiated to generate the value
elif self.distribution == "geometric":
if self.dtype == "float":
return np.exp(np.random.uniform(np.log(self.values[0]), np.log(self.values[1])))
else:
return int(np.exp(np.random.uniform(np.log(self.values[0]), np.log(self.values[1]))))
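# Illustrative sketch (not in the original script): a "geometric" parameter draws log-uniformly
# between its two bounds, e.g.
#   lr = HyperParameter("lr", "geometric", values=[1e-5, 1e-1], dtype="float")
#   lr.get_value()  # a float in [1e-5, 1e-1], uniformly distributed in log space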
# Given the base parameters of the model, list of parameter to vary and a number, generates number amount of experiments to run
def generate_experiments(base, parameters, number):
experiments = []
for i in range(number):
ex = copy.deepcopy(base) # deep copy so each experiment gets its own model_params dict
ex["name"] = ex["name"] + "-" + str(i)
ex["model_params"]["model_path"] = ex["model_params"]["model_path"] + "-" + str(i) # gpt2_small defines "model_path", not "model_dir"
for p in parameters:
if p.model:
ex["model_params"][p.name] = p.get_value()
else:
ex[p.name] = p.get_value()
experiments.append(ex)
return experiments
parameters = [
HyperParameter("lr", "geometric", values=[1e-5, 1e-1], dtype="float"),
HyperParameter("input", "sample", values=["openwebtext", "openwebtext_long", "openwebtext_longbiased"]),
HyperParameter("n_layers", "uniform", values=[12, 24])
]
# This is what is exported to Overrunner to run
experiments = generate_experiments(experiment_base, parameters, 10)
|
the-stack_0_21524 | import re
import copy
import random as rn
__all__ = ['Markold']
class Markold:
def __init__(self):
self.sentences = []
self.saved_matrixes = {}
def import_sentences(self, file):
self.sentences = []
self.saved_matrixes = {}
if isinstance(file, list):
self.sentences = file
else:
with open(file, 'r', encoding='utf-8') as input_file:
self.sentences = input_file.read().split('\n')
def beautify(self, sentences):
""" Reformats sentences by adding a space before and after punctuation
(to treat them as regular sentence parts). """
translator = str.maketrans({key: " {0} ".format(key) for key in ','})
if isinstance(sentences, str):
return re.sub(r'\s+', ' ', sentences.translate(translator)).strip()
elif isinstance(sentences, list):
new_lst = []
for sentence in sentences:
new_lst.append(re.sub(r'\s+', ' ', sentence.translate(translator)).strip())
return new_lst
return sentences
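# Illustrative example (sketch): beautify("Hello, world") returns "Hello , world",
# so the comma is treated as its own sentence part.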
def reformate_sentence(self, sentence):
""" Reformats the sentence so that punctuation has the correct spacing. """
sentence = sentence.replace(' , ', ', ')
sentence = sentence.replace(' \' ', '\'')
return sentence
def get_key(self, words, i, n):
""" Returns a key consisting of n words. """
new_key = []
for j in range(n):
new_key.append(words[i+j])
return tuple(new_key)
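# Illustrative example (sketch): get_key(['the', 'quick', 'brown', 'fox'], 1, 2) returns ('quick', 'brown').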
def compute_words(self, sentences, nb):
""" Returns the set of unique nb-word tuples across all the sentences. """
n_words_set = set()
for sentence in sentences:
words = sentence.split(' ')
if len(words) > (nb - 1):
for i, _ in enumerate(words):
if i < len(words) - nb + 1:
n_words_set.add(self.get_key(words, i, nb))
return n_words_set
def compute_word_occurence(self, sentences, word_order, n):
""" Computes the number of occurrences of each word following each n-word key. """
matrix = {}
for word in word_order:
matrix[word] = {}
for sentence in sentences:
splitted = sentence.split(' ')
if len(splitted) > n - 1:
for i, word in enumerate(splitted):
if i < len(splitted) - n + 1:
key = self.get_key(splitted, i, n)
else:
break
if i == 0:
if 'BEGIN' in matrix[key]:
matrix[key]['BEGIN'] += 1
else:
matrix[key]['BEGIN'] = 1
if i == len(splitted) - n:
if 'END' in matrix[key]:
matrix[key]['END'] += 1
else:
matrix[key]['END'] = 1
if i < len(splitted) - n:
if splitted[i+n] in matrix[key]:
matrix[key][splitted[i+n]] += 1
else:
matrix[key][splitted[i+n]] = 1
return matrix
def cumulative_probs(self, lst):
""" Orders a list of probabilities and transforms them into cumulative probabilities. """
total_sum = sum([x[1] for x in lst])
# Normalising (-> probs)
lst = [[x[0], x[1] / total_sum] for x in lst]
# Ordering probs
lst = sorted(lst, key=lambda x: x[1], reverse=False)
# Cumulative probs
for i, _ in enumerate(lst):
if i != 0:
lst[i][1] += lst[i-1][1]
return lst
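# Illustrative example (sketch): cumulative_probs([['a', 2], ['b', 6]]) first normalises to
# [['a', 0.25], ['b', 0.75]], orders by probability, then accumulates to [['a', 0.25], ['b', 1.0]].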
def return_selected(self, random_choice, lst):
""" Chooses a random word from a probability list. """
for i, word in enumerate(lst):
if i == 0:
if random_choice < word[1]:
return word[0]
if i == len(lst) - 1:
return word[0]
else:
if lst[i-1][1] <= random_choice < lst[i][1]:
return word[0]
# Sometimes, lst is empty
return lst[0][0]
def normalise_word_matrix(self, word_matrix):
""" Normalises the number of occurrences of each word (from occurrences to probabilities). """
new_matrix = copy.deepcopy(word_matrix)
for word, probs in new_matrix.items():
total_sum = sum(probs.values())
for next_word_prob in probs.keys():
new_matrix[word][next_word_prob] = new_matrix[word][next_word_prob] / total_sum
return new_matrix
def compute_word_matrix(self, markov):
# Add spaces before and after quotes and commas
trimmed = self.beautify(self.sentences)
# Get all unique words across all sentences
word_set = self.compute_words(trimmed, markov)
# Compute the number of occurence of each words
word_prob_matrix = self.compute_word_occurence(trimmed, word_set, markov)
# Transforms occurences into cumulative probabilities
word_prob_matrix_normalised = self.normalise_word_matrix(word_prob_matrix)
self.saved_matrixes[markov] = {'wpm': word_prob_matrix, 'wpmn': word_prob_matrix_normalised}
def generate_sentence(self, markov, min_word_length=0, max_word_length=50):
""" Generates a sentence from a list of word probabilities. """
new_sentence = ''
first_word = []
last_word = ''
if not markov in self.saved_matrixes.keys():
self.compute_word_matrix(markov)
wpm = self.saved_matrixes[markov]['wpm']
wpmn = self.saved_matrixes[markov]['wpmn']
# Get every couple of words that can start a sentence
for word, next_word_proba in wpm.items():
if 'BEGIN' in next_word_proba:
first_word.append([word, wpm[word]['BEGIN']])
first_word = self.cumulative_probs(first_word)
# Choose a random one
random_choice = rn.random()
first_choice = self.return_selected(random_choice, first_word)
# We got our first couple of words. Yay!
new_sentence += ' '.join(first_choice) + ' '
last_word = first_choice
iteration = 0
while iteration <= max_word_length or 'END' not in next_word_proba.keys():
# BUG: sometimes the algorithm gets stuck in an infinite loop between 2 words
if iteration > max_word_length * 2:
print(f'WARNING: endless loop between two words, invalid sentence (ditched)')
return ''
# BUG: shouldn't happen (but it did)
try:
# Get the probable words following the last one
next_word_proba = wpmn[last_word]
except KeyError:
break
next_word_proba_lst = [[k, v] for k, v in next_word_proba.items() if k != 'BEGIN']
next_word_proba_lst = self.cumulative_probs(next_word_proba_lst)
# If we have reached the maximum number of words allowed and we can end here, do it
if iteration > max_word_length and any(x[0] == 'END' for x in next_word_proba_lst):
break
# Else, pick a random one
else:
random_choice = rn.random()
choice = self.return_selected(random_choice, next_word_proba_lst)
# If we chose that this is the end of the sentence
if choice == 'END':
# If we reached the minimal number of words in the sentence, we're done
if iteration >= min_word_length:
break
else:
# Else, check if there are other possibilities than END
removed_end_cumulative = [x for x in next_word_proba_lst if x[0] != 'END']
if removed_end_cumulative:
random_choice = rn.random()
choice = tuple([self.return_selected(random_choice, removed_end_cumulative)])
# Else, we have no other choice than finishing the sentence
else:
break
                # Else, take a random couple of words beginning with the chosen word
else:
lst = [k for k in wpm.keys() if k[0] == choice]
if lst:
choice = lst[rn.randint(0, len(lst) - 1)]
else:
break
# Continue until we have reached the maximum number of words allowed
new_sentence += ' '.join(choice) + ' '
last_word = choice
iteration += 1
return new_sentence
def generate_multiple_sentences(self, markov, n, min_word_length=0, max_word_length=50, to_output=None, to_print=None):
generated_sentences = []
for x in range(n):
print(f'Generating sentence {x}...', end=' ')
generated_sentences.append(self.reformate_sentence(self.generate_sentence(markov, min_word_length=min_word_length,
max_word_length=max_word_length)))
print(f'sentence generated.')
if to_output:
output_file = open(to_output, 'a', encoding='utf-8')
if to_output or to_print:
for sentence in generated_sentences:
if to_print:
print(sentence)
if to_output:
output_file.write(sentence + '\n')
if to_output:
output_file.close()
return generated_sentences
|
the-stack_0_21525 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import get_request_site_address, encode
from frappe.model.document import Document
from urllib import quote
from frappe.website.router import resolve_route
class WebsiteSettings(Document):
def validate(self):
self.validate_top_bar_items()
self.validate_footer_items()
self.validate_home_page()
def validate_home_page(self):
if frappe.flags.in_install:
return
if self.home_page and not resolve_route(self.home_page):
frappe.throw(_("Invalid Home Page") + " (Standard pages - index, login, products, blog, about, contact)")
def validate_top_bar_items(self):
"""validate url in top bar items"""
for top_bar_item in self.get("top_bar_items"):
if top_bar_item.parent_label:
parent_label_item = self.get("top_bar_items", {"label": top_bar_item.parent_label})
if not parent_label_item:
# invalid item
frappe.throw(_("{0} does not exist in row {1}").format(top_bar_item.parent_label, top_bar_item.idx))
elif not parent_label_item[0] or parent_label_item[0].url:
# parent cannot have url
frappe.throw(_("{0} in row {1} cannot have both URL and child items").format(top_bar_item.parent_label,
top_bar_item.idx))
def validate_footer_items(self):
"""clear parent label in footer"""
for footer_item in self.get("footer_items"):
footer_item.parent_label = None
def on_update(self):
# make js and css
# clear web cache (for menus!)
from frappe.website.render import clear_cache
clear_cache()
# clears role based home pages
frappe.clear_cache()
def get_website_settings():
hooks = frappe.get_hooks()
all_top_items = frappe.db.sql("""\
select * from `tabTop Bar Item`
where parent='Website Settings' and parentfield='top_bar_items'
order by idx asc""", as_dict=1)
top_items = [d for d in all_top_items if not d['parent_label']]
# attach child items to top bar
for d in all_top_items:
if d['parent_label']:
for t in top_items:
if t['label']==d['parent_label']:
if not 'child_items' in t:
t['child_items'] = []
t['child_items'].append(d)
break
context = frappe._dict({
'top_bar_items': top_items,
'footer_items': frappe.db.sql("""\
select * from `tabTop Bar Item`
where parent='Website Settings' and parentfield='footer_items'
order by idx asc""", as_dict=1),
"post_login": [
{"label": "Reset Password", "url": "update-password", "icon": "icon-key"},
{"label": "Logout", "url": "/?cmd=web_logout", "icon": "icon-signout"}
]
})
settings = frappe.get_doc("Website Settings", "Website Settings")
for k in ["banner_html", "brand_html", "copyright", "twitter_share_via",
"favicon", "facebook_share", "google_plus_one", "twitter_share", "linked_in_share",
"disable_signup", "no_sidebar"]:
if hasattr(settings, k):
context[k] = settings.get(k)
if not context.get("favicon"):
context["favicon"] = "/assets/frappe/images/favicon.ico"
if settings.address:
context["footer_address"] = settings.address
for k in ["facebook_share", "google_plus_one", "twitter_share", "linked_in_share",
"disable_signup"]:
context[k] = int(context.get(k) or 0)
if frappe.request:
context.url = quote(str(get_request_site_address(full_address=True)), safe="/:")
context.encoded_title = quote(encode(context.title or ""), str(""))
for update_website_context in hooks.update_website_context or []:
frappe.get_attr(update_website_context)(context)
context.web_include_js = hooks.web_include_js or []
context.web_include_css = hooks.web_include_css or []
return context
|
the-stack_0_21526 | """
mtbtn module. Contains the Mt3dBtn class. Note that the user can access
the Mt3dBtn class as `flopy.mt3d.Mt3dBtn`.
Additional information for this MT3DMS package can be found in the MT3DMS
User's Manual.
"""
import numpy as np
from ..pakbase import Package
from ..utils import Util2d, Util3d
import warnings
class Mt3dBtn(Package):
"""
Basic Transport Package Class.
Parameters
----------
model : model object
The model object (of type :class:`flopy.mt3dms.mt.Mt3dms`) to which
this package will be added.
MFStyleArr : str
Specifies whether or not to read arrays using the MODFLOW array reader
format or the original MT3DMS array reader
DRYCell : str
Specifies whether or not to route mass through dry cells. When MF-NWT
is used to generate the flow-transport link file, this is a distinct
possibility.
Legacy99Stor : str
Specifies whether or not to use the storage formulation used in MT3DMS
FTLPrint : str
Specifies if flow-transport link terms (cell-by-cell flows) should be
echoed to the MT3D-USGS listing file.
NoWetDryPrint : str
Specifies whether or not to suppress wet/dry messaging in the MT3D-USGS
listing file.
    OmitDryBud : str
Specifies whether or not to include the mass flux terms through dry
cells in the mass budget written to the listing file.
AltWTSorb : str
Specifies whether or not to use the MT3DMS formulation (this keyword
omitted) for the solid phase, whereby the entire cell thickness is
available for interacting with the aqueous phase, even though the
aqueous phase may only occupy a portion of the cell's thickness. When
used, only the saturated portion of the cell is available for sorbing
ncomp : int
The total number of chemical species in the simulation. (default is
None, will be changed to 1 if sconc is single value)
mcomp : int
The total number of 'mobile' species (default is 1). mcomp must be
equal or less than ncomp.
tunit : str
The name of unit for time (default is 'D', for 'days'). Used for
identification purposes only.
lunit : str
The name of unit for length (default is 'M', for 'meters'). Used for
identification purposes only.
munit : str
The name of unit for mass (default is 'KG', for 'kilograms'). Used for
identification purposes only.
prsity : float or array of floats (nlay, nrow, ncol)
The effective porosity of the porous medium in a single porosity
        system, or the mobile porosity in a dual-porosity medium (the immobile
        porosity is defined through the Chemical Reaction Package). (default is
        0.30).
icbund : int or array of ints (nlay, nrow, ncol)
The icbund array specifies the boundary condition type for solute
species (shared by all species). If icbund = 0, the cell is an inactive
concentration cell; If icbund < 0, the cell is a constant-concentration
cell; If icbund > 0, the cell is an active concentration cell where the
concentration value will be calculated. (default is 1).
sconc : float, array of (nlay, nrow, ncol), or filename
sconc is the starting concentration for the first species. To specify
starting concentrations for other species in a multi-species
simulation, include additional keywords, such as sconc2, sconc3, and
so forth.
cinact : float
The value for indicating an inactive concentration cell. (default is
1e30).
thkmin : float
The minimum saturated thickness in a cell, expressed as the decimal
fraction of its thickness, below which the cell is considered inactive.
(default is 0.01).
ifmtcn : int
A flag/format code indicating how the calculated concentration should
be printed to the standard output text file. Format codes for printing
are listed in Table 3 of the MT3DMS manual. If ifmtcn > 0 printing is
in wrap form; ifmtcn < 0 printing is in strip form; if ifmtcn = 0
concentrations are not printed. (default is 0).
ifmtnp : int
A flag/format code indicating how the number of particles should
be printed to the standard output text file. The convention is
the same as for ifmtcn. (default is 0).
ifmtrf : int
A flag/format code indicating how the calculated retardation factor
should be printed to the standard output text file. The convention is
the same as for ifmtcn. (default is 0).
ifmtdp : int
A flag/format code indicating how the distance-weighted dispersion
coefficient should be printed to the standard output text file. The
convention is the same as for ifmtcn. (default is 0).
savucn : bool
A logical flag indicating whether the concentration solution should be
saved in an unformatted file. (default is True).
nprs : int
A flag indicating (i) the frequency of the output and
(ii) whether the output frequency is specified in terms
of total elapsed simulation time or the transport step number. If
nprs > 0 results will be saved at the times as specified in timprs;
if nprs = 0, results will not be saved except at the end of simulation;
if NPRS < 0, simulation results will be saved whenever the number of
transport steps is an even multiple of nprs. (default is 0).
timprs : list of floats
The total elapsed time at which the simulation results are saved. The
number of entries in timprs must equal nprs. (default is None).
obs: array of int
An array with the cell indices (layer, row, column) for which the
concentration is to be printed at every transport step. (default is
None). obs indices must be entered as zero-based numbers as a 1 is
added to them before writing to the btn file.
nprobs: int
An integer indicating how frequently the concentration at the specified
observation points should be saved. (default is 1).
chkmas: bool
A logical flag indicating whether a one-line summary of mass balance
information should be printed. (default is True).
nprmas: int
An integer indicating how frequently the mass budget information
should be saved. (default is 1).
dt0: float
The user-specified initial transport step size within each time-step
of the flow solution. (default is 0).
mxstrn: int
The maximum number of transport steps allowed within one time step
of the flow solution. (default is 50000).
ttsmult: float
The multiplier for successive transport steps within a flow time-step
if the GCG solver is used and the solution option for the advection
term is the standard finite-difference method. (default is 1.0).
ttsmax: float
The maximum transport step size allowed when transport step size
multiplier TTSMULT > 1.0. (default is 0).
species_names: list of str
A list of names for every species in the simulation.
extension : string
Filename extension (default is 'btn')
unitnumber : int
File unit number (default is None).
filenames : str or list of str
Filenames to use for the package. If filenames=None the package name
will be created using the model name and package extension. If a
single string is passed the package will be set to the string.
Default is None.
Attributes
----------
Methods
-------
See Also
--------
Notes
-----
Examples
--------
>>> import flopy
>>> mt = flopy.mt3dms.Mt3dms()
>>> btn = flopy.mt3dms.Mt3dBtn(mt)
"""
def __init__(self, model, MFStyleArr=False, DRYCell=False,
Legacy99Stor=False, FTLPrint=False, NoWetDryPrint=False,
OmitDryBud=False, AltWTSorb=False, nlay=None, nrow=None,
ncol=None, nper=None, ncomp=1, mcomp=1, tunit='D', lunit='M',
munit='KG', laycon=None, delr=None, delc=None, htop=None,
dz=None, prsity=0.30, icbund=1,
sconc=0.0, cinact=1e30, thkmin=0.01, ifmtcn=0, ifmtnp=0,
ifmtrf=0, ifmtdp=0, savucn=True, nprs=0, timprs=None,
obs=None, nprobs=1, chkmas=True, nprmas=1,
perlen=None, nstp=None, tsmult=None, ssflag=None, dt0=0,
mxstrn=50000, ttsmult=1.0, ttsmax=0,
species_names=None, extension='btn',
unitnumber=None, filenames=None,
**kwargs):
if unitnumber is None:
unitnumber = Mt3dBtn.defaultunit()
elif unitnumber == 0:
unitnumber = Mt3dBtn.reservedunit()
# set filenames
if filenames is None:
filenames = [None]
elif isinstance(filenames, str):
filenames = [filenames]
# Fill namefile items
name = [Mt3dBtn.ftype()]
units = [unitnumber]
extra = ['']
# set package name
fname = [filenames[0]]
# Call ancestor's init to set self.parent, extension, name and unit number
Package.__init__(self, model, extension=extension, name=name,
unit_number=units, extra=extra, filenames=fname)
# Set these variables from the Modflow model (self.parent.mf) unless
# they are specified in the constructor.
self.setmodflowvars(nlay, nrow, ncol, nper, laycon, delr, delc, htop,
dz, perlen, nstp, tsmult)
# Make the rest of the assignments
self.heading1 = '# BTN for MT3DMS, generated by Flopy.'
self.heading2 = '#'
self.MFStyleArr = MFStyleArr
if self.MFStyleArr:
model.free_format = True
model.array_format = None
self.DRYCell = DRYCell
self.Legacy99Stor = Legacy99Stor
self.FTLPrint = FTLPrint
self.NoWetDryPrint = NoWetDryPrint
self.OmitDryBud = OmitDryBud
self.AltWTSorb = AltWTSorb
self.ncomp = ncomp
self.mcomp = mcomp
self.tunit = tunit
self.lunit = lunit
self.munit = munit
self.cinact = cinact
self.thkmin = thkmin
self.ifmtcn = ifmtcn
self.ifmtnp = ifmtnp
self.ifmtrf = ifmtrf
self.ifmtdp = ifmtdp
self.savucn = savucn
self.nprs = nprs
self.timprs = timprs
if obs is not None:
if isinstance(obs, list):
obs = np.array(obs)
if obs.ndim != 2:
raise Exception(
'obs must be (or be convertible to) a 2d array')
self.obs = obs
self.nprobs = nprobs
self.chkmas = chkmas
self.nprmas = nprmas
if species_names is None:
species_names = []
self.species_names = species_names
self.prsity = Util3d(model, (self.nlay, self.nrow, self.ncol),
np.float32, prsity, name='prsity',
locat=self.unit_number[0],
array_free_format=False)
self.icbund = Util3d(model, (self.nlay, self.nrow, self.ncol),
np.int32,
icbund, name='icbund',
locat=self.unit_number[0],
array_free_format=False)
self.ssflag = ssflag
self.dt0 = Util2d(model, (self.nper,), np.float32, dt0, name='dt0',
array_free_format=False)
self.mxstrn = Util2d(model, (self.nper,), np.int32, mxstrn,
name='mxstrn')
self.ttsmult = Util2d(model, (self.nper,), np.float32, ttsmult,
name='ttmult')
self.ttsmax = Util2d(model, (self.nper,), np.float32, ttsmax,
name='ttsmax')
# Do some fancy stuff for multi-species concentrations
self.sconc = []
u3d = Util3d(model, (self.nlay, self.nrow, self.ncol), np.float32,
sconc, name='sconc1', locat=self.unit_number[0],
array_free_format=False)
self.sconc.append(u3d)
if ncomp > 1:
for icomp in range(2, ncomp + 1):
name = "sconc" + str(icomp)
val = 0.0
if name in kwargs:
val = kwargs.pop(name)
else:
print("BTN: setting sconc for component " +
str(icomp) + " to zero, kwarg name " +
name)
u3d = Util3d(model, (self.nlay, self.nrow, self.ncol),
np.float32, val, name=name,
locat=self.unit_number[0],
array_free_format=False)
self.sconc.append(u3d)
# Check to make sure that all kwargs have been consumed
if len(list(kwargs.keys())) > 0:
raise Exception("BTN error: unrecognized kwargs: " +
' '.join(list(kwargs.keys())))
# Finally add self to parent's package list and return
self.parent.add_package(self)
return
def setmodflowvars(self, nlay, nrow, ncol, nper, laycon, delr, delc, htop,
dz, perlen, nstp, tsmult):
"""
Set these variables from the MODFLOW model, if it exists
"""
# Members that may come from a modflow model (model.mf)
validmfdis = True
try:
dum = self.parent.mf.dis.nlay
mf = self.parent.mf
except:
validmfdis = False
mfvarlist = [nlay, nrow, ncol, nper, laycon, delr, delc, htop, dz,
perlen, nstp, tsmult]
if not validmfdis:
for v in mfvarlist:
s = 'BTN error. Required input is None, but no modflow model.'
s += ' If no modflow model is passed to Mt3dms, then values '
s += 'must be specified in the BTN constructor for: '
s += 'nlay, nrow, ncol, nper, laycon, delr, delc, htop, dz, '
s += 'perlen, nstp, and tsmult.'
if v is None:
raise Exception(s)
if nlay is not None:
self.nlay = nlay
else:
self.nlay = mf.dis.nlay
if nrow is not None:
self.nrow = nrow
else:
self.nrow = mf.dis.nrow
if ncol is not None:
self.ncol = ncol
else:
self.ncol = mf.dis.ncol
if nper is not None:
self.nper = nper
else:
self.nper = mf.dis.nper
nlay = self.nlay
nrow = self.nrow
ncol = self.ncol
nper = self.nper
if delr is not None:
self.delr = Util2d(self.parent, (ncol,), np.float32, delr,
name='delr',
locat=self.unit_number[0],
array_free_format=False)
else:
self.delr = Util2d(self.parent, (ncol,), np.float32,
mf.dis.delr.get_value(),
name='delr',
locat=self.unit_number[0],
array_free_format=False)
if delc is not None:
self.delc = Util2d(self.parent, (nrow,), np.float32, delc,
name='delc',
locat=self.unit_number[0])
else:
self.delc = Util2d(self.parent, (nrow,), np.float32,
mf.dis.delc.get_value(),
name='delc',
locat=self.unit_number[0],
array_free_format=False)
if htop is not None:
self.htop = Util2d(self.parent, (nrow, ncol), np.float32, htop,
name='htop',
locat=self.unit_number[0],
array_free_format=False)
else:
self.htop = Util2d(self.parent, (nrow, ncol), np.float32,
mf.dis.top.get_value(),
name='htop',
locat=self.unit_number[0],
array_free_format=False)
if dz is not None:
self.dz = Util3d(self.parent, (nlay, nrow, ncol), np.float32, dz,
name='dz',
locat=self.unit_number[0],
array_free_format=False)
else:
thickness = mf.dis.thickness.get_value()
self.dz = Util3d(self.parent, (nlay, nrow, ncol), np.float32,
thickness, name='dz',
locat=self.unit_number[0],
array_free_format=False)
if perlen is not None:
self.perlen = Util2d(self.parent, (nper,), np.float32, perlen,
name='perlen',
locat=self.unit_number[0])
else:
self.perlen = Util2d(self.parent, (nper,), np.float32,
mf.dis.perlen.get_value(),
name='perlen',
locat=self.unit_number[0])
if nstp is not None:
self.nstp = Util2d(self.parent, (nper,), np.int32, nstp,
name='nstp',
locat=self.unit_number[0])
else:
self.nstp = Util2d(self.parent, (nper,), np.int32,
mf.dis.nstp.get_value(),
name='nstp',
locat=self.unit_number[0])
if tsmult is not None:
self.tsmult = Util2d(self.parent, (nper,), np.float32, tsmult,
name='tsmult',
locat=self.unit_number[0])
else:
self.tsmult = Util2d(self.parent, (nper,), np.float32,
mf.dis.tsmult.get_value(),
name='tsmult',
locat=self.unit_number[0])
self.laycon = None
if laycon is not None:
self.laycon = Util2d(self.parent, (nlay,), np.int32, laycon,
name='laycon',
locat=self.unit_number[0])
else:
flow_package = mf.get_package('BCF6')
if flow_package is not None:
self.laycon = Util2d(self.parent, (nlay,), np.int32,
flow_package.laycon.get_value(),
name='laycon',
locat=self.unit_number[0])
else:
flow_package = mf.get_package('LPF')
if flow_package is not None:
self.laycon = Util2d(self.parent, (nlay,),
np.int32,
flow_package.laytyp.get_value(),
name='laycon',
locat=self.unit_number[0])
flow_package = mf.get_package('UPW')
if flow_package is not None:
self.laycon = Util2d(self.parent, (nlay,),
np.int32,
flow_package.laytyp.get_value(),
name='laycon',
locat=self.unit_number[0])
s = 'BTN warning. Laycon has not been set. A modflow model with a '
s += ' BCF or LPF package does not exist and laycon was not passed '
s += ' to the BTN constructor. Setting laycon to 1 (convertible).'
if self.laycon is None:
warnings.warn(s)
self.laycon = Util2d(self.parent, (nlay,), np.int32, 1,
name='laycon',
locat=self.unit_number[0])
return
def write_file(self):
"""
Write the package file
Returns
-------
None
"""
# Open file for writing
f_btn = open(self.fn_path, 'w')
# A1,2
f_btn.write('#{0:s}\n#{1:s}\n'.format(self.heading1, self.heading2))
# A3; Keywords
# Build a string of the active keywords
str1 = ''
if self.MFStyleArr:
str1 += ' MODFLOWSTYLEARRAYS'
if self.DRYCell:
str1 += ' DRYCELL'
if self.Legacy99Stor:
str1 += ' LEGACY99STORAGE'
if self.FTLPrint:
str1 += ' FTLPRINT'
if self.NoWetDryPrint:
str1 += ' NOWETDRYPRINT'
if self.OmitDryBud:
str1 += ' OMITDRYCELLBUDGET'
if self.AltWTSorb:
str1 += ' ALTWTSORB'
if str1 != '':
f_btn.write(str1 + '\n')
# A3
f_btn.write('{0:10d}{1:10d}{2:10d}{3:10d}{4:10d}{5:10d}\n'
.format(self.nlay, self.nrow, self.ncol, self.nper,
self.ncomp, self.mcomp))
# A4
f_btn.write('{0:4s}{1:4s}{2:4s}\n' \
.format(self.tunit, self.lunit, self.munit))
# A5
if (self.parent.adv != None):
f_btn.write('{0:2s}'.format('T'))
else:
f_btn.write('{0:2s}'.format('F'))
if (self.parent.dsp != None):
f_btn.write('{0:2s}'.format('T'))
else:
f_btn.write('{0:2s}'.format('F'))
if (self.parent.ssm != None):
f_btn.write('{0:2s}'.format('T'))
else:
f_btn.write('{0:2s}'.format('F'))
if (self.parent.rct != None):
f_btn.write('{0:2s}'.format('T'))
else:
f_btn.write('{0:2s}'.format('F'))
if (self.parent.gcg != None):
f_btn.write('{0:2s}'.format('T'))
else:
f_btn.write('{0:2s}'.format('F'))
f_btn.write('\n')
# A6
self.laycon.set_fmtin('(40I2)')
f_btn.write(self.laycon.string)
# A7
f_btn.write(self.delr.get_file_entry())
# A8
f_btn.write(self.delc.get_file_entry())
# A9
f_btn.write(self.htop.get_file_entry())
# A10
f_btn.write(self.dz.get_file_entry())
# A11
f_btn.write(self.prsity.get_file_entry())
# A12
f_btn.write(self.icbund.get_file_entry())
# A13
# Starting concentrations
for s in range(len(self.sconc)):
f_btn.write(self.sconc[s].get_file_entry())
# A14
f_btn.write('{0:10.0E}{1:10.2E}\n' \
.format(self.cinact, self.thkmin))
# A15
f_btn.write('{0:10d}{1:10d}{2:10d}{3:10d}' \
.format(self.ifmtcn, self.ifmtnp, self.ifmtrf,
self.ifmtdp))
if (self.savucn == True):
ss = 'T'
else:
ss = 'F'
f_btn.write('{0:>10s}\n'.format(ss))
# A16, A17
if self.timprs is None:
f_btn.write('{0:10d}\n'.format(self.nprs))
else:
f_btn.write('{0:10d}\n'.format(len(self.timprs)))
timprs = Util2d(self.parent, (len(self.timprs),),
np.float32, self.timprs, name='timprs',
fmtin='(8G10.4)')
timprs.format.fortran = '(8G10.4)'
f_btn.write(timprs.string)
# A18, A19
if self.obs is None:
f_btn.write('{0:10d}{1:10d}\n'.format(0, self.nprobs))
else:
nobs = self.obs.shape[0]
f_btn.write('{0:10d}{1:10d}\n'.format(nobs, self.nprobs))
for i in range(nobs):
f_btn.write('{0:10d}{1:10d}{2:10d}\n' \
.format(self.obs[i, 0] + 1, self.obs[i, 1] + 1,
self.obs[i, 2] + 1))
# A20 CHKMAS, NPRMAS
if (self.chkmas == True):
ss = 'T'
else:
ss = 'F'
f_btn.write('{0:>10s}{1:10d}\n'.format(ss, self.nprmas))
# A21, 22, 23 PERLEN, NSTP, TSMULT
for t in range(self.nper):
s = '{0:10G}{1:10d}{2:10G}'.format(self.perlen[t],
self.nstp[t],
self.tsmult[t])
if self.ssflag is not None:
s += ' ' + self.ssflag[t]
s += '\n'
f_btn.write(s)
f_btn.write('{0:10.4G}{1:10d}{2:10.4G}{3:10.4G}\n'
.format(self.dt0[t], self.mxstrn[t],
self.ttsmult[t], self.ttsmax[t]))
f_btn.close()
return
@staticmethod
def load(f, model, ext_unit_dict=None):
"""
Load an existing package.
Parameters
----------
f : filename or file handle
File to load.
model : model object
The model object (of type :class:`flopy.mt3d.mt.Mt3dms`) to
which this package will be added.
ext_unit_dict : dictionary, optional
If the arrays in the file are specified using EXTERNAL,
or older style array control records, then `f` should be a file
handle. In this case ext_unit_dict is required, which can be
constructed using the function
:class:`flopy.utils.mfreadnam.parsenamefile`.
Returns
-------
btn : Mt3dBtn object
Mt3dBtn object.
Examples
--------
>>> import flopy
>>> mt = flopy.mt3d.Mt3dms()
>>> btn = flopy.mt3d.Mt3dBtn.load('test.btn', mt)
"""
if not hasattr(f, 'read'):
filename = f
f = open(filename, 'r')
# A1
if model.verbose:
print(' loading COMMENT LINES A1 AND A2...')
line = f.readline()
if model.verbose:
            print('A1: {}'.format(line.strip()))
# A2
line = f.readline()
if model.verbose:
            print('A2: {}'.format(line.strip()))
# New keyword options in MT3D-USGS are found here
line = f.readline()
m_arr = line.strip().split()
# Set default values for the keywords
MFStyleArr = False
DRYCell = False
Legacy99Stor = False
FTLPrint = False
NoWetDryPrint = False
OmitDryBud = False
AltWTSorb = False
        # If m_arr[0] is not a digit, it is a keyword
        if not m_arr[0].strip().isdigit():
for i in range(0, len(m_arr)):
if m_arr[i].upper() == "MODFLOWSTYLEARRAYS":
MFStyleArr = True
model.array_format = None
model.free_format = True
if m_arr[i].upper() == "DRYCELL":
DRYCell = True
if m_arr[i].upper() == "LEGACY99STORAGE":
Legacy99Stor = True
if m_arr[i].upper() == "FTLPRINT":
FTLPrint = True
if m_arr[i].upper() == "NOWETDRYPRINT":
NoWetDryPrint = True
if m_arr[i].upper() == "OMITDRYCELLBUDGET":
OmitDryBud = True
if m_arr[i].upper() == "AltWTSorb":
AltWTSorb = True
# A3
if model.verbose:
print(' loading NLAY, NROW, NCOL, NPER, NCOMP, MCOMP...')
if m_arr[0].isdigit() is False:
line = f.readline()
nlay = int(line[0:10])
nrow = int(line[10:20])
ncol = int(line[20:30])
nper = int(line[30:40])
try:
ncomp = int(line[40:50])
except:
ncomp = 1
try:
mcomp = int(line[50:60])
except:
mcomp = 1
if model.verbose:
print(' NLAY {}'.format(nlay))
print(' NROW {}'.format(nrow))
print(' NCOL {}'.format(ncol))
print(' NPER {}'.format(nper))
print(' NCOMP {}'.format(ncomp))
print(' MCOMP {}'.format(mcomp))
if model.verbose:
print(' loading TUNIT, LUNIT, MUNIT...')
line = f.readline()
tunit = line[0:4]
lunit = line[4:8]
munit = line[8:12]
if model.verbose:
print(' TUNIT {}'.format(tunit))
print(' LUNIT {}'.format(lunit))
print(' MUNIT {}'.format(munit))
if model.verbose:
print(' loading TRNOP...')
trnop = f.readline()[:20].strip().split()
if model.verbose:
print(' TRNOP {}'.format(trnop))
if model.verbose:
print(' loading LAYCON...')
laycon = Util2d.load_txt((nlay,), f, np.int32, '(40I2)')
if model.verbose:
print(' LAYCON {}'.format(laycon))
if model.verbose:
print(' loading DELR...')
delr = Util2d.load(f, model, (ncol,), np.float32, 'delr',
ext_unit_dict, array_format="mt3d")
if model.verbose:
print(' DELR {}'.format(delr))
if model.verbose:
print(' loading DELC...')
delc = Util2d.load(f, model, (nrow,), np.float32, 'delc',
ext_unit_dict, array_format="mt3d")
if model.verbose:
print(' DELC {}'.format(delc))
if model.verbose:
print(' loading HTOP...')
htop = Util2d.load(f, model, (nrow, ncol), np.float32, 'htop',
ext_unit_dict, array_format="mt3d")
if model.verbose:
print(' HTOP {}'.format(htop))
if model.verbose:
print(' loading DZ...')
dz = Util3d.load(f, model, (nlay, nrow, ncol), np.float32, 'dz',
ext_unit_dict, array_format="mt3d")
if model.verbose:
print(' DZ {}'.format(dz))
if model.verbose:
print(' loading PRSITY...')
prsity = Util3d.load(f, model, (nlay, nrow, ncol), np.float32,
'prsity',
ext_unit_dict, array_format="mt3d")
if model.verbose:
print(' PRSITY {}'.format(prsity))
if model.verbose:
print(' loading ICBUND...')
icbund = Util3d.load(f, model, (nlay, nrow, ncol), np.int32, 'icbund',
ext_unit_dict, array_format="mt3d")
if model.verbose:
print(' ICBUND {}'.format(icbund))
if model.verbose:
print(' loading SCONC...')
kwargs = {}
sconc = Util3d.load(f, model, (nlay, nrow, ncol), np.float32, 'sconc1',
ext_unit_dict, array_format="mt3d")
if ncomp > 1:
for icomp in range(2, ncomp + 1):
name = "sconc" + str(icomp)
if model.verbose:
print(' loading {}...'.format(name))
u3d = Util3d.load(f, model, (nlay, nrow, ncol), np.float32,
name, ext_unit_dict, array_format="mt3d")
kwargs[name] = u3d
if model.verbose:
print(' SCONC {}'.format(sconc))
if model.verbose:
print(' loading CINACT, THCKMIN...')
line = f.readline()
cinact = float(line[0:10])
try:
thkmin = float(line[10:20])
except:
thkmin = 0.01
if model.verbose:
print(' CINACT {}'.format(cinact))
print(' THKMIN {}'.format(thkmin))
if model.verbose:
print(' loading IFMTCN, IFMTNP, IFMTRF, IFMTDP, SAVUCN...')
line = f.readline()
ifmtcn = int(line[0:10])
ifmtnp = int(line[10:20])
ifmtrf = int(line[20:30])
ifmtdp = int(line[30:40])
savucn = False
if 't' in line[40:50].lower():
savucn = True
if model.verbose:
print(' IFMTCN {}'.format(ifmtcn))
print(' IFMTNP {}'.format(ifmtnp))
print(' IFMTRF {}'.format(ifmtrf))
print(' IFMTDP {}'.format(ifmtdp))
print(' SAVUCN {}'.format(savucn))
if model.verbose:
print(' loading NPRS...')
line = f.readline()
nprs = int(line[0:10])
if model.verbose:
print(' NPRS {}'.format(nprs))
timprs = None
if nprs > 0:
if model.verbose:
print(' loading TIMPRS...')
timprs = Util2d.load_txt((nprs,), f, np.float32, '(8F10.0)')
if model.verbose:
print(' TIMPRS {}'.format(timprs))
if model.verbose:
print(' loading NOBS, NPROBS...')
line = f.readline()
nobs = int(line[0:10])
try:
nprobs = int(line[10:20])
except:
nprobs = 1
if model.verbose:
print(' NOBS {}'.format(nobs))
print(' NPROBS {}'.format(nprobs))
obs = None
if nobs > 0:
if model.verbose:
print(' loading KOBS, IOBS, JOBS...')
obs = []
for l in range(nobs):
line = f.readline()
k = int(line[0:10])
i = int(line[10:20])
j = int(line[20:30])
obs.append([k, i, j])
obs = np.array(obs)
if model.verbose:
print(' OBS {}'.format(obs))
if model.verbose:
print(' loading CHKMAS, NPRMAS...')
line = f.readline()
chkmas = False
if 't' in line[0:10].lower():
chkmas = True
try:
nprmas = int(line[10:20])
except:
nprmas = 1
if model.verbose:
print(' CHKMAS {}'.format(chkmas))
print(' NPRMAS {}'.format(nprmas))
if model.verbose:
print(
' loading PERLEN, NSTP, TSMULT, TSLNGH, DT0, MXSTRN, TTSMULT, TTSMAX...')
dt0, mxstrn, ttsmult, ttsmax = [], [], [], []
perlen = []
nstp = []
tsmult = []
tslngh = []
ssflag = []
for kper in range(nper):
line = f.readline()
perlen.append(float(line[0:10]))
nstp.append(int(line[10:20]))
tsmult.append(float(line[20:30]))
sf = ' '
ll = line[30:].strip().split()
if len(ll) > 0:
if 'sstate' in ll[0].lower():
sf = 'SState'
ssflag.append(sf)
if tsmult[-1] <= 0:
t = Util2d.load_txt((nstp[-1],), f, np.float32, '(8F10.0)')
tslngh.append(t)
raise Exception("tsmult <= 0 not supported")
line = f.readline()
dt0.append(float(line[0:10]))
mxstrn.append(int(line[10:20]))
ttsmult.append(float(line[20:30]))
ttsmax.append(float(line[30:40]))
if model.verbose:
print(' PERLEN {}'.format(perlen))
print(' NSTP {}'.format(nstp))
print(' TSMULT {}'.format(tsmult))
print(' SSFLAG {}'.format(ssflag))
print(' TSLNGH {}'.format(tslngh))
print(' DT0 {}'.format(dt0))
print(' MXSTRN {}'.format(mxstrn))
print(' TTSMULT {}'.format(ttsmult))
print(' TTSMAX {}'.format(ttsmax))
# Close the file
f.close()
# set package unit number
unitnumber = None
filenames = [None]
if ext_unit_dict is not None:
unitnumber, filenames[0] = \
model.get_ext_dict_attr(ext_unit_dict,
filetype=Mt3dBtn.ftype())
btn = Mt3dBtn(model, nlay=nlay, nrow=nrow, ncol=ncol, nper=nper,
ncomp=ncomp, mcomp=mcomp, tunit=tunit,
laycon=laycon, delr=delr, delc=delc, htop=htop, dz=dz,
lunit=lunit, munit=munit, prsity=prsity, icbund=icbund,
sconc=sconc, cinact=cinact, thkmin=thkmin,
ifmtcn=ifmtcn, ifmtnp=ifmtnp, ifmtrf=ifmtrf,
ifmtdp=ifmtdp, savucn=savucn, nprs=nprs,
timprs=timprs, obs=obs, nprobs=nprobs, chkmas=chkmas,
nprmas=nprmas, perlen=perlen, nstp=nstp, tsmult=tsmult,
ssflag=ssflag, dt0=dt0, mxstrn=mxstrn, ttsmult=ttsmult,
ttsmax=ttsmax,
unitnumber=unitnumber, filenames=filenames,
**kwargs)
return btn
@staticmethod
def ftype():
return 'BTN'
@staticmethod
def defaultunit():
return 31
@staticmethod
def reservedunit():
return 1
|
the-stack_0_21531 | import os
import tarfile
import pytest
from atomic_reactor.constants import (EXPORTED_COMPRESSED_IMAGE_NAME_TEMPLATE,
IMAGE_TYPE_DOCKER_ARCHIVE)
from atomic_reactor.plugin import PostBuildPluginsRunner
from atomic_reactor.plugins.post_compress import CompressPlugin
from atomic_reactor.inner import BuildResult
class TestCompress(object):
@pytest.mark.skip(reason="plugin has to fetch image differently than via docker")
@pytest.mark.parametrize('source_build', (True, False))
@pytest.mark.parametrize('method, load_exported_image, give_export, extension', [
('gzip', False, True, 'gz'),
('lzma', False, False, 'xz'),
('gzip', True, True, 'gz'),
('gzip', True, False, 'gz'),
('spam', True, True, None),
])
def test_compress(self, tmpdir, caplog, workflow,
source_build, method,
load_exported_image, give_export, extension):
exp_img = os.path.join(str(tmpdir), 'img.tar')
if source_build:
workflow.build_result = BuildResult(source_docker_archive="oci_path")
else:
workflow.build_result = BuildResult(image_id="12345")
if load_exported_image and give_export:
tarfile.open(exp_img, mode='w').close()
workflow.exported_image_sequence.append({'path': exp_img,
'type': IMAGE_TYPE_DOCKER_ARCHIVE})
runner = PostBuildPluginsRunner(
workflow,
[{
'name': CompressPlugin.key,
'args': {
'method': method,
'load_exported_image': load_exported_image,
},
}]
)
if not extension:
with pytest.raises(Exception) as excinfo:
runner.run()
assert 'Unsupported compression format' in str(excinfo.value)
return
runner.run()
if source_build and not (give_export and load_exported_image):
assert 'skipping, no exported source image to compress' in caplog.text
else:
compressed_img = os.path.join(
workflow.source.workdir,
EXPORTED_COMPRESSED_IMAGE_NAME_TEMPLATE.format(extension))
assert os.path.exists(compressed_img)
metadata = workflow.exported_image_sequence[-1]
assert metadata['path'] == compressed_img
assert metadata['type'] == IMAGE_TYPE_DOCKER_ARCHIVE
assert 'uncompressed_size' in metadata
assert isinstance(metadata['uncompressed_size'], int)
assert ", ratio: " in caplog.text
def test_skip_plugin(self, caplog, workflow):
workflow.user_params['scratch'] = True
runner = PostBuildPluginsRunner(
workflow,
[{
'name': CompressPlugin.key,
'args': {
'method': 'gzip',
'load_exported_image': True,
},
}]
)
runner.run()
assert 'scratch build, skipping plugin' in caplog.text
|
the-stack_0_21533 | """
CLI commands to ingest test results from various sources into the Soda cloud.
"""
from __future__ import annotations
import dataclasses
import datetime as dt
import json
import logging
import os
from pathlib import Path
from typing import Iterator, Optional, Tuple
from sodasql.__version__ import SODA_SQL_VERSION
from sodasql.scan.scan_builder import (
build_warehouse_yml_parser,
create_soda_server_client,
)
from sodasql.scan.test import Test
from sodasql.scan.test_result import TestResult
from sodasql.soda_server_client.soda_server_client import SodaServerClient
@dataclasses.dataclass(frozen=True)
class Table:
"""Represents a table."""
name: str
schema: str
database: str
def map_dbt_run_result_to_test_result(
test_nodes: dict[str, "DbtTestNode"],
run_results: list["RunResultOutput"],
) -> dict[str, set["DbtModelNode"]]:
"""
Map run results to test results.
Parameters
----------
test_nodes : Dict[str: DbtTestNode]
The schema test nodes.
run_results : List[RunResultOutput]
The run results.
Returns
-------
    out : dict[str, TestResult]
        A mapping from each test's unique id to its test result.
"""
from dbt.contracts.results import TestStatus
dbt_tests_with_soda_test = {
test_node.unique_id: Test(
id=test_node.unique_id,
title=f"{test_node.name}",
expression=test_node.raw_sql,
metrics=None,
column=test_node.column_name,
source="dbt",
)
for test_node in test_nodes.values()
}
tests_with_test_result = {
run_result.unique_id: TestResult(
dbt_tests_with_soda_test[run_result.unique_id],
passed=run_result.status == TestStatus.Pass,
skipped=run_result.status == TestStatus.Skipped,
values={"failures": run_result.failures},
)
for run_result in run_results
if run_result.unique_id in test_nodes.keys()
}
return tests_with_test_result
def map_dbt_test_results_iterator(
manifest_file: Path, run_results_file: Path
) -> Iterator[tuple[Table, list[TestResult]]]:
"""
Create an iterator for the dbt test results.
Parameters
----------
manifest_file : Path
The path to the manifest file.
run_results_file : Path
The path to the run results file.
Returns
-------
out : Iterator[tuple[Table, list[TestResult]]]
The table and its corresponding test results.
"""
try:
from sodasql import dbt as soda_dbt
except ImportError as e:
raise RuntimeError(
"Soda SQL dbt extension is not installed: $ pip install soda-sql-dbt"
) from e
with manifest_file.open("r") as file:
manifest = json.load(file)
with run_results_file.open("r") as file:
run_results = json.load(file)
model_nodes, seed_nodes, test_nodes = soda_dbt.parse_manifest(manifest)
parsed_run_results = soda_dbt.parse_run_results(run_results)
tests_with_test_result = map_dbt_run_result_to_test_result(test_nodes, parsed_run_results)
model_and_seed_nodes = {**model_nodes, **seed_nodes}
models_with_tests = soda_dbt.create_nodes_to_tests_mapping(
model_and_seed_nodes, test_nodes, parsed_run_results
)
for unique_id, test_unique_ids in models_with_tests.items():
table = Table(
            model_and_seed_nodes[unique_id].alias,
            model_and_seed_nodes[unique_id].schema,
            model_and_seed_nodes[unique_id].database,
)
test_results = [
tests_with_test_result[test_unique_id] for test_unique_id in test_unique_ids
]
yield table, test_results
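# Hedged usage sketch (added for illustration, not part of the original module;
# the artifact paths are hypothetical, matching dbt's default "target" folder):
#
#     for table, test_results in map_dbt_test_results_iterator(
#         Path("target/manifest.json"), Path("target/run_results.json")
#     ):
#         print(table.name, [result.passed for result in test_results])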
def flush_test_results(
test_results_iterator: Iterator[tuple[Table, list[TestResult]]],
soda_server_client: SodaServerClient,
*,
warehouse_name: str,
warehouse_type: str,
) -> None:
"""
Flush the test results.
Parameters
----------
test_results_iterator : Iterator[tuple[Table, list[TestResult]]]
The test results.
soda_server_client : SodaServerClient
The soda server client.
warehouse_name : str
The warehouse name.
warehouse_type : str
The warehouse (and dialect) type.
"""
for table, test_results in test_results_iterator:
test_results_jsons = [
test_result.to_dict() for test_result in test_results if not test_result.skipped
]
if len(test_results_jsons) == 0:
continue
start_scan_response = soda_server_client.scan_start(
warehouse_name=warehouse_name,
warehouse_type=warehouse_type,
warehouse_database_name=table.database,
warehouse_database_schema=table.schema,
table_name=table.name,
scan_yml_columns=None,
scan_time=dt.datetime.now().isoformat(),
origin=os.environ.get("SODA_SCAN_ORIGIN", "external"),
)
soda_server_client.scan_test_results(
start_scan_response["scanReference"], test_results_jsons
)
soda_server_client.scan_ended(start_scan_response["scanReference"])
def resolve_artifacts_paths(
dbt_artifacts: Optional[Path] = None,
dbt_manifest: Optional[Path] = None,
dbt_run_results: Optional[Path] = None
) -> Tuple[Path, Path]:
if dbt_artifacts:
dbt_manifest = Path(dbt_artifacts) / 'manifest.json'
dbt_run_results = Path(dbt_artifacts) / 'run_results.json'
elif dbt_manifest is None:
raise ValueError(
"--dbt-manifest or --dbt-artifacts are required. "
f"Currently, dbt_manifest={dbt_manifest} and dbt_artifacts={dbt_artifacts}"
)
elif dbt_run_results is None:
raise ValueError(
"--dbt-run-results or --dbt-artifacts are required. "
f"Currently, dbt_run_results={dbt_manifest} and dbt_artifacts={dbt_artifacts}"
)
return dbt_manifest, dbt_run_results
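# Hedged example (added for illustration, not part of the original module; the
# "target" directory name is an assumption based on dbt's default output folder):
#
#     manifest, run_results = resolve_artifacts_paths(dbt_artifacts=Path("target"))
#     # -> (Path("target/manifest.json"), Path("target/run_results.json"))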
def ingest(
tool: str,
warehouse_yml_file: str,
dbt_artifacts: Path | None = None,
dbt_manifest: Path | None = None,
dbt_run_results: Path | None = None,
) -> None:
"""
Ingest test information from different tools.
Arguments
---------
tool : str {'dbt'}
The tool name.
warehouse_yml_file : str
The warehouse yml file.
dbt_artifacts : Optional[Path]
        The path to the folder containing both the manifest and run_results.json.
When provided, dbt_manifest and dbt_run_results will be ignored.
dbt_manifest : Optional[Path]
The path to the dbt manifest.
dbt_run_results : Optional[Path]
The path to the dbt run results.
Raises
------
ValueError :
If the tool is unrecognized.
"""
logger = logging.getLogger(__name__)
logger.info(SODA_SQL_VERSION)
warehouse_yml_parser = build_warehouse_yml_parser(warehouse_yml_file)
warehouse_yml = warehouse_yml_parser.warehouse_yml
soda_server_client = create_soda_server_client(warehouse_yml)
if not soda_server_client.api_key_id or not soda_server_client.api_key_secret:
raise ValueError("Missing Soda cloud api key id and/or secret.")
if tool == 'dbt':
dbt_manifest, dbt_run_results = resolve_artifacts_paths(
dbt_artifacts=dbt_artifacts,
dbt_manifest=dbt_manifest,
dbt_run_results=dbt_run_results
)
test_results_iterator = map_dbt_test_results_iterator(dbt_manifest, dbt_run_results)
else:
raise NotImplementedError(f"Unknown tool: {tool}")
flush_test_results(
test_results_iterator,
soda_server_client,
warehouse_name=warehouse_yml.name,
warehouse_type=warehouse_yml.dialect.type,
)
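# Hedged usage sketch (added for illustration, not part of the original module;
# the warehouse yml path and artifacts folder below are hypothetical placeholders):
#
#     ingest(
#         tool="dbt",
#         warehouse_yml_file="warehouse.yml",
#         dbt_artifacts=Path("target"),
#     )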
|
the-stack_0_21534 | #!/usr/bin/env python
#
# pycmtensor documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 9 13:47:02 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#
import os
import shutil
import sys
import sphinx
sys.path.insert(0, os.path.abspath(".."))
# -- Project information -----------------------------------------------------
project = "PyCMTensor"
copyright = "2022, Melvin Wong"
author = "Melvin Wong"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = "0.7.0"
# The full version, including alpha/beta/rc tags.
release = "0.7.0"
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.viewcode",
"autoapi.extension",
"sphinx.ext.napoleon",
"sphinx.ext.graphviz",
"sphinx.ext.inheritance_diagram",
"myst_nb",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = {
".rst": "restructuredtext",
".ipynb": "myst-nb",
".md": "myst-nb",
}
# The master toctree document.
master_doc = "index"
suppress_warnings = ["autoapi"]
# -- Options for myst-nb -----------------------------------------------------
jupyter_execute_notebooks = "off"
# -- Options for autoapi -----------------------------------------------------
def skip_config_classes(app, what, name, obj, skip, options):
if what == "data":
skip = True
return skip
def setup(sphinx):
sphinx.connect("autoapi-skip-member", skip_config_classes)
autoapi_add_toctree_entry = True
autoapi_python_class_content = "both"
autoapi_type = "python"
autoapi_keep_files = True
graphviz_dot = shutil.which("dot")
autoapi_dirs = ["../pycmtensor"]
autoapi_options = [
"members",
"undoc-members",
"private-members",
"show-inheritance",
"show-inheritance-diagram",
# "show-module-summary",
# "special-members",
# "imported-members",
]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_book_theme"
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
"repository_url": "https://github.com/mwong009/pycmtensor",
"use_repository_button": True,
"repository_branch": "Master",
"use_issues_button": True,
"path_to_docs": "docs/",
"home_page_in_toc": False,
"use_edit_page_button": True,
"show_toc_level": 1,
}
# html_logo = "path/to/myimage.png"
# html_title = "My site title"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# -- Options for HTMLHelp output ---------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "pycmtensordoc"
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto, manual, or own class]).
latex_documents = [
(master_doc, "pycmtensor.tex", "PyCMTensor Documentation", "Melvin Wong", "manual"),
]
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "pycmtensor", "PyCMTensor Documentation", [author], 1)]
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"pycmtensor",
"PyCMTensor Documentation",
author,
"pycmtensor",
"One line description of project.",
"Miscellaneous",
),
]
# myst_commonmark_only = True
|
the-stack_0_21535 | from talon import Module, fs, Context
import os
import csv
from pathlib import Path
from typing import Dict, List, Tuple
import threading
# NOTE: This method requires this module to be one folder below the top-level
# knausj folder.
SETTINGS_DIR = Path(__file__).parents[1] / "settings"
PRIVATE_SETTINGS_DIR = Path(__file__).parents[1] / "private"
if not SETTINGS_DIR.is_dir():
os.mkdir(SETTINGS_DIR)
mod = Module()
ctx = Context()
def _load_csv_dict(
    file_name: str, headers: Tuple[str, str], default: Dict[str, str] = {}
) -> Dict[str, str]:
"""Load a word mapping from a CSV file. If it doesn't exist, create it."""
assert file_name.endswith(".csv")
path = SETTINGS_DIR / file_name
# Create the file if it doesn't exist
if not SETTINGS_DIR.is_dir():
os.mkdir(SETTINGS_DIR)
if not path.is_file():
with open(path, "w", encoding="utf-8") as file:
writer = csv.writer(file)
writer.writerow(headers)
for key, value in default.items():
writer.writerow([key] if key == value else [value, key])
# Now read from disk
with open(path, "r", encoding="utf-8") as file:
rows = list(csv.reader(file))
mapping = {}
if len(rows) >= 2:
actual_headers = rows[0]
if not actual_headers == list(headers):
print(
f'"{file_name}": Malformed headers - {actual_headers}.'
+ f" Should be {list(headers)}. Ignoring row."
)
for row in rows[1:]:
if len(row) == 0:
# Windows newlines are sometimes read as empty rows. :champagne:
continue
if len(row) == 1:
output = spoken_form = row[0]
else:
output, spoken_form = row[:2]
if len(row) > 2:
print(
f'"{file_name}": More than two values in row: {row}.'
+ " Ignoring the extras."
)
# Leading/trailing whitespace in spoken form can prevent recognition.
spoken_form = spoken_form.strip()
mapping[spoken_form] = output
return mapping
_mapped_lists = {}
_settings_lock = threading.Lock()
_word_map_params = None
def _update_list(list_name: str, *csv_params):
"""Update list with `list_name` from a csv on disk.
`csv_params` will be passed to `_load_csv_dict`.
"""
global ctx
ctx.lists[list_name] = _load_csv_dict(*csv_params)
def _update_word_map(*csv_params):
"""Update `dictate.word_map` from disk.
`csv_params` will be passed to `_load_csv_dict`.
"""
global ctx
ctx.settings["dictate.word_map"] = _load_csv_dict(*csv_params)
def _update_lists(*_):
"""Update all CSV lists from disk."""
print("Updating CSV lists...")
with _settings_lock:
for list_name, csv_params in _mapped_lists.items():
try:
_update_list(list_name, *csv_params)
except Exception as e:
print(f'Error loading list "{list_name}": {e}')
# Special case - `dictate.word_map` isn't a list.
if _word_map_params:
try:
_update_word_map(*_word_map_params)
except Exception as e:
print(f'Error updating "dictate.word_map": {e}')
def bind_list_to_csv(
list_name: str,
csv_name: str,
csv_headers: Tuple[str, str],
default_values: Dict[str, str] = {},
) -> None:
"""Register a Talon list that should be updated from a CSV on disk.
The CSV file will be created automatically in the "settings" dir if it
doesn't exist. This directory can be tracked independently to
`knausj_talon`, allowing the user to specify things like private vocab
separately.
Note the list must be declared separately.
"""
global _mapped_lists
with _settings_lock:
_update_list(list_name, csv_name, csv_headers, default_values)
# If there were no errors, we can register it permanently.
_mapped_lists[list_name] = (csv_name, csv_headers, default_values)
def bind_word_map_to_csv(
csv_name: str, csv_headers: Tuple[str, str], default_values: Dict[str, str] = {}
) -> None:
"""Like `bind_list_to_csv`, but for the `dictate.word_map` setting.
Since it is a setting, not a list, it has to be handled separately.
"""
global _word_map_params
# TODO: Maybe a generic system for binding the dicts to settings? Only
# implement if it's needed.
with _settings_lock:
_update_word_map(csv_name, csv_headers, default_values)
# If there were no errors, we can register it permanently.
_word_map_params = (csv_name, csv_headers, default_values)
fs.watch(str(SETTINGS_DIR), _update_lists)
fs.watch(str(PRIVATE_SETTINGS_DIR), _update_lists)
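# Hedged usage sketch (added for illustration, not part of the original module;
# the list name, CSV name, headers, and defaults below are hypothetical). A Talon
# list declared elsewhere can be kept in sync with a CSV in the settings folder:
#
#     mod.list("my_websites", desc="Websites to open by voice")
#     bind_list_to_csv(
#         "user.my_websites",
#         "websites.csv",
#         ("URL", "Spoken form"),
#         default_values={"example": "https://example.com"},
#     )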
|
the-stack_0_21536 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# scvi documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 9 13:47:02 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#
import sys
from pathlib import Path
HERE = Path(__file__).parent
sys.path[:0] = [str(HERE.parent), str(HERE / "extensions")]
import scvi # noqa
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
needs_sphinx = "3.4" # Nicer param docs
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.intersphinx",
"sphinx.ext.viewcode",
"nbsphinx",
"nbsphinx_link",
"sphinx.ext.mathjax",
"sphinx.ext.napoleon",
"sphinx_autodoc_typehints", # needs to be after napoleon
"sphinx.ext.autosummary",
"scanpydoc.elegant_typehints",
"scanpydoc.definition_list_typed_field",
"scanpydoc.autosummary_generate_imported",
*[p.stem for p in (HERE / "extensions").glob("*.py")],
"sphinx_copybutton",
"sphinx_gallery.load_style",
"sphinx_tabs.tabs",
]
# nbsphinx specific settings
exclude_patterns = ["_build", "**.ipynb_checkpoints"]
nbsphinx_execute = "never"
templates_path = ["_templates"]
source_suffix = ".rst"
# Generate the API documentation when building
autosummary_generate = True
autodoc_member_order = "bysource"
napoleon_google_docstring = True # for pytorch lightning
napoleon_numpy_docstring = True
napoleon_include_init_with_doc = False
napoleon_use_rtype = True # having a separate entry generally helps readability
napoleon_use_param = True
napoleon_custom_sections = [("Params", "Parameters")]
todo_include_todos = False
numpydoc_show_class_members = False
annotate_defaults = True # scanpydoc option, look into why we need this
# The master toctree document.
master_doc = "index"
intersphinx_mapping = dict(
anndata=("https://anndata.readthedocs.io/en/stable/", None),
ipython=("https://ipython.readthedocs.io/en/stable/", None),
matplotlib=("https://matplotlib.org/", None),
numpy=("https://numpy.org/doc/stable/", None),
pandas=("https://pandas.pydata.org/docs/", None),
python=("https://docs.python.org/3", None),
scipy=("https://docs.scipy.org/doc/scipy/reference/", None),
sklearn=("https://scikit-learn.org/stable/", None),
torch=("https://pytorch.org/docs/master/", None),
scanpy=("https://scanpy.readthedocs.io/en/stable/", None),
pytorch_lightning=("https://pytorch-lightning.readthedocs.io/en/stable/", None),
pyro=("http://docs.pyro.ai/en/stable/", None),
)
# General information about the project.
project = u"scvi-tools"
copyright = u"2021, Yosef Lab, UC Berkeley"
author = u"Romain Lopez, Adam Gayoso, Pierre Boyeau, Galen Xing"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = scvi.__version__
# The full version, including alpha/beta/rc tags.
release = scvi.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "default"
pygments_dark_style = "default"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output -------------------------------------------
html_show_sourcelink = True
html_theme = "pydata_sphinx_theme"
html_context = dict(
# display_github=True, # Integrate GitHub
github_user="YosefLab", # Username
github_repo="scvi-tools", # Repo name
github_version="master", # Version
doc_path="docs/", # Path in the checkout to the docs root
)
# Set link name generated in the top bar.
html_title = "scvi-tools"
html_logo = "_static/logo.png"
html_theme_options = {
"github_url": "https://github.com/YosefLab/scvi-tools",
"twitter_url": "https://twitter.com/YosefLab",
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
html_css_files = ["css/override.css", "css/sphinx_gallery.css"]
html_show_sphinx = False
nbsphinx_prolog = r"""
.. raw:: html
{{% set docname = env.doc2path(env.docname, base=None).split("/")[-1] %}}
.. raw:: html
<div class="admonition note">
<p class="admonition-title">Note</p>
<p>
This page was generated from
<a class="reference external" href="https://github.com/yoseflab/scvi-tutorials/tree/{version}/">{docname}</a>.
Interactive online version:
<span style="white-space: nowrap;"><a href="https://colab.research.google.com/github/yoseflab/scvi_tutorials/blob/{version}/{docname}"><img alt="Colab badge" src="https://colab.research.google.com/assets/colab-badge.svg" style="vertical-align:text-bottom"></a>.</span>
</p>
</div>
""".format(
version=version, docname="{{ docname|e }}"
)
nbsphinx_thumbnails = {
"user_guide/notebooks/data_loading": "_static/tutorials/anndata.svg",
"user_guide/notebooks/api_overview": "_static/tutorials/overview.svg",
"user_guide/notebooks/linear_decoder": "_static/tutorials/ldvae.svg",
"user_guide/notebooks/scvi_in_R": "_static/tutorials/Rlogo.png",
"user_guide/notebooks/harmonization": "_static/tutorials/scvi_batch.png",
"user_guide/notebooks/totalVI": "_static/tutorials/totalvi_cell.svg",
"user_guide/notebooks/AutoZI_tutorial": "_static/tutorials/history.png",
"user_guide/notebooks/gimvi_tutorial": "_static/tutorials/gimvi.png",
"user_guide/notebooks/scarches_scvi_tools": "_static/tutorials/scarches.png",
"user_guide/notebooks/cite_scrna_integration_w_totalVI": "_static/tutorials/cite_scrna.png",
"user_guide/notebooks/scVI_DE_worm": "_static/tutorials/worm.png",
"user_guide/notebooks/stereoscope_heart_LV_tutorial": "_static/tutorials/stereoscope.png",
"user_guide/notebooks/seed_labeling": "_static/tutorials/seed.png",
"user_guide/notebooks/cellassign_tutorial": "_static/tutorials/cellassign.png",
"user_guide/notebooks/DestVI_tutorial": "_static/tutorials/destvi.png",
"user_guide/notebooks/PeakVI": "_static/tutorials/peakvi.png",
}
def setup(app):
# https://github.com/pradyunsg/furo/issues/49
app.config.pygments_dark_style = "default"
|
the-stack_0_21539 | """Builder pattern
The Builder pattern separates the construction of a complex object from its
representation so that the same construction process can create different
representations.
"""
from abc import ABC, abstractmethod
class IceCream(ABC):
"""Abstract Product."""
@property
def need_spoon(self):
return False
def __str__(self):
string = self.__class__.__name__
for key, value in self.__dict__.items():
string += "\n{}: {}".format(key, value)
string += "\n"
return string
class ConeIceCream(IceCream):
"""Concrete Product 1."""
pass
class CupIceCream(IceCream):
"""Concrete Product 2."""
@property
def need_spoon(self):
return True
class Builder(ABC):
"""Specify the abstract interface that creates all parts of the product.
This Abstract interface is used by a Director object. All methods except
"get_product" return self, so this class is a "fluent interface".
"""
@abstractmethod
def __init__(self):
self.product = None
self.toppings = None
def set_flavors(self, flavors):
self.product.flavors = flavors
return self
def set_toppings(self):
if self.toppings is not None:
self.product.toppings = self.toppings
return self
def add_spoon(self):
if self.product.need_spoon:
self.product.spoon = 1
return self
def get_product(self):
return self.product
class ConeIceCreamBuilder(Builder):
"""Concrete Builder 1.
This class assembles the product by implementing the Builder interface.
It defines and keeps track of the representation it creates.
"""
def __init__(self):
# super().__init__() # ok in Python 3.x, not in 2.x
super(self.__class__, self).__init__() # also ok in Python 2.x
self.product = ConeIceCream()
self.toppings = "hazelnuts"
class CupIceCreamBuilder(Builder):
"""Concrete Builder 2.
This class assembles the product by implementing the Builder interface.
It defines and keeps track of the representation it creates.
"""
def __init__(self):
# super().__init__() # ok in Python 3.x, not in 2.x
super(self.__class__, self).__init__() # also ok in Python 2.x
self.product = CupIceCream()
self.toppings = "chocolate chips"
class Director(object):
"""Build an object using the Builder interface."""
def __init__(self, builder):
self.builder = builder
def build_product(self, flavors):
"""Prepare the product and finally return it to the client.
The Builder class defined above is a "fluent interface", so we can use
method chaining.
Parameters
----------
flavors : list
Returns
-------
ConeIceCream or CupIceCream
"""
return (
self.builder.set_flavors(flavors).set_toppings().add_spoon().get_product()
)
# Client: it creates a Director object and configures it with a Builder object.
def main():
director = Director(ConeIceCreamBuilder())
product = director.build_product(["chocolate", "vanilla", "banana"])
print(product)
director = Director(CupIceCreamBuilder())
product = director.build_product(["lemon", "strawberry"])
print(product)
builder = ConeIceCreamBuilder()
director = Director(builder)
builder.toppings = None # the ConeIceCreamBuilder has no more toppings!
product = director.build_product(["chocolate", "vanilla", "banana"])
print(product)
if __name__ == "__main__":
main()
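# Illustrative output of main() above (a sketch; attribute lines follow the
# insertion order of each product's __dict__):
#
#   ConeIceCream
#   flavors: ['chocolate', 'vanilla', 'banana']
#   toppings: hazelnuts
#
#   CupIceCream
#   flavors: ['lemon', 'strawberry']
#   toppings: chocolate chips
#   spoon: 1
#
#   ConeIceCream
#   flavors: ['chocolate', 'vanilla', 'banana']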
|
the-stack_0_21542 | from django.urls import reverse
from rest_framework import status
from django.contrib.auth.models import User
from rest_framework.test import APITestCase , APIClient
import json
# Create your tests here.
class FeedTestsViews(APITestCase):
def setUp(self):
url = reverse('users-api:register')
data = {
'username':'test',
'email':'[email protected]',
'password':'test@123'
}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(User.objects.count(), 1)
self.assertEqual(User.objects.get().username, 'test')
self.test_user = User.objects.get(username='test')
self.test_user_pwd = 'test@123'
url = 'mumbles-api:mumble-create'
reversed_url = reverse(url)
data = {
'content':"Mumble Test Post"
}
client = APIClient()
client.force_authenticate(user=self.test_user)
response = client.post(reversed_url, data)
self.mumble = json.loads(response.content.decode('utf-8'))
self.assertEqual(response.status_code,status.HTTP_200_OK)
    def test_mumbles_list_view(self):
url = 'mumbles-api:mumbles'
reversed_url = reverse(url)
client = APIClient()
client.force_authenticate(user=self.test_user)
response = client.get(reversed_url)
response_data = json.loads(response.content.decode('utf-8'))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response_data.get('count'),1)
def test_mumbles_edit_view(self):
url = 'mumbles-api:mumble-edit'
reversed_url = reverse(url,args=[self.mumble.get('id')])
client = APIClient()
client.force_authenticate(user=self.test_user)
data = {
'content':"Mumble Post edited"
}
response = client.patch(reversed_url,data, format='json')
response_data = json.loads(response.content.decode('utf-8'))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response_data.get('content'),data.get('content'))
self.mumble = response_data
def test_mumbles_details_view(self):
client = APIClient()
client.force_authenticate(user=self.test_user)
url = 'mumbles-api:mumble-details'
reversed_url = reverse(url,args=[self.mumble.get('id')])
response = client.get(reversed_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
|
the-stack_0_21544 | # Copyright 2021 The Orbit Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for orbit.utils.common."""
from orbit.utils import common
import tensorflow as tf
class UtilsTest(tf.test.TestCase):
def test_create_global_step(self):
step = common.create_global_step()
self.assertEqual(step.name, "global_step:0")
self.assertEqual(step.dtype, tf.int64)
self.assertEqual(step, 0)
step.assign_add(1)
self.assertEqual(step, 1)
if __name__ == "__main__":
tf.test.main()
|
the-stack_0_21546 | import logging
from typing import Any, Dict, List, Optional, Tuple
from django.http import HttpRequest, HttpResponse
from zerver.decorator import api_key_only_webhook_view
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_success
from zerver.lib.webhooks.common import UnexpectedWebhookEventType, check_send_webhook_message
from zerver.models import UserProfile
DEPRECATED_EXCEPTION_MESSAGE_TEMPLATE = """
New [issue]({url}) (level: {level}):
``` quote
{message}
```
"""
MESSAGE_EVENT_TEMPLATE = """
**New message event:** [{title}]({web_link})
```quote
**level:** {level}
**timestamp:** {datetime}
```
"""
EXCEPTION_EVENT_TEMPLATE = """
**New exception:** [{title}]({web_link})
```quote
**level:** {level}
**timestamp:** {datetime}
**filename:** {filename}
```
"""
EXCEPTION_EVENT_TEMPLATE_WITH_TRACEBACK = EXCEPTION_EVENT_TEMPLATE + """
Traceback:
```{syntax_highlight_as}
{pre_context}---> {context_line}{post_context}\
```
"""
# Because of the \n added at the end of each context element,
# this will actually look better in the traceback.
ISSUE_CREATED_MESSAGE_TEMPLATE = """
**New issue created:** {title}
```quote
**level:** {level}
**timestamp:** {datetime}
**assignee:** {assignee}
```
"""
ISSUE_ASSIGNED_MESSAGE_TEMPLATE = """
Issue **{title}** has now been assigned to **{assignee}** by **{actor}**.
"""
ISSUE_RESOLVED_MESSAGE_TEMPLATE = """
Issue **{title}** was marked as resolved by **{actor}**.
"""
ISSUE_IGNORED_MESSAGE_TEMPLATE = """
Issue **{title}** was ignored by **{actor}**.
"""
# Maps "platform" name provided by Sentry to the Pygments lexer name
syntax_highlight_as_map = {
"go": "go",
"java": "java",
"javascript": "javascript",
"node": "javascript",
"python": "python3",
}
def convert_lines_to_traceback_string(lines: Optional[List[str]]) -> str:
traceback = ""
if lines is not None:
for line in lines:
if (line == ""):
traceback += "\n"
else:
traceback += f" {line}\n"
return traceback
def handle_event_payload(event: Dict[str, Any]) -> Tuple[str, str]:
""" Handle either an exception type event or a message type event payload."""
subject = event["title"]
# We shouldn't support the officially deprecated Raven series of SDKs.
if int(event["version"]) < 7:
raise UnexpectedWebhookEventType("Sentry", "Raven SDK")
platform_name = event["platform"]
syntax_highlight_as = syntax_highlight_as_map.get(platform_name, "")
if syntax_highlight_as == "": # nocoverage
logging.info(f"Unknown Sentry platform: {platform_name}")
context = {
"title": subject,
"level": event["level"],
"web_link": event["web_url"],
"datetime": event["datetime"].split(".")[0].replace("T", " "),
}
if "exception" in event:
# The event was triggered by a sentry.capture_exception() call
# (in the Python Sentry SDK) or something similar.
filename = event["metadata"].get("filename", None)
stacktrace = None
for value in reversed(event["exception"]["values"]):
if "stacktrace" in value:
stacktrace = value["stacktrace"]
break
if stacktrace and filename:
exception_frame = None
for frame in reversed(stacktrace["frames"]):
if frame.get("filename", None) == filename:
exception_frame = frame
break
if exception_frame and exception_frame["context_line"]:
pre_context = convert_lines_to_traceback_string(exception_frame["pre_context"])
context_line = exception_frame["context_line"] + "\n"
if not context_line:
context_line = "\n" # nocoverage
post_context = convert_lines_to_traceback_string(exception_frame["post_context"])
context.update({
"syntax_highlight_as": syntax_highlight_as,
"filename": filename,
"pre_context": pre_context,
"context_line": context_line,
"post_context": post_context,
})
body = EXCEPTION_EVENT_TEMPLATE_WITH_TRACEBACK.format(**context)
return (subject, body)
context.update({"filename": filename}) # nocoverage
body = EXCEPTION_EVENT_TEMPLATE.format(**context) # nocoverage
return (subject, body) # nocoverage
elif "logentry" in event:
# The event was triggered by a sentry.capture_message() call
# (in the Python Sentry SDK) or something similar.
body = MESSAGE_EVENT_TEMPLATE.format(**context)
else:
raise UnexpectedWebhookEventType("Sentry", "unknown-event type")
return (subject, body)
def handle_issue_payload(action: str, issue: Dict[str, Any], actor: Dict[str, Any]) -> Tuple[str, str]:
""" Handle either an issue type event. """
subject = issue["title"]
datetime = issue["lastSeen"].split(".")[0].replace("T", " ")
if issue["assignedTo"]:
if issue["assignedTo"]["type"] == "team":
assignee = "team {}".format(issue["assignedTo"]["name"])
else:
assignee = issue["assignedTo"]["name"]
else:
assignee = "No one"
if action == "created":
context = {
"title": subject,
"level": issue["level"],
"datetime": datetime,
"assignee": assignee,
}
body = ISSUE_CREATED_MESSAGE_TEMPLATE.format(**context)
elif action == "resolved":
context = {
"title": subject,
"actor": actor["name"],
}
body = ISSUE_RESOLVED_MESSAGE_TEMPLATE.format(**context)
elif action == "assigned":
context = {
"title": subject,
"assignee": assignee,
"actor": actor["name"],
}
body = ISSUE_ASSIGNED_MESSAGE_TEMPLATE.format(**context)
elif action == "ignored":
context = {
"title": subject,
"actor": actor["name"],
}
body = ISSUE_IGNORED_MESSAGE_TEMPLATE.format(**context)
else:
raise UnexpectedWebhookEventType("Sentry", "unknown-issue-action type")
return (subject, body)
def handle_deprecated_payload(payload: Dict[str, Any]) -> Tuple[str, str]:
subject = "{}".format(payload.get('project_name'))
body = DEPRECATED_EXCEPTION_MESSAGE_TEMPLATE.format(
level=payload['level'].upper(),
url=payload.get('url'),
message=payload.get('message'),
)
return (subject, body)
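# Illustrative minimal payload accepted by handle_deprecated_payload() above
# (only the fields read by the code are shown; real payloads from the legacy
# Sentry/Raven webhook carry many more keys):
#
#   {"project_name": "my-project", "level": "error",
#    "url": "https://sentry.example.com/myorg/my-project/issues/42/",
#    "message": "Something broke"}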
@api_key_only_webhook_view('Sentry')
@has_request_variables
def api_sentry_webhook(request: HttpRequest, user_profile: UserProfile,
payload: Dict[str, Any] = REQ(argument_type="body")) -> HttpResponse:
data = payload.get("data", None)
# We currently support two types of payloads: events and issues.
if data:
if "event" in data:
subject, body = handle_event_payload(data["event"])
elif "issue" in data:
subject, body = handle_issue_payload(payload["action"], data["issue"], payload["actor"])
else:
raise UnexpectedWebhookEventType("Sentry", str(list(data.keys())))
else:
subject, body = handle_deprecated_payload(payload)
check_send_webhook_message(request, user_profile, subject, body)
return json_success()
|
the-stack_0_21547 | """
Reading images from S3 and test model tfserving cpu RESTful API locally
Author: @developmentseed
Use:
python test_local_api.py --model_name=human_activities --s3_profile=default --batch_size=2
"""
import os
from os import path as op
from itertools import zip_longest
from typing import List, Dict, Any
import boto3
import json
import base64
import requests
from os import makedirs, path as op
import time
import click
from tqdm import tqdm
start_time = time.time()
def _grouper(iterable, n, fillvalue=None):
"Itertool recipe to collect data into fixed-length chunks or blocks"
    # grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
args = [iter(iterable)] * n
return zip_longest(*args, fillvalue=fillvalue)
def get_images(s3_profile, s3_keys: List, download_folder):
if not op.isdir(download_folder):
makedirs(download_folder)
files = []
s3 = boto3.Session(profile_name=s3_profile).client("s3")
for s3_file in s3_keys:
filename = download_folder + "/" + s3_file.split("/")[1]
s3.download_file("aisurvey", s3_file, filename)
files.append(filename)
return files
def post_prediction(url_endpoint, payload):
resp = requests.post(url_endpoint, data=payload)
resp.raise_for_status()
return resp.json()
def format_od_preds(content):
preds = content["predictions"]
preds_out = []
for _, pred in enumerate(preds):
# print(f'\nPrediction number {pi}')
n_ods = int(pred["num_detections"])
pred_list = []
for i in range(n_ods):
pred_i = {}
if pred["detection_scores"][i] > 0.5:
pred_i["detection_classes"] = pred["detection_classes"][i]
pred_i["detection_scores"] = pred["detection_scores"][i]
pred_i["detection_boxes"] = pred["detection_boxes"][i]
pred_list.append(pred_i)
preds_out.append(pred_list)
return preds_out
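# Illustrative shape of format_od_preds()'s return value (a sketch; one inner
# list per image in the batch, keys copied verbatim from the TF Serving
# response, and only detections scoring above 0.5 are kept):
#
#   [
#       [{"detection_classes": 1.0,
#         "detection_scores": 0.87,
#         "detection_boxes": [0.1, 0.2, 0.4, 0.5]}],
#       ...
#   ]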
@click.command(short_help="Testing local tfserving api")
@click.option(
"-m",
"--model_name",
help="Model name",
required=True,
type=str,
default="livestock",
)
@click.option(
"-s",
"--s3_profile",
help="S3 profile",
required=True,
type=str,
default="ai4e",
)
@click.option(
"-b",
"--batch_size",
help="batch_size",
required=True,
type=str,
default=5,
)
def main(model_name, s3_profile, batch_size = 5):
# Set the url of the running Docker container
# url_endpoint = "http://ai4ea-ai4ea-12guawqsr5g2p-176578657.us-east-1.elb.amazonaws.com/v1/models/wildlife:predict"
url_endpoint = f"http://0.0.0.0:8501/v1/models/{model_name}:predict"
s3 = boto3.Session(profile_name=s3_profile).client("s3")
s3_keys = [
"cormon2019_chips/wcm_n51_L_20190602095318_14_8.jpg",
"cormon2019_chips/wcm_n51_L_20190602095318_14_9.jpg",
"cormon2019_chips/wcm_n51_L_20190602095318_9_9.jpg",
"cormon2019_chips/wcm_n51_L_20190602095318_9_8.jpg",
]
# images = get_images(s3_profile, s3_keys, "data/images")
image_directory = "test_data"
images = [op.join(image_directory, f) for f in os.listdir(image_directory) if f.endswith(".jpg")]
# # Iterate through groups of images
for i, img_group in enumerate(_grouper(images, int(batch_size))):
print(img_group)
instances = []
for batch_img_fname in img_group:
if batch_img_fname is not None:
with open(batch_img_fname, 'rb') as image_file:
b64_image = base64.b64encode(image_file.read())
instances.append({'b64': b64_image.decode('utf-8')})
load_file_time = time.time() - start_time
print(f'Load files : {str(load_file_time)}')
# # Run prediction
payload = json.dumps({"instances": instances})
content = post_prediction(url_endpoint, payload)
print(f'post_prediction : {str(time.time() -load_file_time- start_time)}')
preds = format_od_preds(content)
print(json.dumps(preds))
print(f'format_od_preds : {str(time.time() - load_file_time - start_time)}')
if __name__ == '__main__':
main()
|
the-stack_0_21548 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import _init_paths
import os
import os.path as osp
import cv2
import logging
import argparse
import motmetrics as mm
import numpy as np
import torch
import itertools
import requests
import time
from tracker.multitracker import JDETracker
from tracking_utils import visualization as vis
from tracking_utils.log import logger
from tracking_utils.timer import Timer
from tracking_utils.evaluation import Evaluator
import datasets.dataset.jde as datasets
from tracking_utils.utils import mkdir_if_missing
from opts import opts
muna = 0  # module-level counter, incremented as frames are processed and passed to vis.plot_tracking
def letterbox(img, height=608, width=1088,
color=(127.5, 127.5, 127.5)): # resize a rectangular image to a padded rectangular
shape = img.shape[:2] # shape = [height, width]
ratio = min(float(height) / shape[0], float(width) / shape[1])
new_shape = (round(shape[1] * ratio), round(shape[0] * ratio)) # new_shape = [width, height]
dw = (width - new_shape[0]) / 2 # width padding
dh = (height - new_shape[1]) / 2 # height padding
top, bottom = round(dh - 0.1), round(dh + 0.1)
left, right = round(dw - 0.1), round(dw + 0.1)
img = cv2.resize(img, new_shape, interpolation=cv2.INTER_AREA) # resized, no border
img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # padded rectangular
return img, ratio, dw, dh
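# Illustrative numbers (a sketch): for the 1280x720 frames used in eval_seq below,
# ratio = min(608/720, 1088/1280) ~= 0.844, so the frame is resized to 1081x608 and
# padded with 3 px on the left and 4 px on the right to reach the 1088x608 network input.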
def eval_seq(opt, IP_ADDRESS, save_dir=None, show_image=True, frame_rate=30):
camera_ip_address = []
camera_api = "http://" + str(IP_ADDRESS)+ "/api/cameras"
r = requests.get(camera_api)
for i in (r.json()['data']):
camera_ip_address.append(i['camera_ip_address'])
print(camera_ip_address)
tracker = JDETracker(opt, frame_rate=frame_rate)
timer = Timer()
results = []
frame_id = 0
global muna
for camera_addr in itertools.cycle(camera_ip_address):
print(camera_addr)
camera_addr = camera_addr + "&?resolution=1280"
cam = cv2.VideoCapture(camera_addr)
img_size=(1088, 608)
width = img_size[0]
height = img_size[1]
count = 0
w, h = 1280, 720
#frame_id = 0
print(muna)
muna += 1
prev_time = time.time()
while True:
muna = muna + 1
cur_time = time.time()
            if (cur_time - prev_time) < 4:  # process each camera stream for ~4 seconds before switching
count = count + 1
res, img0 = cam.read()
assert img0 is not None, 'Failed to load frame {:d}'.format(count)
img0 = cv2.resize(img0, (w, h))
img, _, _, _ = letterbox(img0, height=height, width=width)
img = img[:, :, ::-1].transpose(2, 0, 1)
img = np.ascontiguousarray(img, dtype=np.float32)
img /= 255.0
if frame_id % 20 == 0:
logger.info('Processing frame {} ({:.2f} fps)'.format(frame_id, 1. / max(1e-5, timer.average_time)))
frame_id = frame_id + 1
# run tracking
timer.tic()
blob = torch.from_numpy(img).cuda().unsqueeze(0)
online_targets = tracker.update(blob, img0)
online_tlwhs = []
online_ids = []
#online_scores = []
for t in online_targets:
tlwh = t.tlwh
tid = t.track_id
vertical = tlwh[2] / tlwh[3] > 1.6
if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
online_tlwhs.append(tlwh)
online_ids.append(tid)
#online_scores.append(t.score)
timer.toc()
# save results
results.append((frame_id + 1, online_tlwhs, online_ids))
if 1:
online_im = vis.plot_tracking(img0,muna, online_tlwhs, online_ids, frame_id=0,
fps=1. / timer.average_time)
if 1:
cv2.imshow('online_im', online_im)
cv2.waitKey(1)
frame_id += 1
muna = muna + 1
else:
break
cam.release()
|
the-stack_0_21551 | # coding: utf-8
import pprint
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
class CreateManualBackupResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'backup': 'BackupInfo'
}
attribute_map = {
'backup': 'backup'
}
def __init__(self, backup=None):
"""CreateManualBackupResponse - a model defined in huaweicloud sdk"""
super(CreateManualBackupResponse, self).__init__()
self._backup = None
self.discriminator = None
if backup is not None:
self.backup = backup
@property
def backup(self):
"""Gets the backup of this CreateManualBackupResponse.
:return: The backup of this CreateManualBackupResponse.
:rtype: BackupInfo
"""
return self._backup
@backup.setter
def backup(self, backup):
"""Sets the backup of this CreateManualBackupResponse.
:param backup: The backup of this CreateManualBackupResponse.
:type: BackupInfo
"""
self._backup = backup
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CreateManualBackupResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
the-stack_0_21554 | import os
import sys
import subprocess
import shutil
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext
class CMakeExtension(Extension):
def __init__(self, name, sourcedir='../cpp'):
Extension.__init__(self, name, sources=[])
self.sourcedir = os.path.abspath(sourcedir)
class CMakeBuildExt(build_ext):
def run(self):
try:
out = subprocess.check_output(['cmake', '--version'])
except OSError:
raise RuntimeError('cmake is required')
for ext in self.extensions:
self.build_extension(ext)
def build_extension(self, ext):
output_dir = os.path.abspath(
os.path.dirname(self.get_ext_fullpath(ext.name)))
abi_flag = os.environ.get('CMAKE_CXX11_ABI', 'OFF')
build_type = 'Debug' if self.debug else 'Release'
cmake_args = ['cmake',
ext.sourcedir,
'-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + output_dir,
'-DCMAKE_BUILD_TYPE=' + build_type,
'-DCMAKE_CXX11_ABI=' + abi_flag]
cmake_args.extend(
[x for x in os.environ.get('CMAKE_COMMON_VARIABLES', '').split(' ')
if x])
env = os.environ.copy()
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
subprocess.check_call(cmake_args,
cwd=self.build_temp,
env=env)
subprocess.check_call(['make', '-j', ext.name],
cwd=self.build_temp,
env=env)
print()
shutil.rmtree('build', ignore_errors=True)
install_requires = ['numba>=0.40.0dev']
with open('../LICENSE', encoding='UTF-8') as f:
license_text = f.read()
cuda_version = ''.join(os.environ.get('CUDA_VERSION', 'unknown').split('.')[:2])
name = 'nvstrings-cuda{}'.format(cuda_version)
version = os.environ.get('GIT_DESCRIBE_TAG', '0.3.0.dev0').lstrip('v')
setup(name=name,
description='CUDA strings Python bindings',
version=version,
py_modules=['nvstrings', 'nvcategory'],
url='https://github.com/NVIDIA/nvstrings',
author='NVIDIA Corporation',
license=license_text,
install_requires=install_requires,
ext_modules=[CMakeExtension('NVStrings'),
CMakeExtension('pyniNVStrings'),
CMakeExtension('NVCategory'),
CMakeExtension('pyniNVCategory')],
cmdclass={'build_ext': CMakeBuildExt},
headers=['NVStrings.h', 'NVCategory.h'],
zip_safe=False
)
from pip_correction import convert_to_manylinux
convert_to_manylinux(name, version)
|
the-stack_0_21555 | #!/usr/bin/python
from BoostBuild import Tester, List
t = Tester()
# Test that top-level project can affect build dir
t.write("project-root.jam", "")
t.write("Jamfile", """
exe a : a.cpp ;
exe b : b.cpp ;
exe c : c.cpp ;
alias bin1 : a ;
alias bin2 : a b ;
alias src : s.cpp ;
exe hello : hello.cpp src ;
""")
t.write("a.cpp", "int main() { return 0; }\n")
t.copy("a.cpp", "b.cpp")
t.copy("a.cpp", "c.cpp")
t.copy("a.cpp", "hello.cpp")
t.write("s.cpp", "")
# Check that targets to which "bin1" refers are updated,
# and only those.
t.run_build_system("bin1")
t.ignore("*.tds")
t.expect_addition(List("bin/$toolset/debug/") * "a.exe a.obj")
t.expect_nothing_more()
# Try again with "bin2"
t.run_build_system("bin2")
t.ignore("*.tds")
t.expect_addition(List("bin/$toolset/debug/") * "b.exe b.obj")
t.expect_nothing_more()
# Try building everything, making sure 'hello' target is
# created
t.run_build_system()
t.ignore("*.tds")
t.expect_addition("bin/$toolset/debug/hello.exe")
# Regression test.
# Check if usage requirements are propagated via "alias"
t.write("l.cpp", """
void
#if defined(_WIN32)
__declspec(dllexport)
#endif
foo() {}
""")
t.write("Jamfile", """
lib l : l.cpp : : : <define>WANT_MAIN ;
alias la : l ;
exe main : main.cpp la ;
""")
t.write("main.cpp", """
#ifdef WANT_MAIN
int main() { return 0; }
#endif
""")
t.write("project-root.jam", "")
t.run_build_system()
t.cleanup()
|
the-stack_0_21558 | """Support for Iperf3 network measurement tool."""
import logging
from datetime import timedelta
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN
from homeassistant.const import (
CONF_MONITORED_CONDITIONS,
CONF_PORT,
CONF_HOST,
CONF_PROTOCOL,
CONF_HOSTS,
CONF_SCAN_INTERVAL,
)
from homeassistant.helpers.discovery import async_load_platform
from homeassistant.helpers.dispatcher import dispatcher_send
from homeassistant.helpers.event import async_track_time_interval
DOMAIN = "iperf3"
DATA_UPDATED = "{}_data_updated".format(DOMAIN)
_LOGGER = logging.getLogger(__name__)
CONF_DURATION = "duration"
CONF_PARALLEL = "parallel"
CONF_MANUAL = "manual"
DEFAULT_DURATION = 10
DEFAULT_PORT = 5201
DEFAULT_PARALLEL = 1
DEFAULT_PROTOCOL = "tcp"
DEFAULT_INTERVAL = timedelta(minutes=60)
ATTR_DOWNLOAD = "download"
ATTR_UPLOAD = "upload"
ATTR_VERSION = "Version"
ATTR_HOST = "host"
UNIT_OF_MEASUREMENT = "Mbit/s"
SENSOR_TYPES = {
ATTR_DOWNLOAD: [ATTR_DOWNLOAD.capitalize(), UNIT_OF_MEASUREMENT],
ATTR_UPLOAD: [ATTR_UPLOAD.capitalize(), UNIT_OF_MEASUREMENT],
}
PROTOCOLS = ["tcp", "udp"]
HOST_CONFIG_SCHEMA = vol.Schema(
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_DURATION, default=DEFAULT_DURATION): vol.Range(5, 10),
vol.Optional(CONF_PARALLEL, default=DEFAULT_PARALLEL): vol.Range(1, 20),
vol.Optional(CONF_PROTOCOL, default=DEFAULT_PROTOCOL): vol.In(PROTOCOLS),
}
)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_HOSTS): vol.All(cv.ensure_list, [HOST_CONFIG_SCHEMA]),
vol.Optional(
CONF_MONITORED_CONDITIONS, default=list(SENSOR_TYPES)
): vol.All(cv.ensure_list, [vol.In(list(SENSOR_TYPES))]),
vol.Optional(CONF_SCAN_INTERVAL, default=DEFAULT_INTERVAL): vol.All(
cv.time_period, cv.positive_timedelta
),
vol.Optional(CONF_MANUAL, default=False): cv.boolean,
}
)
},
extra=vol.ALLOW_EXTRA,
)
SERVICE_SCHEMA = vol.Schema({vol.Optional(ATTR_HOST, default=None): cv.string})
async def async_setup(hass, config):
"""Set up the iperf3 component."""
import iperf3
hass.data[DOMAIN] = {}
conf = config[DOMAIN]
for host in conf[CONF_HOSTS]:
host_name = host[CONF_HOST]
client = iperf3.Client()
client.duration = host[CONF_DURATION]
client.server_hostname = host_name
client.port = host[CONF_PORT]
client.num_streams = host[CONF_PARALLEL]
client.protocol = host[CONF_PROTOCOL]
client.verbose = False
data = hass.data[DOMAIN][host_name] = Iperf3Data(hass, client)
if not conf[CONF_MANUAL]:
async_track_time_interval(hass, data.update, conf[CONF_SCAN_INTERVAL])
def update(call):
"""Service call to manually update the data."""
called_host = call.data[ATTR_HOST]
if called_host in hass.data[DOMAIN]:
hass.data[DOMAIN][called_host].update()
else:
for iperf3_host in hass.data[DOMAIN].values():
iperf3_host.update()
hass.services.async_register(DOMAIN, "speedtest", update, schema=SERVICE_SCHEMA)
hass.async_create_task(
async_load_platform(
hass, SENSOR_DOMAIN, DOMAIN, conf[CONF_MONITORED_CONDITIONS], config
)
)
return True
class Iperf3Data:
"""Get the latest data from iperf3."""
def __init__(self, hass, client):
"""Initialize the data object."""
self._hass = hass
self._client = client
self.data = {ATTR_DOWNLOAD: None, ATTR_UPLOAD: None, ATTR_VERSION: None}
@property
def protocol(self):
"""Return the protocol used for this connection."""
return self._client.protocol
@property
def host(self):
"""Return the host connected to."""
return self._client.server_hostname
@property
def port(self):
"""Return the port on the host connected to."""
return self._client.port
def update(self, now=None):
"""Get the latest data from iperf3."""
if self.protocol == "udp":
            # UDP tests only report a single (one-way) throughput value
result = self._run_test(ATTR_DOWNLOAD)
self.data[ATTR_DOWNLOAD] = self.data[ATTR_UPLOAD] = getattr(
result, "Mbps", None
)
self.data[ATTR_VERSION] = getattr(result, "version", None)
else:
result = self._run_test(ATTR_DOWNLOAD)
self.data[ATTR_DOWNLOAD] = getattr(result, "received_Mbps", None)
self.data[ATTR_VERSION] = getattr(result, "version", None)
self.data[ATTR_UPLOAD] = getattr(
self._run_test(ATTR_UPLOAD), "sent_Mbps", None
)
dispatcher_send(self._hass, DATA_UPDATED, self.host)
def _run_test(self, test_type):
"""Run and return the iperf3 data."""
self._client.reverse = test_type == ATTR_DOWNLOAD
try:
result = self._client.run()
except (AttributeError, OSError, ValueError) as error:
_LOGGER.error("Iperf3 error: %s", error)
return None
if result is not None and hasattr(result, "error") and result.error is not None:
_LOGGER.error("Iperf3 error: %s", result.error)
return None
return result
|
the-stack_0_21559 | '''
Integration tests for states.
'''
import unittest as ut
import numpy as np
import dynamite_test_runner as dtr
from dynamite.states import State
class RandomSeed(dtr.DynamiteTestCase):
def test_generation(self):
'''
Make sure that different processors get the same random seed.
'''
from dynamite import config
config.initialize()
from petsc4py import PETSc
comm = PETSc.COMM_WORLD.tompi4py()
seed = State.generate_time_seed()
all_seeds = comm.gather(seed, root = 0)
if comm.rank == 0:
self.assertTrue(all(s == seed for s in all_seeds))
class ToNumpy(dtr.DynamiteTestCase):
def setUp(self):
from petsc4py import PETSc
self.v = PETSc.Vec().create()
self.v.setSizes(PETSc.COMM_WORLD.size)
self.v.setFromOptions()
self.v.set(-1)
self.v[PETSc.COMM_WORLD.rank] = PETSc.COMM_WORLD.rank
self.v.assemblyBegin()
self.v.assemblyEnd()
def test_to_zero(self):
from petsc4py import PETSc
npvec = State._to_numpy(self.v)
if PETSc.COMM_WORLD.rank == 0:
            for i in range(PETSc.COMM_WORLD.size):
self.assertTrue(npvec[i] == i)
else:
self.assertIs(npvec, None)
def test_to_all(self):
from petsc4py import PETSc
npvec = State._to_numpy(self.v, to_all = True)
        for i in range(PETSc.COMM_WORLD.size):
self.assertTrue(npvec[i] == i)
class PetscMethods(dtr.DynamiteTestCase):
'''
Tests that the methods directly included from PETSc function as intended.
'''
def test_norm(self):
state = State()
start, end = state.vec.getOwnershipRange()
state.vec[start:end] = np.array([1]*(end-start))
state.vec.assemblyBegin()
state.vec.assemblyEnd()
self.assertAlmostEqual(state.norm()**2, state.subspace.get_dimension())
def test_normalize(self):
state = State()
start, end = state.vec.getOwnershipRange()
state.vec[start:end] = np.array([1]*(end-start))
state.vec.assemblyBegin()
state.vec.assemblyEnd()
state.normalize()
self.assertTrue(state.norm() == 1)
def test_copy_preallocate(self):
state1 = State()
state2 = State()
start, end = state1.vec.getOwnershipRange()
state1.vec[start:end] = np.arange(start, end)
state1.vec.assemblyBegin()
state1.vec.assemblyEnd()
result = np.ndarray((end-start,), dtype=np.complex128)
state1.copy(state2)
result[:] = state2.vec[start:end]
self.assertTrue(np.array_equal(result, np.arange(start, end)))
def test_copy_exception_L(self):
state1 = State()
state2 = State(L=state1.subspace.L+1)
with self.assertRaises(ValueError):
state1.copy(state2)
def test_copy_nopreallocate(self):
state1 = State()
start, end = state1.vec.getOwnershipRange()
state1.vec[start:end] = np.arange(start, end)
state1.vec.assemblyBegin()
state1.vec.assemblyEnd()
result = np.ndarray((end-start,), dtype=np.complex128)
state2 = state1.copy()
result[:] = state2.vec[start:end]
self.assertTrue(np.array_equal(result, np.arange(start, end)))
def test_scale(self):
vals = [2, 3.14]
for val in vals:
with self.subTest(val=val):
state = State(state='random')
start, end = state.vec.getOwnershipRange()
pre_values = np.ndarray((end-start,), dtype=np.complex128)
pre_values[:] = state.vec[start:end]
state *= val
for i in range(start, end):
self.assertEqual(state.vec[i], val*pre_values[i-start])
def test_scale_divide(self):
val = 3.14
state = State(state='random')
start, end = state.vec.getOwnershipRange()
pre_values = np.ndarray((end-start,), dtype=np.complex128)
pre_values[:] = state.vec[start:end]
state /= val
for i in range(start, end):
self.assertEqual(state.vec[i], (1/val)*pre_values[i-start])
def test_scale_exception_ary(self):
val = np.array([3.1, 4])
state = State()
with self.assertRaises(TypeError):
state *= val
def test_scale_exception_vec(self):
state1 = State()
state2 = State()
with self.assertRaises(TypeError):
state1 *= state2
# TODO: check state setting. e.g. setting an invalid state should fail (doesn't for Full subspace)
if __name__ == '__main__':
dtr.main()
|
the-stack_0_21562 | import odml
def print_val(rank, curr):
# Cleanup the odml object print strings
print_str = str(curr).split()[0].split("[")[0].split(":")[0]
# Document has no name attribute and should not print id or name info
if hasattr(curr, "name"):
if curr.name and curr.name != curr.id:
print_str = "%s[%s]" % (print_str, curr.name)
else:
print_str = "%s[%s]" % (print_str, curr.id)
print("Validation%s: %s '%s'" % (rank.capitalize(), print_str, "some err message"))
doc = odml.Document()
sec = odml.Section(parent=doc)
_ = odml.Section(parent=sec)
_ = odml.Section(parent=sec)
subsec = odml.Section(parent=sec)
prop = odml.Property(parent=sec)
subprop = odml.Property(parent=subsec)
val = odml.validation.Validation(doc)
errors = list()
warnings = list()
reduce = set()
sec_count = 0
prop_count = 0
obj_valid_map = {}
valid_obj_map = {}
for i in val.errors:
vid = i.validation_id
if i.is_error:
errors.append(i)
else:
warnings.append(i)
if i.obj not in reduce and 'section' in str(i.obj).lower():
sec_count += 1
elif i.obj not in reduce and 'property' in str(i.obj).lower():
prop_count += 1
reduce.add(i.obj)
if i.obj in obj_valid_map:
obj_valid_map[i.obj].append(vid)
else:
obj_valid_map[i.obj] = [vid]
if vid in valid_obj_map:
valid_obj_map[vid].append(i.obj)
else:
valid_obj_map[vid] = [i.obj]
msg = "Validation found %s errors and %s warnings" % (len(errors), len(warnings))
msg += " in %s Sections and %s Properties." % (sec_count, prop_count)
msg += "\nRun 'odml.Validation' for details and to resolve the issues."
print(msg)
|
the-stack_0_21563 | import numpy as np
def bin_to_dec(bin_str):
"""Convert a string of bits to decimal."""
result = 0
for i, bit in enumerate(bin_str[::-1]):
result += int(bit) * 2**i
return result
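# Quick sanity check (illustrative): "10110" -> 16 + 4 + 2 = 22, i.e. the same
# result as the built-in int("10110", 2).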
if __name__ == "__main__":
data = []
with open("input", 'r') as file:
for line in file:
row = []
for char in line.strip():
row.append(int(char))
data.append(row)
data = np.asarray(data)
eps_bits = ''
gam_bits = ''
for i in range(len(data[0])):
if np.sum(data[:,i] == 1) > len(data)/2:
eps_bits += '1'
gam_bits += '0'
else:
eps_bits += '0'
gam_bits += '1'
print(eps_bits, gam_bits)
eps_rate = bin_to_dec(eps_bits)
gam_rate = bin_to_dec(gam_bits)
print(eps_rate, gam_rate)
print(eps_rate * gam_rate)
|
the-stack_0_21564 | """
Sum reduction
==================
"""
####################################################################
# Let's compute the (3000,3) tensor :math:`c` whose entries
# :math:`c_i^u` are given by:
#
# .. math::
# c_i^u = \sum_j (p-a_j)^2 \exp(x_i^u+y_j^u)
#
# where
#
# * :math:`x` is a (3000,3) tensor, with entries :math:`x_i^u`.
# * :math:`y` is a (5000,3) tensor, with entries :math:`y_j^u`.
# * :math:`a` is a (5000,1) tensor, with entries :math:`a_j`.
# * :math:`p` is a scalar, encoded as a vector of size (1,).
#
####################################################################
# Setup
# -----
#
# Standard imports:
import time
import matplotlib.pyplot as plt
import torch
from torch.autograd import grad
from pykeops.torch import Genred
#####################################################################
# Declare random inputs:
M = 3000
N = 5000
# Choose the storage place for our data : CPU (host) or GPU (device) memory.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
dtype = "float32" # Could be 'float32' or 'float64'
torchtype = torch.float32 if dtype == "float32" else torch.float64
x = torch.randn(M, 3, dtype=torchtype, device=device)
y = torch.randn(N, 3, dtype=torchtype, device=device, requires_grad=True)
a = torch.randn(N, 1, dtype=torchtype, device=device)
p = torch.randn(1, dtype=torchtype, device=device)
####################################################################
# Define a custom formula
# -----------------------
formula = "Square(p-a)*Exp(x+y)"
variables = [
"x = Vi(3)", # First arg : i-variable, of size 3
"y = Vj(3)", # Second arg : j-variable, of size 3
"a = Vj(1)", # Third arg : j-variable, of size 1 (scalar)
"p = Pm(1)",
] # Fourth arg : Parameter, of size 1 (scalar)
####################################################################
# Our sum reduction is performed over the index :math:`j`,
# i.e. on the axis ``1`` of the kernel matrix.
# The output c is an :math:`x`-variable indexed by :math:`i`.
my_routine = Genred(formula, variables, reduction_op="Sum", axis=1, dtype=dtype)
c = my_routine(x, y, a, p)
####################################################################
# Compute the gradient
# --------------------
# Now, let's compute the gradient of :math:`c` with
# respect to :math:`y`. Since :math:`c` is not scalar valued,
# its "gradient" :math:`\partial c` should be understood as the adjoint of the
# differential operator, i.e. as the linear operator that:
#
# - takes as input a new tensor :math:`e` with the shape of :math:`c`
# - outputs a tensor :math:`g` with the shape of :math:`y`
#
# such that for all variation :math:`\delta y` of :math:`y` we have:
#
# .. math::
#
# \langle \text{d} c . \delta y , e \rangle = \langle g , \delta y \rangle = \langle \delta y , \partial c . e \rangle
#
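# For the formula at hand this adjoint can be written out explicitly: since
# :math:`\partial c_i^u/\partial y_j^u = (p-a_j)^2 \exp(x_i^u+y_j^u)`, the gradient reads
#
# .. math::
#
#       g_j^u = \sum_i (p-a_j)^2 \exp(x_i^u+y_j^u)\, e_i^u,
#
# a worked instance of the general definition above; it is exactly what the
# "vanilla" PyTorch check at the end of this script computes.
#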
# Backpropagation is all about computing the tensor :math:`g=\partial c . e` efficiently, for arbitrary values of :math:`e`:
# Declare a new tensor of shape (M,3) used as the input of the gradient operator.
# It can be understood as a "gradient with respect to the output c"
# and is thus called "grad_output" in the documentation of PyTorch.
e = torch.rand_like(c)
# Call the gradient op:
start = time.time()
# PyTorch remark : grad(c, y, e) alone outputs a length 1 tuple, hence the need for [0].
g = grad(c, y, e)[0] # g = [∂_y c].e
print(
"Time to compute gradient of convolution operation with KeOps: ",
round(time.time() - start, 5),
"s",
)
####################################################################
# The equivalent code with a "vanilla" pytorch implementation
g_torch = (
(
(p - a.transpose(0, 1))[:, None] ** 2
* torch.exp(x.transpose(0, 1)[:, :, None] + y.transpose(0, 1)[:, None, :])
* e.transpose(0, 1)[:, :, None]
)
.sum(dim=1)
.transpose(0, 1)
)
# Plot the results next to each other:
for i in range(3):
plt.subplot(3, 1, i + 1)
plt.plot(g.detach().cpu().numpy()[:40, i], "-", label="KeOps")
plt.plot(g_torch.detach().cpu().numpy()[:40, i], "--", label="PyTorch")
plt.legend(loc="lower right")
plt.tight_layout()
plt.show()
|
the-stack_0_21566 | from model_classes import *
if __name__ == "__main__":
# Solve optimization multiple times
for j in range(10):
print()
print("Starting model {}".format(j))
print()
m = MultStudentT("test_model_1")
m.process_raw_data()
m.train_test_split(perc=0.95)
m.build_model()
m.load_trace()
m.compute_forecast()
m.build_prob()
m.solve_prob()
print(m.opt_weights)
del m
|
the-stack_0_21567 | """Test Home Assistant remote methods and classes."""
import pytest
from homeassistant import core
from homeassistant.helpers.json import JSONEncoder
from homeassistant.util import dt as dt_util
def test_json_encoder(hass):
"""Test the JSON Encoder."""
ha_json_enc = JSONEncoder()
state = core.State("test.test", "hello")
# Test serializing a datetime
now = dt_util.utcnow()
assert ha_json_enc.default(now) == now.isoformat()
# Test serializing a set()
data = {"milk", "beer"}
assert sorted(ha_json_enc.default(data)) == sorted(data)
# Test serializing an object which implements as_dict
assert ha_json_enc.default(state) == state.as_dict()
# Default method raises TypeError if non HA object
with pytest.raises(TypeError):
ha_json_enc.default(1)
|
the-stack_0_21569 | import unittest
import os
import re
import requests_mock
import tableauserverclient as TSC
import xml.etree.ElementTree as ET
from tableauserverclient.datetime_helpers import format_datetime
from tableauserverclient.server.endpoint.exceptions import InternalServerError
from tableauserverclient.server.request_factory import RequestFactory
from tableauserverclient.models.permissions_item import PermissionsRule
from tableauserverclient.models.user_item import UserItem
from tableauserverclient.models.group_item import GroupItem
from ._utils import asset
TEST_ASSET_DIR = os.path.join(os.path.dirname(__file__), 'assets')
ADD_TAGS_XML = os.path.join(TEST_ASSET_DIR, 'workbook_add_tags.xml')
GET_BY_ID_XML = os.path.join(TEST_ASSET_DIR, 'workbook_get_by_id.xml')
GET_EMPTY_XML = os.path.join(TEST_ASSET_DIR, 'workbook_get_empty.xml')
GET_XML = os.path.join(TEST_ASSET_DIR, 'workbook_get.xml')
POPULATE_CONNECTIONS_XML = os.path.join(TEST_ASSET_DIR, 'workbook_populate_connections.xml')
POPULATE_PDF = os.path.join(TEST_ASSET_DIR, 'populate_pdf.pdf')
POPULATE_PERMISSIONS_XML = os.path.join(TEST_ASSET_DIR, 'workbook_populate_permissions.xml')
POPULATE_PREVIEW_IMAGE = os.path.join(TEST_ASSET_DIR, 'RESTAPISample Image.png')
POPULATE_VIEWS_XML = os.path.join(TEST_ASSET_DIR, 'workbook_populate_views.xml')
POPULATE_VIEWS_USAGE_XML = os.path.join(TEST_ASSET_DIR, 'workbook_populate_views_usage.xml')
PUBLISH_XML = os.path.join(TEST_ASSET_DIR, 'workbook_publish.xml')
PUBLISH_ASYNC_XML = os.path.join(TEST_ASSET_DIR, 'workbook_publish_async.xml')
REFRESH_XML = os.path.join(TEST_ASSET_DIR, 'workbook_refresh.xml')
UPDATE_XML = os.path.join(TEST_ASSET_DIR, 'workbook_update.xml')
UPDATE_PERMISSIONS = os.path.join(TEST_ASSET_DIR, 'workbook_update_permissions.xml')
class WorkbookTests(unittest.TestCase):
def setUp(self):
self.server = TSC.Server('http://test')
# Fake sign in
self.server._site_id = 'dad65087-b08b-4603-af4e-2887b8aafc67'
self.server._auth_token = 'j80k54ll2lfMZ0tv97mlPvvSCRyD0DOM'
self.baseurl = self.server.workbooks.baseurl
def test_get(self):
with open(GET_XML, 'rb') as f:
response_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.get(self.baseurl, text=response_xml)
all_workbooks, pagination_item = self.server.workbooks.get()
self.assertEqual(2, pagination_item.total_available)
self.assertEqual('6d13b0ca-043d-4d42-8c9d-3f3313ea3a00', all_workbooks[0].id)
self.assertEqual('Superstore', all_workbooks[0].name)
self.assertEqual('Superstore', all_workbooks[0].content_url)
self.assertEqual(False, all_workbooks[0].show_tabs)
self.assertEqual('http://tableauserver/#/workbooks/1/views', all_workbooks[0].webpage_url)
self.assertEqual(1, all_workbooks[0].size)
self.assertEqual('2016-08-03T20:34:04Z', format_datetime(all_workbooks[0].created_at))
self.assertEqual('description for Superstore', all_workbooks[0].description)
self.assertEqual('2016-08-04T17:56:41Z', format_datetime(all_workbooks[0].updated_at))
self.assertEqual('ee8c6e70-43b6-11e6-af4f-f7b0d8e20760', all_workbooks[0].project_id)
self.assertEqual('default', all_workbooks[0].project_name)
self.assertEqual('5de011f8-5aa9-4d5b-b991-f462c8dd6bb7', all_workbooks[0].owner_id)
self.assertEqual('3cc6cd06-89ce-4fdc-b935-5294135d6d42', all_workbooks[1].id)
self.assertEqual('SafariSample', all_workbooks[1].name)
self.assertEqual('SafariSample', all_workbooks[1].content_url)
self.assertEqual('http://tableauserver/#/workbooks/2/views', all_workbooks[1].webpage_url)
self.assertEqual(False, all_workbooks[1].show_tabs)
self.assertEqual(26, all_workbooks[1].size)
self.assertEqual('2016-07-26T20:34:56Z', format_datetime(all_workbooks[1].created_at))
self.assertEqual('description for SafariSample', all_workbooks[1].description)
self.assertEqual('2016-07-26T20:35:05Z', format_datetime(all_workbooks[1].updated_at))
self.assertEqual('ee8c6e70-43b6-11e6-af4f-f7b0d8e20760', all_workbooks[1].project_id)
self.assertEqual('default', all_workbooks[1].project_name)
self.assertEqual('5de011f8-5aa9-4d5b-b991-f462c8dd6bb7', all_workbooks[1].owner_id)
self.assertEqual(set(['Safari', 'Sample']), all_workbooks[1].tags)
def test_get_before_signin(self):
self.server._auth_token = None
self.assertRaises(TSC.NotSignedInError, self.server.workbooks.get)
def test_get_empty(self):
with open(GET_EMPTY_XML, 'rb') as f:
response_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.get(self.baseurl, text=response_xml)
all_workbooks, pagination_item = self.server.workbooks.get()
self.assertEqual(0, pagination_item.total_available)
self.assertEqual([], all_workbooks)
def test_get_by_id(self):
with open(GET_BY_ID_XML, 'rb') as f:
response_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.get(self.baseurl + '/3cc6cd06-89ce-4fdc-b935-5294135d6d42', text=response_xml)
single_workbook = self.server.workbooks.get_by_id('3cc6cd06-89ce-4fdc-b935-5294135d6d42')
self.assertEqual('3cc6cd06-89ce-4fdc-b935-5294135d6d42', single_workbook.id)
self.assertEqual('SafariSample', single_workbook.name)
self.assertEqual('SafariSample', single_workbook.content_url)
self.assertEqual('http://tableauserver/#/workbooks/2/views', single_workbook.webpage_url)
self.assertEqual(False, single_workbook.show_tabs)
self.assertEqual(26, single_workbook.size)
self.assertEqual('2016-07-26T20:34:56Z', format_datetime(single_workbook.created_at))
self.assertEqual('description for SafariSample', single_workbook.description)
self.assertEqual('2016-07-26T20:35:05Z', format_datetime(single_workbook.updated_at))
self.assertEqual('ee8c6e70-43b6-11e6-af4f-f7b0d8e20760', single_workbook.project_id)
self.assertEqual('default', single_workbook.project_name)
self.assertEqual('5de011f8-5aa9-4d5b-b991-f462c8dd6bb7', single_workbook.owner_id)
self.assertEqual(set(['Safari', 'Sample']), single_workbook.tags)
self.assertEqual('d79634e1-6063-4ec9-95ff-50acbf609ff5', single_workbook.views[0].id)
self.assertEqual('ENDANGERED SAFARI', single_workbook.views[0].name)
self.assertEqual('SafariSample/sheets/ENDANGEREDSAFARI', single_workbook.views[0].content_url)
def test_get_by_id_missing_id(self):
self.assertRaises(ValueError, self.server.workbooks.get_by_id, '')
def test_refresh_id(self):
self.server.version = '2.8'
self.baseurl = self.server.workbooks.baseurl
with open(REFRESH_XML, 'rb') as f:
response_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.post(self.baseurl + '/3cc6cd06-89ce-4fdc-b935-5294135d6d42/refresh',
status_code=202, text=response_xml)
self.server.workbooks.refresh('3cc6cd06-89ce-4fdc-b935-5294135d6d42')
def test_refresh_object(self):
self.server.version = '2.8'
self.baseurl = self.server.workbooks.baseurl
workbook = TSC.WorkbookItem('')
workbook._id = '3cc6cd06-89ce-4fdc-b935-5294135d6d42'
with open(REFRESH_XML, 'rb') as f:
response_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.post(self.baseurl + '/3cc6cd06-89ce-4fdc-b935-5294135d6d42/refresh',
status_code=202, text=response_xml)
self.server.workbooks.refresh(workbook)
def test_delete(self):
with requests_mock.mock() as m:
m.delete(self.baseurl + '/3cc6cd06-89ce-4fdc-b935-5294135d6d42', status_code=204)
self.server.workbooks.delete('3cc6cd06-89ce-4fdc-b935-5294135d6d42')
def test_delete_missing_id(self):
self.assertRaises(ValueError, self.server.workbooks.delete, '')
def test_update(self):
with open(UPDATE_XML, 'rb') as f:
response_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.put(self.baseurl + '/1f951daf-4061-451a-9df1-69a8062664f2', text=response_xml)
single_workbook = TSC.WorkbookItem('1d0304cd-3796-429f-b815-7258370b9b74', show_tabs=True)
single_workbook._id = '1f951daf-4061-451a-9df1-69a8062664f2'
single_workbook.owner_id = 'dd2239f6-ddf1-4107-981a-4cf94e415794'
single_workbook.name = 'renamedWorkbook'
single_workbook.data_acceleration_config = {'acceleration_enabled': True,
'accelerate_now': False,
'last_updated_at': None,
'acceleration_status': None}
single_workbook = self.server.workbooks.update(single_workbook)
self.assertEqual('1f951daf-4061-451a-9df1-69a8062664f2', single_workbook.id)
self.assertEqual(True, single_workbook.show_tabs)
self.assertEqual('1d0304cd-3796-429f-b815-7258370b9b74', single_workbook.project_id)
self.assertEqual('dd2239f6-ddf1-4107-981a-4cf94e415794', single_workbook.owner_id)
self.assertEqual('renamedWorkbook', single_workbook.name)
self.assertEqual(True, single_workbook.data_acceleration_config['acceleration_enabled'])
self.assertEqual(False, single_workbook.data_acceleration_config['accelerate_now'])
def test_update_missing_id(self):
single_workbook = TSC.WorkbookItem('test')
self.assertRaises(TSC.MissingRequiredFieldError, self.server.workbooks.update, single_workbook)
def test_update_copy_fields(self):
with open(POPULATE_CONNECTIONS_XML, 'rb') as f:
connection_xml = f.read().decode('utf-8')
with open(UPDATE_XML, 'rb') as f:
update_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.get(self.baseurl + '/1f951daf-4061-451a-9df1-69a8062664f2/connections', text=connection_xml)
m.put(self.baseurl + '/1f951daf-4061-451a-9df1-69a8062664f2', text=update_xml)
single_workbook = TSC.WorkbookItem('1d0304cd-3796-429f-b815-7258370b9b74')
single_workbook._id = '1f951daf-4061-451a-9df1-69a8062664f2'
self.server.workbooks.populate_connections(single_workbook)
updated_workbook = self.server.workbooks.update(single_workbook)
self.assertEqual(single_workbook._connections, updated_workbook._connections)
self.assertEqual(single_workbook._views, updated_workbook._views)
self.assertEqual(single_workbook.tags, updated_workbook.tags)
self.assertEqual(single_workbook._initial_tags, updated_workbook._initial_tags)
self.assertEqual(single_workbook._preview_image, updated_workbook._preview_image)
def test_update_tags(self):
with open(ADD_TAGS_XML, 'rb') as f:
add_tags_xml = f.read().decode('utf-8')
with open(UPDATE_XML, 'rb') as f:
update_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.put(self.baseurl + '/1f951daf-4061-451a-9df1-69a8062664f2/tags', text=add_tags_xml)
m.delete(self.baseurl + '/1f951daf-4061-451a-9df1-69a8062664f2/tags/b', status_code=204)
m.delete(self.baseurl + '/1f951daf-4061-451a-9df1-69a8062664f2/tags/d', status_code=204)
m.put(self.baseurl + '/1f951daf-4061-451a-9df1-69a8062664f2', text=update_xml)
single_workbook = TSC.WorkbookItem('1d0304cd-3796-429f-b815-7258370b9b74')
single_workbook._id = '1f951daf-4061-451a-9df1-69a8062664f2'
single_workbook._initial_tags.update(['a', 'b', 'c', 'd'])
single_workbook.tags.update(['a', 'c', 'e'])
updated_workbook = self.server.workbooks.update(single_workbook)
self.assertEqual(single_workbook.tags, updated_workbook.tags)
self.assertEqual(single_workbook._initial_tags, updated_workbook._initial_tags)
def test_download(self):
with requests_mock.mock() as m:
m.get(self.baseurl + '/1f951daf-4061-451a-9df1-69a8062664f2/content',
headers={'Content-Disposition': 'name="tableau_workbook"; filename="RESTAPISample.twbx"'})
file_path = self.server.workbooks.download('1f951daf-4061-451a-9df1-69a8062664f2')
self.assertTrue(os.path.exists(file_path))
os.remove(file_path)
def test_download_sanitizes_name(self):
filename = "Name,With,Commas.twbx"
disposition = 'name="tableau_workbook"; filename="{}"'.format(filename)
with requests_mock.mock() as m:
m.get(self.baseurl + '/1f951daf-4061-451a-9df1-69a8062664f2/content',
headers={'Content-Disposition': disposition})
file_path = self.server.workbooks.download('1f951daf-4061-451a-9df1-69a8062664f2')
self.assertEqual(os.path.basename(file_path), "NameWithCommas.twbx")
self.assertTrue(os.path.exists(file_path))
os.remove(file_path)
def test_download_extract_only(self):
# Pretend we're 2.5 for 'extract_only'
self.server.version = "2.5"
self.baseurl = self.server.workbooks.baseurl
with requests_mock.mock() as m:
m.get(self.baseurl + '/1f951daf-4061-451a-9df1-69a8062664f2/content?includeExtract=False',
headers={'Content-Disposition': 'name="tableau_workbook"; filename="RESTAPISample.twbx"'},
complete_qs=True)
# Technically this shouldn't download a twbx, but we are interested in the qs, not the file
file_path = self.server.workbooks.download('1f951daf-4061-451a-9df1-69a8062664f2', include_extract=False)
self.assertTrue(os.path.exists(file_path))
os.remove(file_path)
def test_download_missing_id(self):
self.assertRaises(ValueError, self.server.workbooks.download, '')
def test_populate_views(self):
with open(POPULATE_VIEWS_XML, 'rb') as f:
response_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.get(self.baseurl + '/1f951daf-4061-451a-9df1-69a8062664f2/views', text=response_xml)
single_workbook = TSC.WorkbookItem('test')
single_workbook._id = '1f951daf-4061-451a-9df1-69a8062664f2'
self.server.workbooks.populate_views(single_workbook)
views_list = single_workbook.views
self.assertEqual('097dbe13-de89-445f-b2c3-02f28bd010c1', views_list[0].id)
self.assertEqual('GDP per capita', views_list[0].name)
self.assertEqual('RESTAPISample/sheets/GDPpercapita', views_list[0].content_url)
self.assertEqual('2c1ab9d7-8d64-4cc6-b495-52e40c60c330', views_list[1].id)
self.assertEqual('Country ranks', views_list[1].name)
self.assertEqual('RESTAPISample/sheets/Countryranks', views_list[1].content_url)
self.assertEqual('0599c28c-6d82-457e-a453-e52c1bdb00f5', views_list[2].id)
self.assertEqual('Interest rates', views_list[2].name)
self.assertEqual('RESTAPISample/sheets/Interestrates', views_list[2].content_url)
def test_populate_views_with_usage(self):
with open(POPULATE_VIEWS_USAGE_XML, 'rb') as f:
response_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.get(self.baseurl + '/1f951daf-4061-451a-9df1-69a8062664f2/views?includeUsageStatistics=true',
text=response_xml)
single_workbook = TSC.WorkbookItem('test')
single_workbook._id = '1f951daf-4061-451a-9df1-69a8062664f2'
self.server.workbooks.populate_views(single_workbook, usage=True)
views_list = single_workbook.views
self.assertEqual('097dbe13-de89-445f-b2c3-02f28bd010c1', views_list[0].id)
self.assertEqual(2, views_list[0].total_views)
self.assertEqual('2c1ab9d7-8d64-4cc6-b495-52e40c60c330', views_list[1].id)
self.assertEqual(37, views_list[1].total_views)
self.assertEqual('0599c28c-6d82-457e-a453-e52c1bdb00f5', views_list[2].id)
self.assertEqual(0, views_list[2].total_views)
def test_populate_views_missing_id(self):
single_workbook = TSC.WorkbookItem('test')
self.assertRaises(TSC.MissingRequiredFieldError, self.server.workbooks.populate_views, single_workbook)
def test_populate_connections(self):
with open(POPULATE_CONNECTIONS_XML, 'rb') as f:
response_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.get(self.baseurl + '/1f951daf-4061-451a-9df1-69a8062664f2/connections', text=response_xml)
single_workbook = TSC.WorkbookItem('test')
single_workbook._id = '1f951daf-4061-451a-9df1-69a8062664f2'
self.server.workbooks.populate_connections(single_workbook)
self.assertEqual('37ca6ced-58d7-4dcf-99dc-f0a85223cbef', single_workbook.connections[0].id)
self.assertEqual('dataengine', single_workbook.connections[0].connection_type)
self.assertEqual('4506225a-0d32-4ab1-82d3-c24e85f7afba', single_workbook.connections[0].datasource_id)
self.assertEqual('World Indicators', single_workbook.connections[0].datasource_name)
def test_populate_permissions(self):
with open(POPULATE_PERMISSIONS_XML, 'rb') as f:
response_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.get(self.baseurl + '/21778de4-b7b9-44bc-a599-1506a2639ace/permissions', text=response_xml)
single_workbook = TSC.WorkbookItem('test')
single_workbook._id = '21778de4-b7b9-44bc-a599-1506a2639ace'
self.server.workbooks.populate_permissions(single_workbook)
permissions = single_workbook.permissions
self.assertEqual(permissions[0].grantee.tag_name, 'group')
self.assertEqual(permissions[0].grantee.id, '5e5e1978-71fa-11e4-87dd-7382f5c437af')
self.assertDictEqual(permissions[0].capabilities, {
TSC.Permission.Capability.WebAuthoring: TSC.Permission.Mode.Allow,
TSC.Permission.Capability.Read: TSC.Permission.Mode.Allow,
TSC.Permission.Capability.Filter: TSC.Permission.Mode.Allow,
TSC.Permission.Capability.AddComment: TSC.Permission.Mode.Allow
})
self.assertEqual(permissions[1].grantee.tag_name, 'user')
self.assertEqual(permissions[1].grantee.id, '7c37ee24-c4b1-42b6-a154-eaeab7ee330a')
self.assertDictEqual(permissions[1].capabilities, {
TSC.Permission.Capability.ExportImage: TSC.Permission.Mode.Allow,
TSC.Permission.Capability.ShareView: TSC.Permission.Mode.Allow,
TSC.Permission.Capability.ExportData: TSC.Permission.Mode.Deny,
TSC.Permission.Capability.ViewComments: TSC.Permission.Mode.Deny
})
def test_add_permissions(self):
with open(UPDATE_PERMISSIONS, 'rb') as f:
response_xml = f.read().decode('utf-8')
single_workbook = TSC.WorkbookItem('test')
single_workbook._id = '21778de4-b7b9-44bc-a599-1506a2639ace'
bob = UserItem.as_reference("7c37ee24-c4b1-42b6-a154-eaeab7ee330a")
group_of_people = GroupItem.as_reference("5e5e1978-71fa-11e4-87dd-7382f5c437af")
new_permissions = [
PermissionsRule(bob, {'Write': 'Allow'}),
PermissionsRule(group_of_people, {'Read': 'Deny'})
]
with requests_mock.mock() as m:
m.put(self.baseurl + "/21778de4-b7b9-44bc-a599-1506a2639ace/permissions", text=response_xml)
permissions = self.server.workbooks.update_permissions(single_workbook, new_permissions)
self.assertEqual(permissions[0].grantee.tag_name, 'group')
self.assertEqual(permissions[0].grantee.id, '5e5e1978-71fa-11e4-87dd-7382f5c437af')
self.assertDictEqual(permissions[0].capabilities, {
TSC.Permission.Capability.Read: TSC.Permission.Mode.Deny
})
self.assertEqual(permissions[1].grantee.tag_name, 'user')
self.assertEqual(permissions[1].grantee.id, '7c37ee24-c4b1-42b6-a154-eaeab7ee330a')
self.assertDictEqual(permissions[1].capabilities, {
TSC.Permission.Capability.Write: TSC.Permission.Mode.Allow
})
def test_populate_connections_missing_id(self):
single_workbook = TSC.WorkbookItem('test')
self.assertRaises(TSC.MissingRequiredFieldError,
self.server.workbooks.populate_connections,
single_workbook)
def test_populate_pdf(self):
self.server.version = "3.4"
self.baseurl = self.server.workbooks.baseurl
with open(POPULATE_PDF, "rb") as f:
response = f.read()
with requests_mock.mock() as m:
m.get(self.baseurl + "/1f951daf-4061-451a-9df1-69a8062664f2/pdf?type=a5&orientation=landscape",
content=response)
single_workbook = TSC.WorkbookItem('test')
single_workbook._id = '1f951daf-4061-451a-9df1-69a8062664f2'
type = TSC.PDFRequestOptions.PageType.A5
orientation = TSC.PDFRequestOptions.Orientation.Landscape
req_option = TSC.PDFRequestOptions(type, orientation)
self.server.workbooks.populate_pdf(single_workbook, req_option)
self.assertEqual(response, single_workbook.pdf)
def test_populate_preview_image(self):
with open(POPULATE_PREVIEW_IMAGE, 'rb') as f:
response = f.read()
with requests_mock.mock() as m:
m.get(self.baseurl + '/1f951daf-4061-451a-9df1-69a8062664f2/previewImage', content=response)
single_workbook = TSC.WorkbookItem('test')
single_workbook._id = '1f951daf-4061-451a-9df1-69a8062664f2'
self.server.workbooks.populate_preview_image(single_workbook)
self.assertEqual(response, single_workbook.preview_image)
def test_populate_preview_image_missing_id(self):
single_workbook = TSC.WorkbookItem('test')
self.assertRaises(TSC.MissingRequiredFieldError,
self.server.workbooks.populate_preview_image,
single_workbook)
def test_publish(self):
with open(PUBLISH_XML, 'rb') as f:
response_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.post(self.baseurl, text=response_xml)
new_workbook = TSC.WorkbookItem(name='Sample',
show_tabs=False,
project_id='ee8c6e70-43b6-11e6-af4f-f7b0d8e20760')
sample_workbook = os.path.join(TEST_ASSET_DIR, 'SampleWB.twbx')
publish_mode = self.server.PublishMode.CreateNew
new_workbook = self.server.workbooks.publish(new_workbook,
sample_workbook,
publish_mode)
self.assertEqual('a8076ca1-e9d8-495e-bae6-c684dbb55836', new_workbook.id)
self.assertEqual('RESTAPISample', new_workbook.name)
self.assertEqual('RESTAPISample_0', new_workbook.content_url)
self.assertEqual(False, new_workbook.show_tabs)
self.assertEqual(1, new_workbook.size)
self.assertEqual('2016-08-18T18:33:24Z', format_datetime(new_workbook.created_at))
self.assertEqual('2016-08-18T20:31:34Z', format_datetime(new_workbook.updated_at))
self.assertEqual('ee8c6e70-43b6-11e6-af4f-f7b0d8e20760', new_workbook.project_id)
self.assertEqual('default', new_workbook.project_name)
self.assertEqual('5de011f8-5aa9-4d5b-b991-f462c8dd6bb7', new_workbook.owner_id)
self.assertEqual('fe0b4e89-73f4-435e-952d-3a263fbfa56c', new_workbook.views[0].id)
self.assertEqual('GDP per capita', new_workbook.views[0].name)
self.assertEqual('RESTAPISample_0/sheets/GDPpercapita', new_workbook.views[0].content_url)
def test_publish_with_hidden_view(self):
with open(PUBLISH_XML, 'rb') as f:
response_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.post(self.baseurl, text=response_xml)
new_workbook = TSC.WorkbookItem(name='Sample',
show_tabs=False,
project_id='ee8c6e70-43b6-11e6-af4f-f7b0d8e20760')
sample_workbook = os.path.join(TEST_ASSET_DIR, 'SampleWB.twbx')
publish_mode = self.server.PublishMode.CreateNew
new_workbook = self.server.workbooks.publish(new_workbook,
sample_workbook,
publish_mode,
hidden_views=['GDP per capita'])
request_body = m._adapter.request_history[0]._request.body
self.assertTrue(re.search(rb'<views><view.*?hidden=\"true\".*?\/><\/views>', request_body))
self.assertTrue(re.search(rb'<views><view.*?name=\"GDP per capita\".*?\/><\/views>', request_body))
def test_publish_async(self):
self.server.version = '3.0'
baseurl = self.server.workbooks.baseurl
with open(PUBLISH_ASYNC_XML, 'rb') as f:
response_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.post(baseurl, text=response_xml)
new_workbook = TSC.WorkbookItem(name='Sample',
show_tabs=False,
project_id='ee8c6e70-43b6-11e6-af4f-f7b0d8e20760')
sample_workbook = os.path.join(TEST_ASSET_DIR, 'SampleWB.twbx')
publish_mode = self.server.PublishMode.CreateNew
new_job = self.server.workbooks.publish(new_workbook,
sample_workbook,
publish_mode,
as_job=True)
self.assertEqual('7c3d599e-949f-44c3-94a1-f30ba85757e4', new_job.id)
self.assertEqual('PublishWorkbook', new_job.type)
self.assertEqual('0', new_job.progress)
self.assertEqual('2018-06-29T23:22:32Z', format_datetime(new_job.created_at))
self.assertEqual('1', new_job.finish_code)
def test_publish_invalid_file(self):
new_workbook = TSC.WorkbookItem('test', 'ee8c6e70-43b6-11e6-af4f-f7b0d8e20760')
self.assertRaises(IOError, self.server.workbooks.publish, new_workbook, '.',
self.server.PublishMode.CreateNew)
def test_publish_invalid_file_type(self):
new_workbook = TSC.WorkbookItem('test', 'ee8c6e70-43b6-11e6-af4f-f7b0d8e20760')
self.assertRaises(ValueError, self.server.workbooks.publish,
new_workbook, os.path.join(TEST_ASSET_DIR, 'SampleDS.tds'),
self.server.PublishMode.CreateNew)
def test_publish_multi_connection(self):
new_workbook = TSC.WorkbookItem(name='Sample', show_tabs=False,
project_id='ee8c6e70-43b6-11e6-af4f-f7b0d8e20760')
connection1 = TSC.ConnectionItem()
connection1.server_address = 'mysql.test.com'
connection1.connection_credentials = TSC.ConnectionCredentials('test', 'secret', True)
connection2 = TSC.ConnectionItem()
connection2.server_address = 'pgsql.test.com'
connection2.connection_credentials = TSC.ConnectionCredentials('test', 'secret', True)
response = RequestFactory.Workbook._generate_xml(new_workbook, connections=[connection1, connection2])
# Can't use ConnectionItem parser due to xml namespace problems
connection_results = ET.fromstring(response).findall('.//connection')
self.assertEqual(connection_results[0].get('serverAddress', None), 'mysql.test.com')
self.assertEqual(connection_results[0].find('connectionCredentials').get('name', None), 'test')
self.assertEqual(connection_results[1].get('serverAddress', None), 'pgsql.test.com')
self.assertEqual(connection_results[1].find('connectionCredentials').get('password', None), 'secret')
def test_publish_single_connection(self):
new_workbook = TSC.WorkbookItem(name='Sample', show_tabs=False,
project_id='ee8c6e70-43b6-11e6-af4f-f7b0d8e20760')
connection_creds = TSC.ConnectionCredentials('test', 'secret', True)
response = RequestFactory.Workbook._generate_xml(new_workbook, connection_credentials=connection_creds)
# Can't use ConnectionItem parser due to xml namespace problems
credentials = ET.fromstring(response).findall('.//connectionCredentials')
self.assertEqual(len(credentials), 1)
self.assertEqual(credentials[0].get('name', None), 'test')
self.assertEqual(credentials[0].get('password', None), 'secret')
self.assertEqual(credentials[0].get('embed', None), 'true')
def test_credentials_and_multi_connect_raises_exception(self):
new_workbook = TSC.WorkbookItem(name='Sample', show_tabs=False,
project_id='ee8c6e70-43b6-11e6-af4f-f7b0d8e20760')
connection_creds = TSC.ConnectionCredentials('test', 'secret', True)
connection1 = TSC.ConnectionItem()
connection1.server_address = 'mysql.test.com'
connection1.connection_credentials = TSC.ConnectionCredentials('test', 'secret', True)
with self.assertRaises(RuntimeError):
response = RequestFactory.Workbook._generate_xml(new_workbook,
connection_credentials=connection_creds,
connections=[connection1])
def test_synchronous_publish_timeout_error(self):
with requests_mock.mock() as m:
m.register_uri('POST', self.baseurl, status_code=504)
new_workbook = TSC.WorkbookItem(project_id='')
publish_mode = self.server.PublishMode.CreateNew
self.assertRaisesRegex(InternalServerError, 'Please use asynchronous publishing to avoid timeouts',
self.server.workbooks.publish, new_workbook, asset('SampleWB.twbx'), publish_mode)
|
the-stack_0_21570 | import datetime
import json
from enum import Enum
import numpy as np
import sqlalchemy as db
from sqlalchemy import Column, Integer, String, DateTime, Text, ForeignKey, or_
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, relationship
from sqlalchemy_utils import database_exists, create_database as cd, drop_database as dba
from . import config
from .utils import delete_data_file, save_data_to_file, Singleton
Base = declarative_base()
class OpStatus(Enum):
PENDING = "pending"
COMPUTING = "computing"
COMPUTED = "computed"
FAILED = "failed"
class GraphStatus(Enum):
PENDING = "pending"
COMPUTING = "computing"
COMPUTED = "computed"
FAILED = "failed"
class ClientOpMappingStatus(Enum):
SENT = "sent"
ACKNOWLEDGED = "acknowledged"
NOT_ACKNOWLEDGED = "not_acknowledged"
COMPUTING = "computing"
COMPUTED = "computed"
NOT_COMPUTED = "not_computed"
FAILED = "failed"
REJECTED = "rejected"
class Graph(Base):
__tablename__ = 'graph'
id = Column(Integer, primary_key=True)
ops = relationship("Op", backref="graph")
# Status of this graph 1. pending 2. computing 3. computed 4. failed
status = Column(String(10), default="pending")
created_at = Column(DateTime, default=datetime.datetime.utcnow)
class Data(Base):
__tablename__ = 'data'
id = Column(Integer, primary_key=True)
type = Column(String(20), nullable=False)
file_path = Column(String(200), nullable=True)
value = Column(String(100), nullable=True)
created_at = Column(DateTime, default=datetime.datetime.utcnow)
class Client(Base):
__tablename__ = 'client'
id = Column(Integer, primary_key=True)
client_id = Column(String(100), nullable=False)
client_ip = Column(String(20), nullable=True)
status = Column(String(20), nullable=False, default="disconnected")
# 1. ravop 2. ravjs
type = Column(String(10), nullable=True)
client_ops = relationship("ClientOpMapping", backref="client", lazy="dynamic")
connected_at = Column(DateTime, default=datetime.datetime.utcnow)
disconnected_at = Column(DateTime, default=datetime.datetime.utcnow)
created_at = Column(DateTime, default=datetime.datetime.utcnow)
class Op(Base):
__tablename__ = 'op'
id = Column(Integer, primary_key=True)
# Op name
name = Column(String(20), nullable=True)
# Graph id
graph_id = Column(Integer, ForeignKey('graph.id'))
# 1. input 2. output 3. middle
node_type = Column(String(10), nullable=False)
# Store list of op ids
inputs = Column(Text, nullable=True)
# Store filenames - Pickle files
outputs = Column(String(100), nullable=True)
# Op type for no change in values
op_type = Column(String(50), nullable=False)
operator = Column(String(50), nullable=False)
# 1. pending 2. computing 3. computed 4. failed
status = Column(String(10), default="pending")
message = Column(Text, nullable=True)
# Dict of params
params = Column(Text, nullable=True)
op_mappings = relationship("ClientOpMapping", backref="op", lazy="dynamic")
created_at = Column(DateTime, default=datetime.datetime.utcnow)
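# A minimal sketch of how an Op row might be populated, illustrating that the
# Text columns above hold JSON-encoded values (the names and numbers here are
# invented for illustration only):
#
#   op = Op(name="add_1", graph_id=1, node_type="middle",
#           inputs=json.dumps([3, 4]), outputs=json.dumps([7]),
#           op_type="binary", operator="addition",
#           params=json.dumps({}), status="pending")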
class ClientOpMapping(Base):
__tablename__ = "client_op_mapping"
id = Column(Integer, primary_key=True)
client_id = Column(Integer, ForeignKey('client.id'))
op_id = Column(Integer, ForeignKey('op.id'))
sent_time = Column(DateTime, default=None)
response_time = Column(DateTime, default=None)
# 1. computing 2. computed 3. failed
status = Column(String(10), default="computing")
created_at = Column(DateTime, default=datetime.datetime.utcnow)
@Singleton
class DBManager(object):
def __init__(self):
self.create_database()
self.engine, self.session = self.connect()
def connect(self):
engine = db.create_engine(config.RDF_DATABASE_URI, isolation_level="READ UNCOMMITTED")
connection = engine.connect()
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
return engine, session
def create_database(self):
if not database_exists(config.RDF_DATABASE_URI):
cd(config.RDF_DATABASE_URI)
print("Database created")
def drop_database(self):
if database_exists(config.RDF_DATABASE_URI):
dba(config.RDF_DATABASE_URI)
print("Database dropped")
def create_session(self):
"""
Create a new session
"""
DBSession = sessionmaker(bind=self.engine)
return DBSession()
def create_tables(self):
Base.metadata.create_all(self.engine)
def refresh(self, obj):
"""
Refresh an object
"""
self.session.refresh(obj)
return obj
def get(self, name, id):
if name == "op":
obj = self.session.query(Op).get(id)
elif name == "data":
obj = self.session.query(Data).get(id)
elif name == "graph":
obj = self.session.query(Graph).get(id)
elif name == "client":
obj = self.session.query(Client).get(id)
else:
obj = None
return obj
def add(self, name, **kwargs):
if name == "op":
obj = Op()
elif name == "data":
obj = Data()
elif name == "graph":
obj = Graph()
elif name == "client":
obj = Client()
else:
obj = None
for key, value in kwargs.items():
setattr(obj, key, value)
self.session.add(obj)
self.session.commit()
return obj
def update(self, name, id, **kwargs):
if name == "op":
obj = self.session.query(Op).get(id)
elif name == "data":
obj = self.session.query(Data).get(id)
elif name == "graph":
obj = self.session.query(Graph).get(id)
elif name == "client":
obj = self.session.query(Client).get(id)
else:
obj = None
for key, value in kwargs.items():
setattr(obj, key, value)
self.session.commit()
return obj
def delete(self, obj):
self.session.delete(obj)
self.session.commit()
def create_op(self, **kwargs):
op = Op()
for key, value in kwargs.items():
setattr(op, key, value)
self.session.add(op)
self.session.commit()
return op
def get_op(self, op_id):
"""
Get an existing op
"""
return self.session.query(Op).get(op_id)
def update_op(self, op, **kwargs):
for key, value in kwargs.items():
setattr(op, key, value)
self.session.commit()
return op
def create_data(self, **kwargs):
data = Data()
for key, value in kwargs.items():
setattr(data, key, value)
self.session.add(data)
self.session.commit()
return data
def get_data(self, data_id):
"""
Get an existing data
"""
return self.session.query(Data).get(data_id)
def update_data(self, data, **kwargs):
for key, value in kwargs.items():
setattr(data, key, value)
self.session.commit()
return data
def delete_data(self, data_id):
data = self.session.query(Data).get(data_id)
self.session.delete(data)
self.session.commit()
def create_data_complete(self, data, data_type):
# print("Creating data:", data)
if isinstance(data, (np.ndarray, np.generic)):
if data.ndim == 1:
data = data[..., np.newaxis]
d = self.create_data(type=data_type)
# Save file
file_path = save_data_to_file(d.id, data, data_type)
# Update file path
        self.update_data(d, file_path=file_path)
return d
def get_op_status(self, op_id):
status = self.session.query(Op).get(op_id).status
return status
def get_graph(self, graph_id):
"""
Get an existing graph
"""
return self.session.query(Graph).get(graph_id)
def create_graph(self):
"""
Create a new graph
"""
graph = Graph()
self.session.add(graph)
self.session.commit()
return graph
def get_graph_ops(self, graph_id):
return self.session.query(Op).filter(Op.graph_id == graph_id).all()
def delete_graph_ops(self, graph_id):
print("Deleting graph ops")
ops = self.get_graph_ops(graph_id=graph_id)
for op in ops:
print("Op id:{}".format(op.id))
data_ids = json.loads(op.outputs)
if data_ids is not None:
for data_id in data_ids:
print("Data id:{}".format(data_id))
# Delete data file
delete_data_file(data_id)
# Delete data object
self.delete_data(data_id)
# Delete op object
self.delete(op)
def create_client(self, **kwargs):
obj = Client()
for key, value in kwargs.items():
setattr(obj, key, value)
self.session.add(obj)
self.session.commit()
return obj
def get_client(self, client_id):
"""
Get an existing client
"""
return self.session.query(Client).get(client_id)
def get_client_by_sid(self, sid):
"""
Get an existing client by sid
"""
return self.session.query(Client).filter(Client.client_id == sid).first()
def update_client(self, client, **kwargs):
for key, value in kwargs.items():
setattr(client, key, value)
self.session.commit()
return client
def get_all_clients(self):
return self.session.query(Client).order_by(Client.created_at.desc()).all()
def get_all_graphs(self):
return self.session.query(Graph).order_by(Graph.created_at.desc()).all()
def get_all_ops(self):
return self.session.query(Op).order_by(Op.id.desc()).all()
# def deactivate_all_graphs(self):
# for graph in self.session.query(Graph).all():
# graph.status = "inactive"
#
# self.session.commit()
#
# def deactivate_graph(self, graph_id):
# graph = self.get_graph(graph_id=graph_id)
# graph.status = "inactive"
# self.session.commit()
def disconnect_all_clients(self):
        for client in self.session.query(Client).all():
            client.status = "disconnected"
self.session.commit()
def disconnect_client(self, client_id):
client = self.get_client(client_id=client_id)
client.status = "disconnected"
self.session.commit()
def get_ops_by_name(self, op_name, graph_id=None):
if graph_id is not None:
ops = self.session.query(Op).filter(Op.graph_id == graph_id).filter(Op.name.contains(op_name)).all()
else:
ops = self.session.query(Op).filter(Op.name.contains(op_name)).all()
return ops
def get_op_readiness(self, op):
"""
Get op readiness
"""
inputs = json.loads(op.inputs)
params = json.loads(op.params)
cs = 0
for input_op in inputs:
input_op1 = self.get_op(op_id=input_op)
if input_op1.status in ["pending", "computing"]:
return "parent_op_not_ready"
elif input_op1.status == "failed":
return "parent_op_failed"
elif input_op1.status == "computed":
cs += 1
for index, value in params.items():
if type(value).__name__ == "int":
cop = self.get_op(op_id=value)
if cop.status in ["pending", "computing"]:
return "parent_op_not_ready"
elif cop.status == "failed":
return "parent_op_failed"
elif cop.status == "computed":
cs += 1
else:
cs += 1
if cs == len(inputs) + len(params.keys()):
return "ready"
else:
return "not_ready"
def get_ops_without_graph(self, status=None):
"""
Get a list of all ops not associated to any graph
"""
        if status is not None:
            return self.session.query(Op).filter(Op.graph_id.is_(None)).filter(Op.status == status).all()
        else:
            return self.session.query(Op).filter(Op.graph_id.is_(None)).all()
def get_graphs(self, status=None):
"""
Get a list of graphs
"""
if status is not None:
return self.session.query(Graph).filter(Graph.status == status).all()
else:
            return self.session.query(Graph).all()
def get_clients(self, status=None):
"""
Get a list of clients
"""
if status is not None:
return self.session.query(Client).filter(Client.status == status).all()
else:
return self.session.query(Client).all()
def get_available_clients(self):
"""
Get all clients which are available
"""
clients = self.session.query(Client).filter(Client.status == "connected").all()
client_list = []
for client in clients:
            client_ops = client.client_ops.filter(or_(ClientOpMapping.status == ClientOpMappingStatus.SENT.value,
ClientOpMapping.status == ClientOpMappingStatus.ACKNOWLEDGED.value,
ClientOpMapping.status == ClientOpMappingStatus.COMPUTING.value))
if client_ops.count() == 0:
client_list.append(client)
return client_list
def get_ops(self, graph_id=None, status=None):
"""
Get a list of ops based on certain parameters
"""
if graph_id is None and status is None:
return self.session.query(Op).all()
elif graph_id is not None and status is not None:
return self.session.query(Op).filter(Op.graph_id == graph_id).filter(Op.status == status).all()
else:
if graph_id is not None:
return self.session.query(Op).filter(Op.graph_id == graph_id).all()
elif status is not None:
return self.session.query(Op).filter(Op.status == status).all()
else:
return self.session.query(Op).all()
def create_client_op_mapping(self, **kwargs):
mapping = ClientOpMapping()
for key, value in kwargs.items():
setattr(mapping, key, value)
self.session.add(mapping)
self.session.commit()
return mapping
def update_client_op_mapping(self, client_op_mapping_id, **kwargs):
mapping = self.session.query(ClientOpMapping).get(client_op_mapping_id)
for key, value in kwargs.items():
setattr(mapping, key, value)
self.session.commit()
return mapping
def find_client_op_mapping(self, client_id, op_id):
mapping = self.session.query(ClientOpMapping).filter(ClientOpMapping.client_id == client_id,
ClientOpMapping.op_id == op_id).first()
return mapping
def get_incomplete_op(self):
ops = self.session.query(Op).filter(Op.status == OpStatus.COMPUTING.value).all()
for op in ops:
op_mappings = op.op_mappings
if op_mappings.filter(ClientOpMapping.status == ClientOpMappingStatus.SENT.value).count() >= 3 or \
op_mappings.filter(ClientOpMapping.status == ClientOpMappingStatus.COMPUTING.value).count() >= 2 \
or op_mappings.filter(ClientOpMapping.status == ClientOpMappingStatus.REJECTED.value).count() >= 5 \
or op_mappings.filter(ClientOpMapping.status == ClientOpMappingStatus.FAILED.value).count() >= 3:
continue
return op
return None
def get_op_status_final(self, op_id):
op = self.get_op(op_id=op_id)
op_mappings = op.op_mappings
if op_mappings.filter(ClientOpMapping.status == ClientOpMappingStatus.FAILED.value).count() >= 3:
return "failed"
return "computing"
def get_first_graph_op(self, graph_id):
"""
Return the first graph op
"""
ops = self.get_graph_ops(graph_id=graph_id)
return ops[0]
def get_last_graph_op(self, graph_id):
"""
Return the last graph op
"""
ops = self.get_graph_ops(graph_id=graph_id)
return ops[-1]
|
the-stack_0_21571 | # -*- coding: utf-8 -*-
"""Workchain to compute a band structure for a given structure using Quantum ESPRESSO pw.x."""
from aiida import orm
from aiida.common import AttributeDict
from aiida.plugins import WorkflowFactory
from aiida.engine import WorkChain, ToContext, if_
from aiida_quantumespresso.calculations.functions.seekpath_structure_analysis import seekpath_structure_analysis
from aiida_quantumespresso.utils.mapping import prepare_process_inputs
from ..protocols.utils import ProtocolMixin
PwBaseWorkChain = WorkflowFactory('quantumespresso.pw.base')
PwRelaxWorkChain = WorkflowFactory('quantumespresso.pw.relax')
def validate_inputs(inputs, ctx=None): # pylint: disable=unused-argument
"""Validate the inputs of the entire input namespace."""
# pylint: disable=no-member
if 'nbands_factor' in inputs and 'nbnd' in inputs['bands']['pw']['parameters'].get_attribute('SYSTEM', {}):
return PwBandsWorkChain.exit_codes.ERROR_INVALID_INPUT_NUMBER_OF_BANDS.message
# Cannot specify both `bands_kpoints` and `bands_kpoints_distance`
if all([key in inputs for key in ['bands_kpoints', 'bands_kpoints_distance']]):
return PwBandsWorkChain.exit_codes.ERROR_INVALID_INPUT_KPOINTS.message
class PwBandsWorkChain(ProtocolMixin, WorkChain):
"""Workchain to compute a band structure for a given structure using Quantum ESPRESSO pw.x.
The logic for the computation of various parameters for the BANDS step is as follows:
Number of bands:
One can specify the number of bands to be used in the BANDS step either directly through the input parameters
`bands.pw.parameters.SYSTEM.nbnd` or through `nbands_factor`. Note that specifying both is not allowed. When
    neither is specified, nothing will be set by the work chain and the Quantum ESPRESSO default will be used. If
    `nbands_factor` is specified, the maximum of the following values will be used:
* `nbnd` of the preceding SCF calculation
* 0.5 * nspin * nelectrons * nbands_factor
* 0.5 * nspin * nelectrons + 4 * nspin
Kpoints:
There are three options; specify either an existing `KpointsData` through `bands_kpoints`, or specify the
`bands_kpoint_distance`, or specify neither. For the former those exact kpoints will be used for the BANDS step.
In the two other cases, the structure will first be normalized using SeekPath and the path along high-symmetry
k-points will be generated on that structure. The distance between kpoints for the path will be equal to that
of `bands_kpoints_distance` or the SeekPath default if not specified.
"""
@classmethod
def define(cls, spec):
"""Define the process specification."""
# yapf: disable
super().define(spec)
spec.expose_inputs(PwRelaxWorkChain, namespace='relax', exclude=('clean_workdir', 'structure'),
namespace_options={'required': False, 'populate_defaults': False,
'help': 'Inputs for the `PwRelaxWorkChain`, if not specified at all, the relaxation step is skipped.'})
spec.expose_inputs(PwBaseWorkChain, namespace='scf',
exclude=('clean_workdir', 'pw.structure'),
namespace_options={'help': 'Inputs for the `PwBaseWorkChain` for the SCF calculation.'})
spec.expose_inputs(PwBaseWorkChain, namespace='bands',
exclude=('clean_workdir', 'pw.structure', 'pw.kpoints', 'pw.kpoints_distance'),
namespace_options={'help': 'Inputs for the `PwBaseWorkChain` for the BANDS calculation.'})
        spec.input('structure', valid_type=orm.StructureData, help='The input structure.')
spec.input('clean_workdir', valid_type=orm.Bool, default=lambda: orm.Bool(False),
help='If `True`, work directories of all called calculation will be cleaned at the end of execution.')
spec.input('nbands_factor', valid_type=orm.Float, required=False,
help='The number of bands for the BANDS calculation is that used for the SCF multiplied by this factor.')
spec.input('bands_kpoints', valid_type=orm.KpointsData, required=False,
help='Explicit kpoints to use for the BANDS calculation. Specify either this or `bands_kpoints_distance`.')
spec.input('bands_kpoints_distance', valid_type=orm.Float, required=False,
help='Minimum kpoints distance for the BANDS calculation. Specify either this or `bands_kpoints`.')
spec.inputs.validator = validate_inputs
spec.outline(
cls.setup,
if_(cls.should_run_relax)(
cls.run_relax,
cls.inspect_relax,
),
if_(cls.should_run_seekpath)(
cls.run_seekpath,
),
cls.run_scf,
cls.inspect_scf,
cls.run_bands,
cls.inspect_bands,
cls.results,
)
spec.exit_code(201, 'ERROR_INVALID_INPUT_NUMBER_OF_BANDS',
message='Cannot specify both `nbands_factor` and `bands.pw.parameters.SYSTEM.nbnd`.')
spec.exit_code(202, 'ERROR_INVALID_INPUT_KPOINTS',
message='Cannot specify both `bands_kpoints` and `bands_kpoints_distance`.')
spec.exit_code(401, 'ERROR_SUB_PROCESS_FAILED_RELAX',
message='The PwRelaxWorkChain sub process failed')
        spec.exit_code(402, 'ERROR_SUB_PROCESS_FAILED_SCF',
            message='The scf PwBaseWorkChain sub process failed')
        spec.exit_code(403, 'ERROR_SUB_PROCESS_FAILED_BANDS',
            message='The bands PwBaseWorkChain sub process failed')
spec.output('primitive_structure', valid_type=orm.StructureData,
required=False,
help='The normalized and primitivized structure for which the bands are computed.')
spec.output('seekpath_parameters', valid_type=orm.Dict,
required=False,
help='The parameters used in the SeeKpath call to normalize the input or relaxed structure.')
spec.output('scf_parameters', valid_type=orm.Dict,
help='The output parameters of the SCF `PwBaseWorkChain`.')
spec.output('band_parameters', valid_type=orm.Dict,
help='The output parameters of the BANDS `PwBaseWorkChain`.')
spec.output('band_structure', valid_type=orm.BandsData,
help='The computed band structure.')
# yapf: enable
@classmethod
def get_protocol_filepath(cls):
"""Return ``pathlib.Path`` to the ``.yaml`` file that defines the protocols."""
from importlib_resources import files
from ..protocols import pw as pw_protocols
return files(pw_protocols) / 'bands.yaml'
@classmethod
def get_builder_from_protocol(cls, code, structure, protocol=None, overrides=None, **kwargs):
"""Return a builder prepopulated with inputs selected according to the chosen protocol.
:param code: the ``Code`` instance configured for the ``quantumespresso.pw`` plugin.
:param structure: the ``StructureData`` instance to use.
:param protocol: protocol to use, if not specified, the default will be used.
:param overrides: optional dictionary of inputs to override the defaults of the protocol.
:param kwargs: additional keyword arguments that will be passed to the ``get_builder_from_protocol`` of all the
sub processes that are called by this workchain.
:return: a process builder instance with all inputs defined ready for launch.
"""
inputs = cls.get_protocol_inputs(protocol, overrides)
args = (code, structure, protocol)
relax = PwRelaxWorkChain.get_builder_from_protocol(*args, overrides=inputs.get('relax', None), **kwargs)
scf = PwBaseWorkChain.get_builder_from_protocol(*args, overrides=inputs.get('scf', None), **kwargs)
bands = PwBaseWorkChain.get_builder_from_protocol(*args, overrides=inputs.get('bands', None), **kwargs)
relax.pop('structure', None)
relax.pop('clean_workdir', None)
relax.pop('base_final_scf', None)
scf['pw'].pop('structure', None)
scf.pop('clean_workdir', None)
bands['pw'].pop('structure', None)
bands.pop('clean_workdir', None)
bands.pop('kpoints_distance', None)
bands.pop('kpoints_force_parity', None)
builder = cls.get_builder()
builder.structure = structure
builder.relax = relax
builder.scf = scf
builder.bands = bands
builder.clean_workdir = orm.Bool(inputs['clean_workdir'])
builder.nbands_factor = orm.Float(inputs['nbands_factor'])
builder.bands_kpoints_distance = orm.Float(inputs['bands_kpoints_distance'])
return builder
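    # A minimal usage sketch (assuming `code` is a configured `quantumespresso.pw`
    # Code node and `structure` a StructureData; the variable names are placeholders):
    #
    #   builder = PwBandsWorkChain.get_builder_from_protocol(code, structure)
    #   from aiida.engine import submit
    #   submit(builder)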
def setup(self):
"""Define the current structure in the context to be the input structure."""
self.ctx.current_structure = self.inputs.structure
self.ctx.current_number_of_bands = None
self.ctx.bands_kpoints = self.inputs.get('bands_kpoints', None)
def should_run_relax(self):
"""If the 'relax' input namespace was specified, we relax the input structure."""
return 'relax' in self.inputs
def should_run_seekpath(self):
"""Seekpath should only be run if the `bands_kpoints` input is not specified."""
return 'bands_kpoints' not in self.inputs
def run_relax(self):
"""Run the PwRelaxWorkChain to run a relax PwCalculation."""
inputs = AttributeDict(self.exposed_inputs(PwRelaxWorkChain, namespace='relax'))
inputs.metadata.call_link_label = 'relax'
inputs.structure = self.ctx.current_structure
running = self.submit(PwRelaxWorkChain, **inputs)
self.report(f'launching PwRelaxWorkChain<{running.pk}>')
return ToContext(workchain_relax=running)
def inspect_relax(self):
"""Verify that the PwRelaxWorkChain finished successfully."""
workchain = self.ctx.workchain_relax
if not workchain.is_finished_ok:
self.report(f'PwRelaxWorkChain failed with exit status {workchain.exit_status}')
return self.exit_codes.ERROR_SUB_PROCESS_FAILED_RELAX
self.ctx.current_structure = workchain.outputs.output_structure
self.ctx.current_number_of_bands = workchain.outputs.output_parameters.get_attribute('number_of_bands')
def run_seekpath(self):
"""Run the structure through SeeKpath to get the normalized structure and path along high-symmetry k-points .
This is only called if the `bands_kpoints` input was not specified.
"""
inputs = {
'reference_distance': self.inputs.get('bands_kpoints_distance', None),
'metadata': {
'call_link_label': 'seekpath'
}
}
result = seekpath_structure_analysis(self.ctx.current_structure, **inputs)
self.ctx.current_structure = result['primitive_structure']
self.ctx.bands_kpoints = result['explicit_kpoints']
self.out('primitive_structure', result['primitive_structure'])
self.out('seekpath_parameters', result['parameters'])
def run_scf(self):
"""Run the PwBaseWorkChain in scf mode on the primitive cell of (optionally relaxed) input structure."""
inputs = AttributeDict(self.exposed_inputs(PwBaseWorkChain, namespace='scf'))
inputs.metadata.call_link_label = 'scf'
inputs.pw.structure = self.ctx.current_structure
inputs.pw.parameters = inputs.pw.parameters.get_dict()
inputs.pw.parameters.setdefault('CONTROL', {})['calculation'] = 'scf'
# Make sure to carry the number of bands from the relax workchain if it was run and it wasn't explicitly defined
# in the inputs. One of the base workchains in the relax workchain may have changed the number automatically in
# the sanity checks on band occupations.
if self.ctx.current_number_of_bands:
inputs.pw.parameters.setdefault('SYSTEM', {}).setdefault('nbnd', self.ctx.current_number_of_bands)
inputs = prepare_process_inputs(PwBaseWorkChain, inputs)
running = self.submit(PwBaseWorkChain, **inputs)
self.report(f'launching PwBaseWorkChain<{running.pk}> in scf mode')
return ToContext(workchain_scf=running)
def inspect_scf(self):
"""Verify that the PwBaseWorkChain for the scf run finished successfully."""
workchain = self.ctx.workchain_scf
if not workchain.is_finished_ok:
self.report(f'scf PwBaseWorkChain failed with exit status {workchain.exit_status}')
return self.exit_codes.ERROR_SUB_PROCESS_FAILED_SCF
self.ctx.current_folder = workchain.outputs.remote_folder
self.ctx.current_number_of_bands = workchain.outputs.output_parameters.get_attribute('number_of_bands')
def run_bands(self):
"""Run the PwBaseWorkChain in bands mode along the path of high-symmetry determined by seekpath."""
inputs = AttributeDict(self.exposed_inputs(PwBaseWorkChain, namespace='bands'))
inputs.metadata.call_link_label = 'bands'
inputs.kpoints = self.ctx.bands_kpoints
inputs.pw.structure = self.ctx.current_structure
inputs.pw.parent_folder = self.ctx.current_folder
inputs.pw.parameters = inputs.pw.parameters.get_dict()
inputs.pw.parameters.setdefault('CONTROL', {})
inputs.pw.parameters.setdefault('SYSTEM', {})
inputs.pw.parameters.setdefault('ELECTRONS', {})
# The following flags always have to be set in the parameters, regardless of what caller specified in the inputs
inputs.pw.parameters['CONTROL']['calculation'] = 'bands'
# Only set the following parameters if not directly explicitly defined in the inputs
inputs.pw.parameters['ELECTRONS'].setdefault('diagonalization', 'cg')
inputs.pw.parameters['ELECTRONS'].setdefault('diago_full_acc', True)
# If `nbands_factor` is defined in the inputs we set the `nbnd` parameter
if 'nbands_factor' in self.inputs:
factor = self.inputs.nbands_factor.value
parameters = self.ctx.workchain_scf.outputs.output_parameters.get_dict()
if int(parameters['number_of_spin_components']) > 1:
nspin_factor = 2
else:
nspin_factor = 1
nbands = int(parameters['number_of_bands'])
nelectron = int(parameters['number_of_electrons'])
nbnd = max(
int(0.5 * nelectron * nspin_factor * factor),
int(0.5 * nelectron * nspin_factor) + 4 * nspin_factor, nbands
)
inputs.pw.parameters['SYSTEM']['nbnd'] = nbnd
# Otherwise set the current number of bands, unless explicitly set in the inputs
else:
inputs.pw.parameters['SYSTEM'].setdefault('nbnd', self.ctx.current_number_of_bands)
inputs = prepare_process_inputs(PwBaseWorkChain, inputs)
running = self.submit(PwBaseWorkChain, **inputs)
self.report(f'launching PwBaseWorkChain<{running.pk}> in bands mode')
return ToContext(workchain_bands=running)
def inspect_bands(self):
"""Verify that the PwBaseWorkChain for the bands run finished successfully."""
workchain = self.ctx.workchain_bands
if not workchain.is_finished_ok:
self.report(f'bands PwBaseWorkChain failed with exit status {workchain.exit_status}')
return self.exit_codes.ERROR_SUB_PROCESS_FAILED_BANDS
def results(self):
"""Attach the desired output nodes directly as outputs of the workchain."""
        self.report('workchain successfully completed')
self.out('scf_parameters', self.ctx.workchain_scf.outputs.output_parameters)
self.out('band_parameters', self.ctx.workchain_bands.outputs.output_parameters)
self.out('band_structure', self.ctx.workchain_bands.outputs.output_band)
def on_terminated(self):
"""Clean the working directories of all child calculations if `clean_workdir=True` in the inputs."""
super().on_terminated()
if self.inputs.clean_workdir.value is False:
self.report('remote folders will not be cleaned')
return
cleaned_calcs = []
for called_descendant in self.node.called_descendants:
if isinstance(called_descendant, orm.CalcJobNode):
try:
called_descendant.outputs.remote_folder._clean() # pylint: disable=protected-access
cleaned_calcs.append(called_descendant.pk)
except (IOError, OSError, KeyError):
pass
if cleaned_calcs:
self.report(f"cleaned remote folders of calculations: {' '.join(map(str, cleaned_calcs))}")
|
the-stack_0_21573 | from logging import log
from pathlib import Path
from re import L
import nipype
from nipype.interfaces.ants import ApplyTransforms
from nipype.interfaces import fsl
import os
def at_ants(in_file, ref, xfm, outfile, nn: bool, invert_xfm: bool = False):
at = ApplyTransforms()
at.inputs.input_image = in_file
at.inputs.reference_image = ref
at.inputs.transforms = xfm
at.inputs.output_image = str(outfile)
if nn:
at.inputs.interpolation = "NearestNeighbor"
if invert_xfm:
at.inputs.invert_transform_flags = True
at.run()
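# A minimal usage sketch for at_ants (the file names below are placeholders, not
# files shipped with this project):
#   at_ants("atlas_MNI.nii.gz", "sub-01_T1w.nii.gz", "mni_to_t1_xfm.h5",
#           "atlas_native.nii.gz", nn=True)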
def extract_brain(wholehead: Path, brain: Path, brain_mask: Path):
cmd = f"fslmaths {wholehead} -mul {brain_mask} {brain}"
os.system(cmd)
def epi_reg(in_file: Path, t1: Path, t1_brain: Path, out_file: Path):
cmd = f"epi_reg --epi={in_file} --t1={t1} --t1brain={t1_brain} --out={out_file}"
print(cmd)
os.system(cmd)
def applyxfm_fsl(in_file, xfm, ref, out_file):
ax = fsl.ApplyXFM()
ax.inputs.in_file = in_file
ax.inputs.in_matrix_file = xfm
ax.inputs.reference = ref
ax.inputs.out_file = out_file
ax.run()
def atlas_to_subject_space(
func_derivatives: Path,
atlas_file: Path,
atlas_name: str,
subj: Path,
ses: str,
):
fs_transform = (
func_derivatives
/ subj.name
/ ses
/ "anat"
/ f"{subj.name}_{ses}_from-MNI152NLin2009cAsym_to-T1w_mode-image_xfm.h5"
)
ref = fs_transform.with_name(f"{subj.name}_{ses}_desc-preproc_T1w.nii.gz")
out_file = (
subj
/ "registrations"
/ "preprocessed_FS"
/ f"{atlas_name}_native.nii.gz"
)
out_file.parent.mkdir(exist_ok=True, parents=True)
if out_file.exists():
return
# try:
at_ants(atlas_file, ref, fs_transform, out_file, nn=True)
def coreg_to_freesurfer(func_derivatives: Path, subj: Path, ses: str):
subj_id = subj.name
fs_ref = (
func_derivatives
/ subj_id
/ ses
/ "anat"
/ f"{subj_id}_{ses}_desc-preproc_T1w.nii.gz"
)
fs_mask = (
func_derivatives
/ subj_id
/ ses
/ "anat"
/ f"{subj_id}_{ses}_desc-brain_mask.nii.gz"
)
try:
fs_brain = fs_mask.with_name(fs_mask.name.replace("_mask", ""))
if not fs_brain.exists():
### Extract brain ###
extract_brain(fs_ref, fs_brain, fs_mask)
epi_b0 = (
subj
/ "registrations"
/ "mean_b0"
/ "mean_coregistered_mean_b0.nii.gz"
)
longitudinal = True
if not epi_b0.exists():
longitudinal = False
epi_b0 = (
subj / "registrations" / "mean_b0" / "mean_b0_ses-1.nii.gz"
)
out_file = (
subj
/ "registrations"
/ "preprocessed_FS"
/ f"mean_epi2anatomical.nii.gz"
)
if not out_file.exists():
epi_reg(epi_b0, fs_ref, fs_brain, out_file)
aff_2 = out_file.parent / f"{out_file.name.split('.')[0]}.mat"
if longitudinal:
for ses, aff_name in zip(
["ses-1", "ses-2"], ["pre2post", "post2pre"]
):
print("\t", ses)
aff_1 = (
subj
/ "registrations"
/ "mean_b0"
/ f"mean_b0_{aff_name}_half.mat"
)
aff_full = (
subj
/ "registrations"
/ "preprocessed_FS"
/ f"{ses}_epi2anatomical.mat"
)
if not aff_full.exists():
os.system(
f"convert_xfm -omat {aff_full} -concat {aff_2} {aff_1}"
)
inv_aff_full = aff_full.with_name(f"{ses}_anatomical2epi.mat")
if not inv_aff_full.exists():
os.system(
f"convert_xfm -omat {inv_aff_full} -inverse {aff_full}"
)
else:
aff_full = out_file
for param in subj.glob(f"{ses}/tensors*/native/*.mif"):
param_nii = param.with_suffix(".nii.gz")
if not param_nii.exists():
os.system(f"mrconvert {param} {param_nii} -force")
out_file = (
param_nii.parent.parent / "coreg_FS" / param_nii.name
)
out_file.parent.mkdir(exist_ok=True)
print("\t\t", param.name)
if not out_file.exists():
applyxfm_fsl(param_nii, aff_full, fs_brain, out_file)
param_nii.unlink()
except:
return
|
the-stack_0_21574 | #!/usr/bin/env python
# @lint-avoid-python-3-compatibility-imports
#
# opensnoop Trace open() syscalls.
# For Linux, uses BCC, eBPF. Embedded C.
#
# USAGE: opensnoop [-h] [-T] [-x] [-p PID] [-d DURATION] [-t TID] [-n NAME]
#
# Copyright (c) 2015 Brendan Gregg.
# Licensed under the Apache License, Version 2.0 (the "License")
#
# 17-Sep-2015 Brendan Gregg Created this.
# 29-Apr-2016 Allan McAleavy Updated for BPF_PERF_OUTPUT.
# 08-Oct-2016 Dina Goldshtein Support filtering by PID and TID.
# 28-Dec-2018 Tim Douglas Print flags argument, enable filtering
from __future__ import print_function
from bcc import ArgString, BPF
import argparse
import ctypes as ct
from datetime import datetime, timedelta
import os
# arguments
examples = """examples:
./opensnoop # trace all open() syscalls
./opensnoop -T # include timestamps
./opensnoop -x # only show failed opens
./opensnoop -p 181 # only trace PID 181
./opensnoop -t 123 # only trace TID 123
./opensnoop -d 10 # trace for 10 seconds only
./opensnoop -n main # only print process names containing "main"
./opensnoop -e # show extended fields
./opensnoop -f O_WRONLY -f O_RDWR # only print calls for writing
"""
parser = argparse.ArgumentParser(
description="Trace open() syscalls",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=examples)
parser.add_argument("-T", "--timestamp", action="store_true",
help="include timestamp on output")
parser.add_argument("-x", "--failed", action="store_true",
help="only show failed opens")
parser.add_argument("-p", "--pid",
help="trace this PID only")
parser.add_argument("-t", "--tid",
help="trace this TID only")
parser.add_argument("-d", "--duration",
help="total duration of trace in seconds")
parser.add_argument("-n", "--name",
type=ArgString,
help="only print process names containing this name")
parser.add_argument("--ebpf", action="store_true",
help=argparse.SUPPRESS)
parser.add_argument("-e", "--extended_fields", action="store_true",
help="show extended fields")
parser.add_argument("-f", "--flag_filter", action="append",
help="filter on flags argument (e.g., O_WRONLY)")
args = parser.parse_args()
debug = 0
if args.duration:
args.duration = timedelta(seconds=int(args.duration))
flag_filter_mask = 0
for flag in args.flag_filter or []:
if not flag.startswith('O_'):
exit("Bad flag: %s" % flag)
try:
flag_filter_mask |= getattr(os, flag)
except AttributeError:
exit("Bad flag: %s" % flag)
# define BPF program
bpf_text = """
#include <uapi/linux/ptrace.h>
#include <uapi/linux/limits.h>
#include <linux/sched.h>
struct val_t {
u64 id;
char comm[TASK_COMM_LEN];
const char *fname;
int flags; // EXTENDED_STRUCT_MEMBER
};
struct data_t {
u64 id;
u64 ts;
int ret;
char comm[TASK_COMM_LEN];
char fname[NAME_MAX];
int flags; // EXTENDED_STRUCT_MEMBER
};
BPF_HASH(infotmp, u64, struct val_t);
BPF_PERF_OUTPUT(events);
int trace_entry(struct pt_regs *ctx, int dfd, const char __user *filename, int flags)
{
struct val_t val = {};
u64 id = bpf_get_current_pid_tgid();
u32 pid = id >> 32; // PID is higher part
u32 tid = id; // Cast and get the lower part
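    // e.g. for id = (42 << 32) | 123, pid is 42 and tid is 123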
PID_TID_FILTER
FLAGS_FILTER
if (bpf_get_current_comm(&val.comm, sizeof(val.comm)) == 0) {
val.id = id;
val.fname = filename;
val.flags = flags; // EXTENDED_STRUCT_MEMBER
infotmp.update(&id, &val);
}
return 0;
};
int trace_return(struct pt_regs *ctx)
{
u64 id = bpf_get_current_pid_tgid();
struct val_t *valp;
struct data_t data = {};
u64 tsp = bpf_ktime_get_ns();
valp = infotmp.lookup(&id);
if (valp == 0) {
// missed entry
return 0;
}
bpf_probe_read(&data.comm, sizeof(data.comm), valp->comm);
bpf_probe_read(&data.fname, sizeof(data.fname), (void *)valp->fname);
data.id = valp->id;
data.ts = tsp / 1000;
data.flags = valp->flags; // EXTENDED_STRUCT_MEMBER
data.ret = PT_REGS_RC(ctx);
events.perf_submit(ctx, &data, sizeof(data));
infotmp.delete(&id);
return 0;
}
"""
if args.tid: # TID trumps PID
bpf_text = bpf_text.replace('PID_TID_FILTER',
'if (tid != %s) { return 0; }' % args.tid)
elif args.pid:
bpf_text = bpf_text.replace('PID_TID_FILTER',
'if (pid != %s) { return 0; }' % args.pid)
else:
bpf_text = bpf_text.replace('PID_TID_FILTER', '')
if args.flag_filter:
bpf_text = bpf_text.replace('FLAGS_FILTER',
'if (!(flags & %d)) { return 0; }' % flag_filter_mask)
else:
bpf_text = bpf_text.replace('FLAGS_FILTER', '')
if not (args.extended_fields or args.flag_filter):
bpf_text = '\n'.join(x for x in bpf_text.split('\n')
if 'EXTENDED_STRUCT_MEMBER' not in x)
if debug or args.ebpf:
print(bpf_text)
if args.ebpf:
exit()
# initialize BPF
b = BPF(text=bpf_text)
b.attach_kprobe(event="do_sys_open", fn_name="trace_entry")
b.attach_kretprobe(event="do_sys_open", fn_name="trace_return")
TASK_COMM_LEN = 16 # linux/sched.h
NAME_MAX = 255 # linux/limits.h
class Data(ct.Structure):
_fields_ = [
("id", ct.c_ulonglong),
("ts", ct.c_ulonglong),
("ret", ct.c_int),
("comm", ct.c_char * TASK_COMM_LEN),
("fname", ct.c_char * NAME_MAX),
("flags", ct.c_int),
]
initial_ts = 0
# header
if args.timestamp:
print("%-14s" % ("TIME(s)"), end="")
print("%-6s %-16s %4s %3s " %
("TID" if args.tid else "PID", "COMM", "FD", "ERR"), end="")
if args.extended_fields:
print("%-9s" % ("FLAGS"), end="")
print("PATH")
# process event
def print_event(cpu, data, size):
event = ct.cast(data, ct.POINTER(Data)).contents
global initial_ts
# split return value into FD and errno columns
if event.ret >= 0:
fd_s = event.ret
err = 0
else:
fd_s = -1
err = - event.ret
if not initial_ts:
initial_ts = event.ts
if args.failed and (event.ret >= 0):
return
if args.name and bytes(args.name) not in event.comm:
return
if args.timestamp:
delta = event.ts - initial_ts
print("%-14.9f" % (float(delta) / 1000000), end="")
print("%-6d %-16s %4d %3d " %
(event.id & 0xffffffff if args.tid else event.id >> 32,
event.comm.decode('utf-8', 'replace'), fd_s, err), end="")
if args.extended_fields:
print("%08o " % event.flags, end="")
print(event.fname.decode('utf-8', 'replace'))
# loop with callback to print_event
b["events"].open_perf_buffer(print_event, page_cnt=64)
start_time = datetime.now()
while not args.duration or datetime.now() - start_time < args.duration:
try:
b.perf_buffer_poll()
except KeyboardInterrupt:
exit()
|
the-stack_0_21575 | from fastapi import FastAPI
from starlette.middleware.cors import CORSMiddleware
from app.api.api_v1.api import api_router
from app.core.config import settings
app = FastAPI(
title=settings.PROJECT_NAME, openapi_url=f"{settings.API_V1_STR}/openapi.json"
)
# Set all CORS enabled origins
if settings.BACKEND_CORS_ORIGINS:
app.add_middleware(
CORSMiddleware,
allow_origins=[str(origin) for origin in settings.BACKEND_CORS_ORIGINS],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
app.include_router(api_router, prefix=settings.API_V1_STR) |
the-stack_0_21577 | import xml.etree.ElementTree
import fractions
import os
import collections
from collections import defaultdict
import fractions
import midi_to_statematrix
import math
lowerBound = 24
upperBound = 102
numPitches = upperBound - lowerBound
#get the "divisions" which is the number of time
#units per beat
def getDivisions(e):
divisions_val = None
retval = {}
for part in e:
if part.tag == 'part':
partName = part.attrib['id']
for measure in part:
if measure.tag == 'measure':
for attributes in measure:
if attributes.tag == 'attributes':
for divisions in attributes:
if divisions.tag == 'divisions':
divs = int(divisions.text)
retval[partName] = divs
if divisions_val == None:
divisions_val = divs
return retval
#if it's a rest, return the
#duration, otherwise return none
def getRestLength(note):
duration = None
isRest = False
for el in note:
if el.tag == 'rest':
isRest = True
elif el.tag == 'duration':
if duration == None:
duration = int(el.text)
else:
#found duration tag twice
print("Duration tag found twice for note...")
if isRest:
if duration == None:
#problem...
print("Rest with no duration found")
else:
return duration
else:
#it's not a rest; return none
return None
#return the duration for a backup element
def getBackupLength(backup):
duration = None
for el in backup:
if el.tag == 'duration':
if duration == None:
duration = int(el.text)
else:
#found duration tag twice
print("Duration tag found twice for note...")
return duration
def xmlPitchToMidiPitch(letter, octave, alter):
table = {
"C" : 0,
"D" : 2,
"E" : 4,
"F" : 5,
"G" : 7,
"A" : 9,
"B" : 11,
}
if not letter in table.keys():
print("Letter {0} is not a valid letter A-G".format(letter))
return 12 + table[letter] + 12 * octave + alter
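# Worked examples for xmlPitchToMidiPitch: ("C", 4, 0) -> 12 + 0 + 48 + 0 = 60
# (middle C), ("A", 4, 0) -> 69, and ("C", 4, 1) -> 61 (C sharp).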
#get pitch, and duration for a note
def getNoteInfo(note, measureNum):
duration = None
step = None
octave = None
alter = None
isRest = False
isChord = False
tieType = None
for el in note:
if el.tag == 'rest':
isRest = True
elif el.tag == 'duration':
if duration == None:
duration = int(el.text)
else:
#found duration tag twice
print("Duration tag found twice for note...")
elif el.tag == 'chord':
isChord = True
elif el.tag == 'tie':
tieType = el.attrib['type']
elif el.tag == 'pitch':
for pitchel in el:
if pitchel.tag == 'step':
if step == None:
step = pitchel.text
else:
#found step tag twice
print("step tag found twice for note...")
if pitchel.tag == 'octave':
if octave == None:
octave = int(pitchel.text)
else:
#found octave tag twice
print("octave tag found twice for note...")
if pitchel.tag == 'alter':
if alter == None:
alter = int(pitchel.text)
else:
#found alter tag twice
print("alter tag found twice for note...")
if isRest:
#if it's a rest, then return None
return None
else:
if duration == None:
#this can happen for grace notes so actually just return none
return None
elif step == None:
print("Note with no step found")
elif octave == None:
print("Note with no octave found")
if alter == None:
alter = 0
midiPitch = xmlPitchToMidiPitch(step, octave, alter)
return (midiPitch, duration, isChord, tieType)
def iterateThroughMusic(e, handleNote, handleMeasure = None, handleRest = None, handlePart = None):
#for legacy reasons
resolution = 1
for part in e:
if part.tag == 'part':
partName = part.attrib['id']
if handlePart != None:
handlePart(partName)
#keep track of the current time
timePos = 0
measureNum = 0
lastNoteTimePos = 0
for measure in part:
if handleMeasure != None:
handleMeasure()
if measure.tag == 'measure':
#remember measure start time
#measureStartTime = timePos
#record latest time
latestTime = timePos
for note in measure:
if note.tag == 'note':
res = getRestLength(note)
if res == None:
#it's a note
res = getNoteInfo(note, measureNum)
if res == None:
#this can happen for grace notes, for example,
#just ignore
continue
midiPitch, duration, isChord, tieType = res
#allNotes[timePos, (midiPitch, duration)]
if timePos % resolution == 0:
if isChord:
timePosForNote = lastNoteTimePos
else:
timePosForNote = timePos
if tieType != 'stop':
handleNote(timePosForNote / resolution, midiPitch, (duration - 1) / resolution + 1, partName)
if not isChord:
lastNoteTimePos = timePos
timePos += duration
else:
#it's a rest
duration = res
if handleRest != None:
handleRest(timePos, duration)
timePos += duration
elif note.tag == 'backup':
duration = getBackupLength(note)
timePos -= duration
if timePos > latestTime:
latestTime = timePos
timePos = latestTime
#look under the current node and return
#the first node with the given name, if
#it exists
def getNodesUnderNodeWithName(node, name):
retlist = []
for el in node:
if el.tag == name:
retlist.append(el)
retlist = retlist + getNodesUnderNodeWithName(el, name)
return retlist
#look under the current node and return
#the first node with the given name, if
#it exists
def getNodeUnderNodeWithName(node, name):
thelist = getNodesUnderNodeWithName(node, name)
if thelist:
return thelist[0]
else:
return None
# for el in node:
# if el.tag == name:
# return el
# else:
# res = getNodeUnderNodeWithName(el, name)
# if res != None:
# return res
# return None
#parse XML to find the tempo. Note that for some songs,
#no tempo will exists, in which case return None. Also,
#for some songs, there will be multiple tempos, in which
#case probably just return the first one found.
def getTempoForSong(tree):
soundNodes = getNodesUnderNodeWithName(tree, 'sound')
for soundNode in soundNodes:
if 'tempo' in soundNode.attrib.keys():
return int(round(float(soundNode.attrib['tempo'])))
return None
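# The tempo comes from a MusicXML fragment of roughly this shape (illustrative):
#   <sound tempo="120"/>
# Only the first tempo found in the document is returned.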
#return hashmap of part to int, where the int
#is the amount to transpose each part in half steps.
#if there is no transposition for a given part, it
#can be omitted from the hash map
def getTranspositions(tree):
ret = {}
parts = getNodesUnderNodeWithName(tree, 'part')
for part in parts:
if 'id' in part.attrib.keys():
partId = part.attrib['id']
transposeNode = getNodeUnderNodeWithName(part, 'transpose')
if transposeNode != None:
for chromatic in transposeNode:
if chromatic.tag == 'chromatic':
ret[partId] = int(chromatic.text)
break
return ret
#we'll put this in its own routine, basically, the problem is,
#suppose a beat can be divided into div1 divisions and div2
#divisions. Suppose num specifies a point in time in divisions
#along the first scale. Can it be translated to a point in
#time in units of the second scale? If so, what is the number
#of units (everything must be an integer)
#In our code, this will be used to translate notes from "divs"
#(time unit of XML file) to "slices" (time unit of statematrix)
#If the note can't be translated then it is lost
def translateToDifferentDivScale(num, divs1, divs2):
theGcd = fractions.gcd(divs1, divs2)
    if num % (divs1/theGcd) != 0:
#we can't translate it
return None
else:
return num * divs2 / divs1
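# Worked example for translateToDifferentDivScale: with divs1 = 4 and divs2 = 6
# (gcd 2), num = 2 maps to 2 * 6 / 4 = 3, while num = 1 has no exact counterpart
# in the second scale and None is returned.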
#parses XML, delivering events to the callback
#that indicate note locations/durations in
#slices. This can be used as a basis for parsing
#XML into various specific data structures
#also, this function returns a number indicating
#the number of slices that are actually a pickup
def parseXMLToSomething(xmltree, noteCreationCallback):
#examine tree for any transpositions
transpositions = getTranspositions(xmltree)
#examine tree for tempo
tempo = getTempoForSong(xmltree)
if tempo == None:
raise ValueError("can't produce state matrix for this XML, as there is no tempo")
#also, check music to see if there's a pickup.
#To do this, we look at the first two measures,
#if the lengths are different (as can be determined
#by looking at the notes and rests) then we have a
#nonzero pickup, which is the length of the first measure
class PickupLengthHandler:
def __init__(self):
self.measureNum = 0
self.latestTimeSeen = 0
self.measureLengths = [0, 0]
def __handleSomething(self, time, duration):
if self.measureNum == 1 or self.measureNum == 2:
index = self.measureNum - 1
if time + duration > self.measureLengths[index]:
self.measureLengths[index] = time + duration
def __call__(self, time, pitch, duration, part):
self.__handleSomething(time, duration)
def handleMeasure(self):
self.measureNum += 1
def handleRest(self, timePos, duration):
self.__handleSomething(timePos, duration)
def handlePart(self, partName):
self.partName = partName
def getPickupDivisions(self):
if self.measureLengths[0] == self.measureLengths[1]:
return 0
else:
return self.measureLengths[0]
plm = PickupLengthHandler()
iterateThroughMusic(xmltree, plm, plm.handleMeasure, plm.handleRest, plm.handlePart)
pickupDivisions = plm.getPickupDivisions()
pickupDivisionsPart = plm.partName
#This is a constant, but actually it should be an input parameter. Anyways,
#given the tempo, the secondsPerSlice, and the divisions per beat, we should
#be able to figure out how divisions in the input correspond to slices in the
#output
secondsPerSlice = 0.125
beatsPerMinute = float(tempo)
beatsPerSecond = beatsPerMinute / 60
#e = xml.etree.ElementTree.parse(xmlfile).getroot()
e = xmltree
#returns hashmap, part to divisions number
divisions = getDivisions(e)
#compute lcm of divisions over various parts, this
#will be the divisions we use
divisionsLCM = None
for k in divisions.keys():
thisDiv = divisions[k]
if divisionsLCM == None:
divisionsLCM = thisDiv
else:
divisionsLCM = (thisDiv * divisionsLCM)/fractions.gcd(thisDiv, divisionsLCM)
#use divisions now to translate the pickup divisions for the given part, not all
#parts use the same division scale, so use the LCM scale
pickupDivisions *= (divisionsLCM/divisions[pickupDivisionsPart])
divisionsPerBeat = divisionsLCM
#this will be an exact floating point number
slicesPerBeat = 1 / (beatsPerSecond * secondsPerSlice)
#we require that the number of slices for a beat be an integer which
#is a power of two. To do this, we'll take the log base 2, round
#to the nearest int, then compute inverse log
slicesPerBeat = int(2**(int(round(math.log(slicesPerBeat, 2)))))
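    #Worked example (illustrative; assumes the secondsPerSlice = 0.125 above):
    #at tempo 120 bpm, beatsPerSecond = 2 and slicesPerBeat = 1/(2*0.125) = 4,
    #already a power of two; at tempo 90, 1/(1.5*0.125) = 5.33..., which rounds
    #in log2 space to 2**2 = 4 slices per beat.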
#compute gcd of slices per beat and divisions per beat
slicesDivisionsGcd = fractions.gcd(slicesPerBeat, divisionsPerBeat)
    #we require that for a note to be resolved to slices, its time in
    #divisions must be divisible by this number
divisionsDivisor = divisionsPerBeat / slicesDivisionsGcd
#compute the size of the pickup in slices, this is information
#that will be needed for neural net training
pickupSlices = pickupDivisions * slicesPerBeat / divisionsPerBeat
def handleNote_createStateMatrix(time, pitch, duration, part):
if part in transpositions.keys():
pitch += transpositions[part]
#Sometimes different parts have different
#numbers of divisions, scale so that the time/
#duration is in terms of the LCM divisions
if divisions[part] != divisionsLCM:
scalingFactor = (divisionsLCM / divisions[part])
time *= scalingFactor
duration *= scalingFactor
#time and duration are in divisions, we need them in slices
if time % divisionsDivisor != 0:
#this note doesn't fall on a slice boundary so we just skip it
return
else:
time = time * slicesPerBeat / divisionsPerBeat
duration = duration * slicesPerBeat / divisionsPerBeat
if duration == 0:
duration = 1
noteCreationCallback(time, pitch, duration)
#ad hoc--if divisions are divisible by 3, then assume
#that the division is at the lowest level for the piece,
#we set the granularity to ignore this subdivision level
iterateThroughMusic(e, handleNote_createStateMatrix)
return pickupSlices
#wrapper that takes filename instead of tree
def parseXMLFileToSomething(xmlFile, noteCreationCallback):
tree = xml.etree.ElementTree.parse(xmlFile).getroot()
return parseXMLToSomething(tree, noteCreationCallback)
def stateMatrixForSong(tree):
stateMatrix = []
def handleNoteCreation(time, pitch, duration):
#for state matrices, we shift pitch down
#by lower bound constant
pitch -= lowerBound
#if necessary, extend state matrix so
#that the desired times exists
#last time needed is time + duration - 1,
#len <= last time needed, so...
while len(stateMatrix) < time + duration:
row = numPitches * [[0, 0]]
stateMatrix.append(row)
stateMatrix[time][pitch] = [1, 1]
for i in range(time + 1, time + duration):
if stateMatrix[i][pitch] == [0, 0]:
stateMatrix[i][pitch] = [1, 0]
pickupSlices = parseXMLToSomething(tree, handleNoteCreation)
return (pickupSlices, stateMatrix)
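#Illustrative sketch of the encoding built above (comment only): every
#statematrix row holds one [play, articulate] pair per pitch, so a note struck
#at slice t and held for 3 slices appears as
#  stateMatrix[t][p]   == [1, 1]   (sounding, newly articulated)
#  stateMatrix[t+1][p] == [1, 0]   (sounding, held over)
#  stateMatrix[t+2][p] == [1, 0]
#while silent pitches stay [0, 0].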
def createStateMatrices(basedir = 'musicxml', minslices = 0):
stateMatrices = {}
if not os.path.exists(basedir):
os.makedirs(basedir)
for theFile in os.listdir(os.getcwd() + '/' + basedir):
if not theFile.split('.')[-1] == 'xml':
continue
#parse xml file into document tree
print(basedir + '/' + theFile)
tree = xml.etree.ElementTree.parse(basedir + '/' + theFile).getroot()
if getTempoForSong(tree) == None:
print("File {} has no tempo!!!".format(theFile))
else:
sm = stateMatrixForSong(tree)
songMatrix = sm[1]
if len(songMatrix) < minslices:
print("File {} omitted, it is too short.".format(theFile))
else:
stateMatrices[theFile] = sm
return stateMatrices
#NOTE: INTERFACE CHANGED--now returns 0 on success,
#1 on failure, reason for failure is that there is
#actually no tempo information in the xml file, so
#we don't know how to convert to midi
def midiForXML(xmlFile, midiDestFile):
#parse xml file into document tree
tree = xml.etree.ElementTree.parse(xmlFile).getroot()
tempo = getTempoForSong(tree)
    #We're no longer using a default tempo; this was never
    #really a good idea, since the various tempos can differ
    #by an order of magnitude. Instead, we return a code to
    #indicate success or failure.
#if tempo == None:
# tempo = 120
if tempo == None:
return 1
else:
        stateMatrix = stateMatrixForSong(tree)[1]
midi_to_statematrix.noteStateMatrixToMidi(stateMatrix, name=midiDestFile)
return 0
#NO LONGER USED!!!!
def createStateMatrices_old():
basedir = "musicxml/"
f = open(basedir + 'catalog.txt', "r")
lines = f.readlines()
f.close()
stateMatrices = {}
#function that returns the default
#value of a state matrix
def defaultValFactory():
return [0, 0]
inBlockComment = False
while lines:
line = lines[0]
del lines[0]
if len(line) > 0 and line[0] == '#':
continue
toks = line.split()
if len(toks) == 0:
continue
if inBlockComment:
if toks[0] == 'endcomment':
inBlockComment = False
continue
if toks[0] == 'begincomment':
inBlockComment = True
continue
if len(toks) == 2 and toks[0] == 'file':
pass
else:
continue
origFilename = toks[1]
mxlfile = basedir + origFilename
print(mxlfile)
transpositions = {}
slow = None
speed = None
startTime = 0
while lines and len(lines[0].split()) != 0 and lines[0].split()[0] != 'file':
line = lines[0]
del lines[0]
toks = line.split()
if toks[0] == 'transpose':
if not len(toks) == 3:
continue
transpositions[toks[1]] = int(toks[2])
elif toks[0] == 'slow':
if not len(toks) == 2:
continue
slow = int(toks[1])
elif toks[0] == 'speed':
if not len(toks) == 2:
continue
speed = int(toks[1])
elif toks[0] == 'start-time':
if not len(toks) == 2:
continue
startTime = float(toks[1])
#parse xml file into document tree
tree = xml.etree.ElementTree.parse(mxlfile).getroot()
if getTempoForSong(tree) == None:
print("File {} has no tempo!!!".format(mxlfile))
else:
stateMatrices[origFilename] = stateMatrixForSong(tree)
return stateMatrices
if __name__ == "__main__":
stateMatrices = createStateMatrices()
print("{0} songs total.".format(len(stateMatrices)))
for k in stateMatrices.keys():
midi_to_statematrix.noteStateMatrixToMidi(stateMatrices[k][1], name='./midi_output_test/{}'.format(k))
|
the-stack_0_21578 | #Code for the validation of trained model on Q
#Apr 2021 Renjie Li, NOEL @ CUHK SZ
import torch
import torchvision
import matplotlib.pyplot as plt
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import pandas as pd
import numpy as np
import h5py
import torchvision.transforms as transforms
from datetime import datetime
class TensorsDataset(torch.utils.data.Dataset):
    '''
    A simple loading dataset - loads the tensors that are passed in as input. This is the same as
    torch.utils.data.TensorDataset except that you can add transformations to your data and target tensors.
    The target tensor can also be None, in which case it is not returned.
    '''
def __init__(self, data_tensor, target_tensor=None, transforms=None, target_transforms=None):
if target_tensor is not None:
assert data_tensor.size(0) == target_tensor.size(0)
self.data_tensor = data_tensor
self.target_tensor = target_tensor
if transforms is None:
transforms = []
if target_transforms is None:
target_transforms = []
if not isinstance(transforms, list):
transforms = [transforms]
if not isinstance(target_transforms, list):
target_transforms = [target_transforms]
self.transforms = transforms
self.target_transforms = target_transforms
def __getitem__(self, index):
data_tensor = self.data_tensor[index]
for transform in self.transforms:
data_tensor = transform(data_tensor)
if self.target_tensor is None:
return data_tensor
target_tensor = self.target_tensor[index]
for transform in self.target_transforms:
target_tensor = transform(target_tensor)
return data_tensor, target_tensor
def __len__(self):
return self.data_tensor.size(0)
#read data from mat file
print("loading the mat")
f = h5py.File('/Users/Renjee/Desktop/CUHK/NOEL/Deep learning proj/code/L3_dataset/Input_v.mat','r')
data = f['Input']
Input = np.array(data) # For converting to a NumPy array
f = h5py.File('/Users/Renjee/Desktop/CUHK/NOEL/Deep learning proj/code/L3_dataset/Output_v.mat','r')
data = f['QnV']
Output = np.array(data) # For converting to a NumPy array
print("converting to tensor")
input_tensor = torch.tensor(Input)
output_tensor = torch.tensor(Output)
#swap the axes
input_tensor = input_tensor.permute(3,2,1,0).float()
output_tensor = output_tensor.permute(1,0).float()
output_tensor = output_tensor[:,0] #do Q first
output_tensor = output_tensor.view(-1,1) #correct the dimension
print(output_tensor[-1])
print(input_tensor.shape)
print(output_tensor.shape)
#produce the full dataset
transformer=transforms.Normalize(mean=[-8.7270e-13,3.3969e-13,-1.6978e-12], std=[0.0000000005,0.0000000005,0.0000000005])
dataset=TensorsDataset(input_tensor, output_tensor,transforms=transformer)
#split into training and test datasets
train_size = 0
test_size = len(output_tensor)
train_dataset, test_dataset = torch.utils.data.random_split(dataset, [train_size, test_size])
#load the data
test_loader = torch.utils.data.DataLoader(dataset, batch_size=len(output_tensor), shuffle=False)
#set up the network
#create a class for the CNN
class Net(nn.Module):
#build the network (cnn+fc)
def __init__(self):
super(Net,self).__init__()
self.conv1 = nn.Conv2d(3,20, kernel_size=(3,3), padding = 1, bias=False)
self.bn1=nn.BatchNorm2d(20)
self.conv2 = nn.Conv2d(20,40,kernel_size=(3,3),bias=False)
self.bn2=nn.BatchNorm2d(40)
self.fc1 = nn.Linear(240,120)
self.fc2 = nn.Linear(120,50)
self.fc3 = nn.Linear(50,1)
#pass data to the CNN. x represents the data
def forward(self,x):
x = F.relu(F.avg_pool2d(self.conv1(x),(1,2)))
# print(x.shape)
x = F.relu(F.avg_pool2d(self.conv2(x),(1,2)))
# print(x.shape)
x = x.view(x.size(0),-1)
# print(x.shape)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
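# Shape sketch (hedged -- the true input size is not recorded in this script):
# fc1 expects 240 = 40 * 1 * 6 flattened features. One input shape consistent
# with that is (N, 3, 3, 28): conv1 (3x3, pad 1) keeps 3x28, avg_pool (1,2)
# gives 3x14, conv2 (3x3, no pad) gives 1x12, avg_pool (1,2) gives 1x6, and
# flattening 40 channels of 1x6 yields 240.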
network = Net()
network_state_dict = torch.load('/Users/Renjee/Desktop/CUHK/NOEL/Deep learning proj/code/L3_model.pt')
network.load_state_dict(network_state_dict)
test_losses = [] #for Q
testV_losses = [] #for V
test_output = []
testV_output = []
test_target = []
testV_target = []
pred_error = []
#test loop
def test():
#global test_output
network.eval()
with torch.no_grad(): #disable the gradient computation
for data, target in test_loader:
output = network(data)
#save the test result
#Q
test_output.append(output)
test_target.append(target)
pred_err = 100*torch.abs((output - target))/target
pred_error.append(pred_err)
#print('pred errors...')
#print(pred_err)
start=datetime.now()
for epoch in range(0,1):
test()
#print('Q predicted/true values...')
#print(test_output,test_target)
print((datetime.now()-start))
#convert from list to tensor
pred_errorT = torch.cat(pred_error,0)
pred_errorA = pred_errorT.numpy()
print(min(pred_errorA))
red_square = dict(markerfacecolor='r', marker='s')
fig, ax = plt.subplots()
ax.boxplot(pred_errorA, flierprops=red_square, vert=False)
plt.savefig('/Users/Renjee/Desktop/CUHK/NOEL/Deep learning proj/code/valid_boxplot.eps')
fig = plt.figure()
plt.hist(pred_errorA, 50, density=False)
plt.xlim(-0.05,0.80)
plt.xticks(np.arange(0, 0.85, 0.05))
plt.savefig('/Users/Renjee/Desktop/CUHK/NOEL/Deep learning proj/code/valid_hist.eps')
print(np.amin(pred_errorA),np.mean(pred_errorA), np.median(pred_errorA)) |
the-stack_0_21579 | import pandas as pd
import requests
import zipfile
from datetime import datetime
from pytz import timezone
from io import BytesIO
import xmltodict
import time
import os
from argparse import ArgumentParser
# from collections import OrderedDict as odict
def parse_args():
parser = ArgumentParser()
parser.add_argument('--node', type = str, help = "a CAISO node name")
parser.add_argument('-m', '--market', type = str, help = "string: RT5, RT15, or DA")
parser.add_argument('-s', '--startdate', type = str, help = "a string parsable by pandas as a datetime")
parser.add_argument('-e', '--enddate', type = str, help = "a string parsable by pandas as a datetime")
parser.add_argument('-p', '--store_path', type = str, default = os.path.dirname(__file__),
help="a string representing the directory in which we will create the resulting data file")
parser.add_argument('--tz_in', type = str, default = 'US/Pacific', help = 'the timezone of your input args')
parser.add_argument('--tz_query', type = str, default = 'US/Pacific',
help = 'the timezone of your desired query params')
parser.add_argument('--max_n_attempts', type = int, default = 5,
help = 'how many times we will try to run a query before giving up')
# by default save the results next to the current file
args = parser.parse_args()
assert args.market in ('RT5', 'RT15', 'DA')
# process / validate datetime arguments
startdate_pd = pd.to_datetime(args.startdate).tz_localize(args.tz_in)
enddate_pd = pd.to_datetime(args.enddate).tz_localize(args.tz_in)
args.startdate = datetime(year=startdate_pd.year, month=startdate_pd.month, day=startdate_pd.day)
args.enddate = datetime(year=enddate_pd.year, month=enddate_pd.month, day=enddate_pd.day)
return args
def format_time(dtime, tz_in='US/Pacific', tz_out='US/Pacific'):
"""format a datetime.datetime (in as tz-naive, implicitly tz_in, out as tz_out) for a CAISO OASIS API query"""
# Sometimes it seems that local time works, and other times UTC works. Could be Descartes' evil genius messing w me
# again, or perhaps because for some markets we are restricted to be within a single day
dtime = timezone(tz_in).localize(dtime)
if tz_out != tz_in: # convert to desired query timezone if for some reason you care for them to differ
dtime = dtime.astimezone(timezone(tz_out))
return dtime.strftime("%Y%m%dT%H:%M%z")
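# Illustrative usage (hedged -- the exact offset depends on DST for the date):
# format_time(datetime(2019, 7, 1), tz_in='US/Pacific', tz_out='US/Pacific')
# gives '20190701T00:00-0700', while tz_out='UTC' renders the same instant as
# '20190701T07:00+0000'.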
def get_query_params(node='SLAP_PGEB-APND',
market='RT5',
startdate=datetime(2019, 1, 1),
enddate=datetime(2019, 1, 15),
tz_in='US/Pacific',
tz_out='US/Pacific'):
"""Create a dictionary of query parameters for single query based on arguments. market = RT5, RT15, or DA"""
assert market in ('RT5', 'RT15', 'DA')
params = {'node': node,
'version': 1,
'startdatetime': format_time(startdate, tz_in=tz_in, tz_out=tz_out),
'enddatetime': format_time(enddate, tz_in=tz_in, tz_out=tz_out),
'resultformat': 6} # 6 is CSV
if (datetime.now() - startdate) > pd.Timedelta(days=39 * 30.3):
# CAISO data retention policy as of late 2017 -- no bueno
print("Watch out! CAISO does not retain data over 39 months old, and your startdate seems to be older.")
if market == 'DA': # these querynames only work with these market_run_ids:
params['queryname'] = 'PRC_LMP'
params['market_run_id'] = 'DAM'
elif market == 'RT5':
params['queryname'] = 'PRC_INTVL_LMP'
params['market_run_id'] = 'RTM'
if enddate - startdate > pd.Timedelta(days=1):
print(
"Watch out! real-time market queries may be (?) "
"restricted to a single 24 hour period, and yours is not!")
elif market == 'RT15':
params['queryname'] = 'PRC_RTPD_LMP'
params['market_run_id'] = 'RTPD'
if enddate - startdate > pd.Timedelta(days=15):
print("Watch out! real-time market queries may be (?) restricted by length, and yours kinda long!")
return params
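# Minimal usage sketch (hedged; the node and dates are placeholders, not data
# from any particular run):
# params = get_query_params(node='SLAP_PGEB-APND', market='DA',
#                           startdate=datetime(2019, 1, 1),
#                           enddate=datetime(2019, 1, 15))
# returns a dict with queryname='PRC_LMP', market_run_id='DAM', resultformat=6
# and the two formatted datetimes, ready to pass to scrape_singlezip below.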
def scrape_singlezip(params):
"""Make a single API URL request using a parameter dictionary created e.g. by get_query_params"""
r = requests.get('http://oasis.caiso.com/oasisapi/SingleZip', params=params)
try:
zf = zipfile.ZipFile(BytesIO(r.content))
except Exception as e:
print(f"Could not load zipfile for following query: exception is {e}")
for item_ in params.items(): # print out the query details
print(f"{item_[0]}: {item_[1]}")
return r
try:
return pd.read_csv(BytesIO(zf.read(zf.namelist()[0])), parse_dates=[0, 1, 2])
except Exception as e:
print(f"Could not parse zipfile as CSV: (exception is {e})")
xml_dict = xmltodict.parse(BytesIO(zf.read(zf.namelist()[0])))
error_msg = xml_dict['m:OASISReport']['m:MessagePayload']['m:RTO']['m:ERROR']['m:ERR_DESC']
print(f"Could not parse as CSV. Error message is '{error_msg}'")
for item_ in params.items(): # print out the query details
print(f"{item_[0]}: {item_[1]}")
return None # this will allow an append process to continue if necessary
def scrape_daterange(node='SLAP_PGEB-APND', # 'SLAP_PGEB-APND', 'PGEB-APND'
startdate=datetime(2017, 1, 1),
enddate=datetime(2017, 12, 31),
market='RT5',
tz_in='US/Pacific',
tz_query='UTC',
store_path=None,
cache_continuously=True,
max_n_attempts = 5):
"""
Breaks up a daterange into appropriate chunks and gets them.
    cache_continuously=True is less efficient but will always save the sorted result after each query.
    After each block has been successfully retrieved, or we have made max_n_attempts for it, we stop.
"""
assert market in ("RT5", "RT15", "DA")
if store_path is None:
store_path = '.'
chunk_period = {'RT5': 1, 'RT15': 15, 'DA': 30}[market] # different markets have different allowable query sizes
chunk_starts = pd.date_range(start=startdate, end=enddate, freq=f'{chunk_period}D')
print(f"Query range starts = {chunk_starts}")
attempt_srs = pd.Series(index=chunk_starts, data=0)
completion_srs = pd.Series(index=chunk_starts, data=False)
result_freq = {'RT5': 5, 'RT15': 15, 'DA': 60}[market] # will use this for validating results
result_srs = pd.Series()
results_dict = {}
i = 0
while not completion_srs.all():
        # an attempt count >= 0 means that chunk has not yet succeeded, and
        # < max_n_attempts means we should not give up on it yet; while any
        # chunk remains incomplete we trudge on
if not completion_srs[i]:
# we do not have the data for this range
# print(f"i, i+1 = {i}, {i + 1}")
# print(f"chunk_starts[i]={chunk_starts[i]}")
ts = datetime(chunk_starts[i].year, chunk_starts[i].month, chunk_starts[i].day)
if enddate - ts > pd.Timedelta(days=chunk_period):
# we are not at the last startdate
te = datetime(chunk_starts[i + 1].year, chunk_starts[i + 1].month, chunk_starts[i + 1].day)
else:
te = enddate
print(f"Querying {ts:%Y-%m-%d} to {te:%Y-%m-%d}, attempt number {attempt_srs[ts]+1} of {max_n_attempts}")
params = get_query_params(node=node, # 'SLAP_PGEB-APND', 'PGEB-APND'
startdate=ts,
enddate=te,
market=market,
tz_in=tz_in,
tz_out=tz_query)
df = scrape_singlezip(params)
pricecol = {'DA': 'MW', 'RT5': 'MW', 'RT15': 'PRC'}[market] # name of the column containing our LMPs
try:
df2 = df.set_index('INTERVALSTARTTIME_GMT', drop=True)[['LMP_TYPE', pricecol]].sort_index()
# get the series
# in this application I don't care about the marginal cost of congestion, losses, etc,
# so I only take the total LMP, not the components
result_srs = df2[df2['LMP_TYPE'] == 'LMP'][pricecol]
results_dict[chunk_starts[i]] = result_srs
assert not result_srs.isna().any()
attempt_srs[ts] = -1 # -1 means we have succeeded
print(f'Success! (it would seem)')
except Exception as e:
print(f'Failed for startdate {ts:%Y-%m-%d} with exception {e}')
attempt_srs[ts] += 1 # mark a consecutive failed attempt for this chunk
if i < len(chunk_starts) - 1:
i += 1
else: # start over to collect missing data
i = 0
time.sleep(5) # don't want the OASIS API to lock us out
if cache_continuously or completion_srs.all():
# very inefficient to keep redoing the concatenation from scratch, but OTOH if we don't cache continuously
# then it is *more* efficient to do it this way. Anyway waiting between queries to avoid getting locked out
# probably takes the majority of the time
try:
result_srs = pd.concat(results_dict.values()).sort_index()
fpath = os.path.join(store_path, f'./LMP_{node}_{market}_{startdate.date()}_{enddate.date()}.csv')
result_srs.to_csv(fpath, header=True)
print(f"wrote file to {fpath}")
except Exception as e:
print("could not concatenate results, presumably because there are none")
print(f"exception is: {e}")
# TODO: add a validation step with an expected DatetimeIndex of freq = result_freq
# completion criterion for the chunk is that we have succeeded or tried enough times:
completion_srs[ts] = (attempt_srs[ts] < 0 or attempt_srs[ts] >= max_n_attempts)
return result_srs
def main(args):
node = args.node
startdate = args.startdate
enddate = args.enddate
market = args.market
tz_in = args.tz_in
tz_query = args.tz_query
store_path = args.store_path
max_n_attempts = args.max_n_attempts
result = scrape_daterange(node=node, # 'SLAP_PGEB-APND',
startdate=startdate, # datetime(2017, 1, 1),
enddate=enddate, # datetime(2017, 1, 16),
market=market, # 'RT15',
tz_in=tz_in,
tz_query=tz_query,
store_path=store_path,
max_n_attempts=max_n_attempts)
if __name__ == '__main__':
# example command line params:
# --node "DLAP_SCE-APND" --startdate "2017-03-29" --enddate "2019-10-20" --market "RT5"
# --node "DLAP_SCE-APND" --startdate "2019-06-01" --enddate "2020-06-08" --market "DA" --max_n_attempts 3 --tz_in "UTC" --tz_query "UTC"
# --node "TH_SP15_GEN_ONPEAK-APND" or "TH_SP15_GEN-APND"
args = parse_args()
main(args)
|
the-stack_0_21580 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for array_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
class BatchMatrixTransposeTest(test_util.TensorFlowTestCase):
def testNonBatchMatrix(self):
matrix = [[1, 2, 3], [4, 5, 6]] # Shape (2, 3)
expected_transposed = [[1, 4], [2, 5], [3, 6]] # Shape (3, 2)
with self.test_session():
transposed = tf.matrix_transpose(matrix)
self.assertEqual((3, 2), transposed.get_shape())
self.assertAllEqual(expected_transposed, transposed.eval())
def testBatchMatrix(self):
matrix_0 = [[1, 2, 3], [4, 5, 6]]
matrix_0_t = [[1, 4], [2, 5], [3, 6]]
matrix_1 = [[11, 22, 33], [44, 55, 66]]
matrix_1_t = [[11, 44], [22, 55], [33, 66]]
batch_matrix = [matrix_0, matrix_1] # Shape (2, 2, 3)
expected_transposed = [matrix_0_t, matrix_1_t] # Shape (2, 3, 2)
with self.test_session():
transposed = tf.matrix_transpose(batch_matrix)
self.assertEqual((2, 3, 2), transposed.get_shape())
self.assertAllEqual(expected_transposed, transposed.eval())
def testNonBatchMatrixDynamicallyDefined(self):
matrix = [[1, 2, 3], [4, 5, 6]] # Shape (2, 3)
expected_transposed = [[1, 4], [2, 5], [3, 6]] # Shape (3, 2)
with self.test_session():
matrix_ph = tf.placeholder(tf.int32)
transposed = tf.matrix_transpose(matrix_ph)
self.assertAllEqual(
expected_transposed,
transposed.eval(feed_dict={matrix_ph: matrix}))
def testBatchMatrixDynamicallyDefined(self):
matrix_0 = [[1, 2, 3], [4, 5, 6]]
matrix_0_t = [[1, 4], [2, 5], [3, 6]]
matrix_1 = [[11, 22, 33], [44, 55, 66]]
matrix_1_t = [[11, 44], [22, 55], [33, 66]]
batch_matrix = [matrix_0, matrix_1] # Shape (2, 2, 3)
expected_transposed = [matrix_0_t, matrix_1_t] # Shape (2, 3, 2)
with self.test_session():
batch_matrix_ph = tf.placeholder(tf.int32)
transposed = tf.matrix_transpose(batch_matrix_ph)
self.assertAllEqual(
expected_transposed,
transposed.eval(feed_dict={batch_matrix_ph: batch_matrix}))
def testTensorWithStaticRankLessThanTwoRaisesBecauseNotAMatrix(self):
vector = [1, 2, 3]
with self.test_session():
with self.assertRaisesRegexp(ValueError, "should be a "):
tf.matrix_transpose(vector)
class BooleanMaskTest(test_util.TensorFlowTestCase):
def CheckVersusNumpy(self, ndims_mask, arr_shape, make_mask=None):
"""Check equivalence between boolean_mask and numpy masking."""
if make_mask is None:
make_mask = lambda shape: np.random.randint(0, 2, size=shape).astype(bool)
arr = np.random.rand(*arr_shape)
mask = make_mask(arr_shape[: ndims_mask])
masked_arr = arr[mask]
with self.test_session():
masked_tensor = array_ops.boolean_mask(arr, mask)
np.testing.assert_allclose(
masked_arr,
masked_tensor.eval(),
err_msg="masked_arr:\n%s\n\nmasked_tensor:\n%s" % (
masked_arr, masked_tensor.eval()))
masked_tensor.get_shape().assert_is_compatible_with(masked_arr.shape)
self.assertSequenceEqual(
masked_tensor.get_shape()[1:].as_list(),
masked_arr.shape[1:],
msg="shape information lost %s -> %s" % (
masked_arr.shape, masked_tensor.get_shape()))
def testOneDimensionalMask(self):
# Do 1d separately because it's the only easy one to debug!
ndims_mask = 1
for ndims_arr in range(ndims_mask, ndims_mask + 3):
for _ in range(3):
arr_shape = np.random.randint(1, 5, size=ndims_arr)
self.CheckVersusNumpy(ndims_mask, arr_shape)
def testMultiDimensionalMask(self):
for ndims_mask in range(1, 4):
for ndims_arr in range(ndims_mask, ndims_mask + 3):
for _ in range(3):
arr_shape = np.random.randint(1, 5, size=ndims_arr)
self.CheckVersusNumpy(ndims_mask, arr_shape)
def testEmptyOutput(self):
make_mask = lambda shape: np.zeros(shape, dtype=bool)
for ndims_mask in range(1, 4):
for ndims_arr in range(ndims_mask, ndims_mask + 3):
for _ in range(3):
arr_shape = np.random.randint(1, 5, size=ndims_arr)
self.CheckVersusNumpy(ndims_mask, arr_shape, make_mask=make_mask)
def testWorksWithDimensionsEqualToNoneDuringGraphBuild(self):
# The rank of the mask tensor must be specified. This is explained
# in the docstring as well.
with self.test_session() as sess:
ph_tensor = array_ops.placeholder(dtypes.int32, shape=None)
ph_mask = array_ops.placeholder(dtypes.bool, shape=[None])
arr = np.array([[1, 2], [3, 4]])
mask = np.array([False, True])
masked_tensor = sess.run(
array_ops.boolean_mask(ph_tensor, ph_mask),
feed_dict={ph_tensor: arr, ph_mask: mask})
np.testing.assert_allclose(masked_tensor, arr[mask])
def testMaskDimensionsSetToNoneRaises(self):
# The rank of the mask tensor must be specified. This is explained
# in the docstring as well.
with self.test_session():
tensor = array_ops.placeholder(dtypes.int32, shape=[None, 2])
mask = array_ops.placeholder(dtypes.bool, shape=None)
with self.assertRaisesRegexp(ValueError, "dimensions must be specified"):
array_ops.boolean_mask(tensor, mask)
def testMaskHasMoreDimsThanTensorRaises(self):
mask = [[True, True], [False, False]]
tensor = [1, 2, 3, 4]
with self.test_session():
with self.assertRaisesRegexp(ValueError, "incompatible"):
array_ops.boolean_mask(tensor, mask).eval()
def testMaskIsScalarRaises(self):
mask = True
tensor = 1
with self.test_session():
with self.assertRaisesRegexp(ValueError, "mask.*scalar"):
array_ops.boolean_mask(tensor, mask).eval()
def testMaskShapeDifferentThanFirstPartOfTensorShapeRaises(self):
mask = [True, True, True]
tensor = [[1, 2], [3, 4]]
with self.test_session():
with self.assertRaisesRegexp(ValueError, "incompatible"):
array_ops.boolean_mask(tensor, mask).eval()
class OperatorShapeTest(test_util.TensorFlowTestCase):
def testExpandScalar(self):
scalar = "hello"
scalar_expanded = array_ops.expand_dims(scalar, [0])
self.assertEqual(scalar_expanded.get_shape(), (1,))
def testSqueezeScalar(self):
scalar = "hello"
scalar_squeezed = array_ops.squeeze(scalar, ())
self.assertEqual(scalar_squeezed.get_shape(), ())
def testSqueezeMatrix(self):
matrix = [[1, 2, 3]]
matrix_squeezed = array_ops.squeeze(matrix, [0])
self.assertEqual(matrix_squeezed.get_shape(), (3))
with self.assertRaises(ValueError):
matrix_squeezed = array_ops.squeeze(matrix, [1])
def testSqueezeScalarDim(self):
matrix = [[1, 2, 3]]
matrix_squeezed = array_ops.squeeze(matrix, 0)
self.assertEqual(matrix_squeezed.get_shape(), (3))
class ReverseV2Test(test_util.TensorFlowTestCase):
def testReverse0DimAuto(self):
x_np = 4
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
x_tf = array_ops.reverse_v2(x_np, []).eval()
self.assertAllEqual(x_tf, x_np)
def _reverse1DimAuto(self, np_dtype):
x_np = np.array([1, 2, 3, 4, 5], dtype=np_dtype)
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
x_tf = array_ops.reverse_v2(x_np, [0]).eval()
self.assertAllEqual(x_tf, np.asarray(x_np)[::-1])
def _reverse2DimAuto(self, np_dtype):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np_dtype)
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
x_tf_1 = array_ops.reverse_v2(x_np, [0]).eval()
x_tf_2 = array_ops.reverse_v2(x_np, [-2]).eval()
x_tf_3 = array_ops.reverse_v2(x_np, [1]).eval()
x_tf_4 = array_ops.reverse_v2(x_np, [-1]).eval()
x_tf_5 = array_ops.reverse_v2(x_np, [1, 0]).eval()
self.assertAllEqual(x_tf_1, np.asarray(x_np)[::-1, :])
self.assertAllEqual(x_tf_2, np.asarray(x_np)[::-1, :])
self.assertAllEqual(x_tf_3, np.asarray(x_np)[:, ::-1])
self.assertAllEqual(x_tf_4, np.asarray(x_np)[:, ::-1])
self.assertAllEqual(x_tf_5, np.asarray(x_np)[::-1, ::-1])
# This is the version of reverse that uses axis indices rather than
# bool tensors
# TODO(b/32254538): Change this test to use array_ops.reverse
def testInvalid(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)
with self.test_session():
with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
"is out of valid range"):
array_ops.reverse_v2(x_np, [-30]).eval()
with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
"is out of valid range"):
array_ops.reverse_v2(x_np, [2]).eval()
with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
"axis 0 specified more than once"):
array_ops.reverse_v2(x_np, [0, -2]).eval()
def testReverse1DimAuto(self):
for dtype in [
np.uint8, np.int8, np.int32, np.int64, np.bool, np.float16, np.float32,
np.float64, np.complex64, np.complex128
]:
self._reverse1DimAuto(dtype)
def testReverse2DimAuto(self):
for dtype in [
np.uint8, np.int8, np.int32, np.int64, np.bool, np.float16, np.float32,
np.float64, np.complex64, np.complex128
]:
self._reverse2DimAuto(dtype)
def testUnknownDims(self):
reverse_v2 = array_ops.reverse_v2
data_t = tf.placeholder(tf.float32)
axis_known_t = tf.placeholder(tf.int32, shape=[3])
reverse_known_t = reverse_v2(data_t, axis_known_t)
# Unlike V1 we cannot know this anymore
self.assertEqual(None, reverse_known_t.get_shape().ndims)
axis_unknown_t = tf.placeholder(tf.int32)
reverse_unknown_t = reverse_v2(data_t, axis_unknown_t)
self.assertIs(None, reverse_unknown_t.get_shape().ndims)
data_2d_t = tf.placeholder(tf.float32, shape=[None, None])
axis_2d_t = tf.placeholder(tf.int32, shape=[3])
reverse_2d_t = reverse_v2(data_2d_t, axis_2d_t)
self.assertEqual(2, reverse_2d_t.get_shape().ndims)
class MeshgridTest(test_util.TensorFlowTestCase):
def _compareDiff(self, x, y, use_gpu):
for index in ('ij', 'xy'):
numpy_out = np.meshgrid(x, y, indexing=index)
tf_out = array_ops.meshgrid(x, y, indexing=index)
with self.test_session(use_gpu=use_gpu):
for xx, yy in zip(numpy_out, tf_out):
self.assertAllEqual(xx, yy.eval())
def _compareDiffType(self, n, np_dtype, use_gpu):
inputs = []
for index in ('ij', 'xy'):
for i in range(n):
x = np.linspace(-10, 10, 5).astype(np_dtype)
if np_dtype in (np.complex64, np.complex128):
x += 1j
inputs.append(x)
numpy_out = np.meshgrid(*inputs, indexing=index)
with self.test_session(use_gpu=use_gpu):
tf_out = array_ops.meshgrid(*inputs, indexing=index)
for X, _X in zip(numpy_out, tf_out):
self.assertAllEqual(X, _X.eval())
def testCompare(self):
for t in (np.float16, np.float32, np.float64, np.int32, np.int64,
np.complex64, np.complex128):
self._compareDiffType(2, t, False)
self._compareDiffType(3, t, False)
x = [1, 2, 3]
y = [4, 5]
a = [[1, 1], [1, 1]]
self._compareDiff(x, y, False)
self._compareDiff(x, a, False)
class StridedSliceChecker(object):
"""Check a given tensor against the numpy result."""
REF_TENSOR = np.arange(1, 19, dtype=np.float32).reshape(3, 2, 3)
REF_TENSOR_ALIGNED = np.arange(1, 97, dtype=np.float32).reshape(3, 4, 8)
def __init__(self, test, x, tensor_type=tf.int32, check_type_infer=True):
self.test = test
self.x = tf.cast(tf.constant(x, dtype=tf.float32), dtype=tensor_type)
self.x_np = np.array(x)
self.check_type_infer = check_type_infer
def __getitem__(self, spec):
op = self.x.__getitem__(spec)
if not isinstance(spec, (list, tuple)):
spec = [spec]
tensor = op.eval()
# Make a numpy spec that pre-evals the tensors
np_specs = []
def eval_if_tensor(x):
try:
return x.eval()
except AttributeError:
return x
for s in spec:
if isinstance(s, slice):
start = eval_if_tensor(s.start)
stop = eval_if_tensor(s.stop)
step = eval_if_tensor(s.step)
np_specs.append(slice(start, stop, step))
else:
np_specs.append(eval_if_tensor(s))
self.test.assertAllEqual(self.x_np[tuple(np_specs)], tensor)
if self.check_type_infer:
self.test.assertAllEqual(tensor.shape, op.get_shape())
return tensor
class StridedSliceTest(test_util.TensorFlowTestCase):
"""Test the strided slice operation with variants of slices."""
def test_basic_slice(self):
for tensor_type in [tf.int32, tf.int64, tf.int16, tf.int8, tf.float32,
tf.float64]:
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
checker = StridedSliceChecker(
self, StridedSliceChecker.REF_TENSOR, tensor_type=tensor_type)
_ = checker[:, :, :]
# Various ways of representing identity slice
_ = checker[:, :, :]
_ = checker[::, ::, ::]
_ = checker[::1, ::1, ::1]
# Not zero slice
_ = checker[::1, ::5, ::2]
# Reverse in each dimension independently
_ = checker[::-1, :, :]
_ = checker[:, ::-1, :]
_ = checker[:, :, ::-1]
## negative index tests i.e. n-2 in first component
_ = checker[-2::-1, :, ::1]
# negative index tests i.e. n-2 in first component, non-unit stride
_ = checker[-2::-1, :, ::2]
# Check rank-0 examples
checker2 = StridedSliceChecker(self, 5, tensor_type=tf.int32)
_ = checker2[None]
_ = checker2[...]
_ = checker2[tuple()]
def testDegenerateSlices(self):
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
checker = StridedSliceChecker(self, StridedSliceChecker.REF_TENSOR)
# degenerate by offering a forward interval with a negative stride
_ = checker[0:-1:-1, :, :]
# degenerate with a reverse interval with a positive stride
_ = checker[-1:0, :, :]
# empty interval in every dimension
_ = checker[-1:0, 2:2, 2:3:-1]
def testEllipsis(self):
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
raw = [[[[[1, 2], [3, 4], [5, 6]]], [[[7, 8], [9, 10], [11, 12]]]]]
checker = StridedSliceChecker(self, raw)
_ = checker[0:]
# implicit ellipsis
_ = checker[0:, ...]
# ellipsis alone
_ = checker[...]
# ellipsis at end
_ = checker[0:1, ...]
# ellipsis at begin
_ = checker[..., 0:1]
# ellipsis at middle
_ = checker[0:1, ..., 0:1]
# multiple ellipses not allowed
with self.assertRaisesRegexp(ValueError, "Multiple ellipses"):
_ = checker[..., :, ...].eval()
def testShrink(self):
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
raw = [[[[[1, 2, 4, 5], [5, 6, 7, 8], [9, 10, 11, 12]]],
[[[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]]]]]
checker = StridedSliceChecker(self, raw)
_ = checker[:, :, :, :, 3]
_ = checker[..., 3]
_ = checker[:, 0]
_ = checker[:, :, 0]
def testTensorIndexing(self):
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
raw = [[[[[1, 2, 4, 5], [5, 6, 7, 8], [9, 10, 11, 12]]],
[[[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]]]]]
checker = StridedSliceChecker(self, raw, check_type_infer=False)
bar = tf.constant(2)
bar2 = tf.constant(3)
_ = checker[..., bar:bar2]
_ = checker[..., bar]
with self.assertRaisesRegexp(
TypeError,
"Value passed to parameter 'begin' has DataType float32 not in "
"list of allowed values"):
_ = checker[..., 3.0]
_ = checker[..., 3]
def testExpand(self):
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
raw = [[[[[1, 2, 4, 5], [5, 6, 7, 8], [9, 10, 11, 12]]],
[[[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]]]]]
checker = StridedSliceChecker(self, raw)
# new axis (followed by implicit ellipsis)
_ = checker[np.newaxis]
# newaxis after ellipsis
_ = checker[..., np.newaxis]
# newaxis in between ellipsis and explicit range
_ = checker[..., np.newaxis, :]
_ = checker[:, ..., np.newaxis, :, :]
# Reverse final dimension with new axis
_ = checker[:, :, np.newaxis, :, 2::-1]
# Ellipsis in middle of two newaxis
_ = checker[np.newaxis, ..., np.newaxis]
def testExpandVariable(self):
for use_gpu in False, True:
with self.test_session(use_gpu=use_gpu):
x = tf.Variable(7, dtype=tf.int32)
x.initializer.run()
y = x[None].eval()
self.assertEqual(y.shape, (1,))
self.assertAllEqual(y, (7,))
def testOptimizedCases(self):
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
checker = StridedSliceChecker(self,
StridedSliceChecker.REF_TENSOR_ALIGNED)
# Identity
_ = checker[:]
# Identity
_ = checker[...]
# Identity
_ = checker[np.newaxis, ..., np.newaxis]
# First axis slice
_ = checker[1:]
# First axis slice
_ = checker[np.newaxis, 1:]
class StridedSliceShapeChecker(object):
def __init__(self, x):
self.x = x
def __getitem__(self, spec):
op = self.x.__getitem__(spec)
return op.get_shape()
class StridedSliceShapeTest(test_util.TensorFlowTestCase):
"""Test the shape inference of StridedSliceShapes."""
def testUnknown(self):
with self.test_session(use_gpu=False):
uncertain_tensor = tf.placeholder(tf.float32)
a = StridedSliceShapeChecker(uncertain_tensor)
a_slice_shape = a[...]
self.assertAllEqual(a_slice_shape.ndims, None)
def tensorShapeEqual(self, x, y):
self.assertTrue(x is not None and y is not None or x is None and y is None)
self.assertEqual(x.as_list(), y.as_list())
def testTensorShapeUncertain(self):
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
uncertain_tensor = tf.placeholder(tf.float32, shape=(5, None, 7))
a = StridedSliceShapeChecker(uncertain_tensor)
self.tensorShapeEqual(a[3:5], tensor_shape.TensorShape([2, None, 7]))
self.tensorShapeEqual(a[3:5, :, 4], tensor_shape.TensorShape([2, None]))
self.tensorShapeEqual(a[3:5, 3:4, 4],
tensor_shape.TensorShape([2, None]))
self.tensorShapeEqual(a[3:5, :, 5:10],
tensor_shape.TensorShape([2, None, 2]))
self.tensorShapeEqual(a[3:5, :, 50:3],
tensor_shape.TensorShape([2, None, 0]))
self.tensorShapeEqual(a[3:5, :, tf.newaxis, 50:3,],
tensor_shape.TensorShape([2, None, 1, 0]))
self.tensorShapeEqual(a[1:5:2, :, tf.newaxis, 50:3,],
tensor_shape.TensorShape([2, None, 1, 0]))
self.tensorShapeEqual(a[:5:3, :, tf.newaxis, 50:3,],
tensor_shape.TensorShape([2, None, 1, 0]))
self.tensorShapeEqual(a[:2:3, :, tf.newaxis, 50:3,],
tensor_shape.TensorShape([1, None, 1, 0]))
self.tensorShapeEqual(a[::-1, :, tf.newaxis, ::-2],
tensor_shape.TensorShape([5, None, 1, 4]))
def testTensorValuedIndexShape(self):
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
defined_shape_tensor = tf.placeholder(tf.float32, shape=(5, 3, 7))
index_value = tf.placeholder(tf.int32, shape=())
a = StridedSliceShapeChecker(defined_shape_tensor)
self.tensorShapeEqual(a[index_value], tensor_shape.TensorShape([3, 7]))
self.tensorShapeEqual(a[index_value, ::-1],
tensor_shape.TensorShape([3, 7]))
self.tensorShapeEqual(a[index_value, ::-2],
tensor_shape.TensorShape([2, 7]))
other_scalar = tf.placeholder(tf.int32, shape=())
self.tensorShapeEqual(a[index_value, other_scalar:2],
tensor_shape.TensorShape([None, 7]))
class GradSliceChecker(object):
"""Tests that we can compute a gradient for var^2."""
def __init__(self, test, sess, var, varnp):
self.test = test
self.sess = sess
self.val = var * var
self.var = var
self.varnp = varnp
def __getitem__(self, spec):
slice_var = self.var[spec]
slice_val = self.val[spec]
# compute analytic 2nd derivative
analytic_grad2 = 2 * slice_val
dy = tf.Variable(tf.ones(shape=slice_var.get_shape(), dtype=tf.int32))
assign = dy.assign(slice_var)
slice_val_grad, = tf.gradients(slice_val, self.var, grad_ys=dy)
slice_val_grad2, = tf.gradients(slice_val_grad, dy, grad_ys=self.var)
self.sess.run(assign)
slice_val_grad_evaled, slice_val_grad2_evaled = (
self.sess.run([slice_val_grad, slice_val_grad2]))
analytic_grad2_evaled = analytic_grad2.eval()
self.test.assertAllEqual(slice_val_grad2_evaled, analytic_grad2_evaled)
# compute analytic gradient for slice
np_val_grad = (2 * self.varnp * self.varnp)
np_sliceval_grad = np.zeros(self.var.get_shape())
np_sliceval_grad[spec] = np_val_grad[spec]
# verify gradient
self.test.assertAllEqual(slice_val_grad_evaled, np_sliceval_grad)
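# Worked check of the analytic values used above (comment only): with
# val = var * var, d(val)/d(var) = 2 * var elementwise; the gradient is
# weighted by grad_ys=dy, and since dy has been assigned the sliced var values
# the evaluated gradient is 2 * var * var on the sliced entries and 0 elsewhere
# (np_sliceval_grad). Backpropagating that gradient w.r.t. dy with grad_ys=var
# then gives 2 * slice_val, which is the analytic_grad2 compared above.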
class StridedSliceGradTest(test_util.TensorFlowTestCase):
"""Test that strided slice's custom gradient produces correct gradients."""
def testGradient(self):
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu) as sess:
var = tf.Variable(tf.reshape(tf.range(1, 97, 1), shape=(6, 4, 4)))
init = tf.global_variables_initializer()
sess.run(init)
grad = GradSliceChecker(self, sess, var,
np.array(range(1, 97, 1)).reshape((6, 4, 4)))
_ = grad[2:6:2, 1:3, 1:3]
_ = grad[3:0:-2, 1:3, 1:3]
_ = grad[3:0:-2, tf.newaxis, 1:3, 2, tf.newaxis]
_ = grad[3:0:-2, 1:3, 2]
_ = grad[:, -1, :]
_ = grad[:, -2, :]
with self.assertRaisesRegexp(ValueError, "out of bounds"):
_ = grad[:, -200, :]
with self.assertRaisesRegexp(ValueError, "out of bounds"):
_ = grad[:, 200, :]
def testGradientZero(self):
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu) as sess:
var = tf.Variable(8)
init = tf.global_variables_initializer()
sess.run(init)
grad = GradSliceChecker(self, sess, var,
np.array(8))
_ = grad[tuple()]
class StridedSliceGradTypeTest(test_util.TensorFlowTestCase):
"""Test varied index types and host located memory."""
def testHostVsDevice(self):
with self.test_session(use_gpu=True) as sess:
var2 = tf.Variable(
tf.reshape(
tf.cast(tf.range(1, 5, 1), tf.float32), shape=(4, 1, 1)))
varshape = tf.Variable([6, 4, 4], dtype=tf.int32)
sess.run(tf.global_variables_initializer())
begin = tf.constant([0, 0, 0])
end = tf.constant([4, 1, 1])
strides = tf.constant([1, 1, 1])
foo = array_ops.strided_slice_grad(varshape, begin, end, strides, var2)
sess.run(foo)
def testInt64Shape(self):
with self.test_session(use_gpu=True) as sess:
original_dy = tf.reshape(
tf.cast(tf.range(1, 5, 1), tf.float32), shape=(4, 1, 1))
original_shape = tf.constant([6, 4, 4], dtype=tf.int64)
sess.run(tf.global_variables_initializer())
begin = tf.constant([0, 0, 0], dtype=tf.int64)
end = tf.constant([4, 1, 1], dtype=tf.int64)
strides = tf.constant([1, 1, 1], dtype=tf.int64)
dx = array_ops.strided_slice_grad(original_shape, begin, end, strides,
original_dy)
sess.run(dx)
def testMixedIndexTypes(self):
with self.test_session(use_gpu=True) as sess:
original_dy = tf.reshape(
tf.cast(tf.range(1, 5, 1), tf.float32), shape=(4, 1, 1))
original_shape = tf.constant([6, 4, 4], dtype=tf.int64)
sess.run(tf.global_variables_initializer())
begin = tf.constant([0, 0, 0], dtype=tf.int32)
end = tf.constant([4, 1, 1], dtype=tf.int64)
strides = tf.constant([1, 1, 1], dtype=tf.int64)
with self.assertRaisesRegexp(
TypeError, "Input 'begin' of 'StridedSliceGrad' Op has type int32"
" that does not match type int64 of argument 'shape'"):
dx = array_ops.strided_slice_grad(original_shape, begin, end, strides,
original_dy)
sess.run(dx)
class BenchmarkSlice(object):
def __init__(self, tensor):
self.tensor = tensor
def __getitem__(self, x):
return self.tensor[x]
class StridedSliceBenchmark(tf.test.Benchmark):
"""Benchmark new strided slice operation on non-trivial case."""
def run_and_time(self, slice_op):
tf.global_variables_initializer().run()
for _ in range(10):
_ = slice_op.eval()
iters = 1000
t0 = time.time()
for _ in range(iters):
slice_op.eval()
t1 = time.time()
self.report_benchmark(iters=iters, wall_time=(t1 - t0) / 1000.0)
def make_variable(self):
n = 256
shape = (n, n, n)
items = n**3
var = tf.Variable(
tf.reshape(
tf.linspace(1., float(items), items), shape),
dtype=tf.float32)
return var
def benchmark_strided_slice_skip(self):
with tf.Session():
var = self.make_variable()
helper = BenchmarkSlice(var)
slice_op = helper[::2, ::1, ::2]
self.run_and_time(slice_op)
def benchmark_strided_slice_easy(self):
with tf.Session():
var = self.make_variable()
helper = BenchmarkSlice(var)
slice_op = helper[3::1, 3::1, 3::1]
self.run_and_time(slice_op)
def benchmark_slice_easy(self):
with tf.Session():
var = self.make_variable()
slice_op = var[3::1, 3::1, 3::1]
self.run_and_time(slice_op)
class StridedSliceAssignChecker(object):
def __init__(self, test, x, tensor_type=tf.float32):
self.tensor_type = tensor_type
self.test = test
self.x = tf.cast(tf.constant(x, dtype=tf.float32), dtype=tensor_type)
self.x_np = np.array(x)
def __setitem__(self, index, value):
for use_gpu in [False, True]:
with self.test.test_session(use_gpu=use_gpu) as sess:
var = tf.Variable(self.x)
sess.run(tf.initialize_variables([var]))
val = sess.run(var[index].assign(
tf.constant(
value, dtype=self.tensor_type)))
valnp = np.copy(self.x_np)
valnp[index] = np.array(value)
self.test.assertAllEqual(val, valnp)
class SliceAssignTest(test_util.TensorFlowTestCase):
def testInvalidSlice(self):
with self.test_session() as sess:
foo = tf.constant([1, 2, 3])
with self.assertRaisesRegexp(ValueError, "Sliced assignment"
" is only supported for variables"):
bar = foo[:2].assign(tf.constant([1, 2]))
sess.run(bar)
def testSliceAssign(self):
checker = StridedSliceAssignChecker(self, [[1, 2, 3], [4, 5, 6]])
# Check if equal
checker[:] = [[10, 20, 30], [40, 50, 60]]
# Check trivial (1,1) shape tensor
checker[1:2, 1:2] = [[666]]
# shrinks shape changes
checker[1:2, 1] = [666]
checker[1, 1:2] = [666]
checker[1, 1] = 666
# newaxis shape changes
checker[:, None, :] = [[[10, 20, 30]], [[40, 50, 50]]]
# shrink and newaxis
checker[None, None, 0, 0:1] = [[[999]]]
# Non unit strides
checker[::1, ::-2] = [[33, 333], [44, 444]]
# degenerate interval
checker[8:10, 0] = []
checker[8:10, 8:10] = [[]]
# Assign vector to scalar (rank-0) using newaxis
checker2 = StridedSliceAssignChecker(self, 2225)
checker2[()] = 6 # no indices
checker2[...] = 6 # ellipsis
checker2[None] = [6] # new axis
def testUninitialized(self):
with self.assertRaisesRegexp(
errors.FailedPreconditionError,
"Attempting to use uninitialized value Variable"):
with self.test_session() as sess:
v = tf.Variable([1, 2])
sess.run(v[:].assign([1, 2]))
class ShapeSizeRankTest(test_util.TensorFlowTestCase):
def testDenseShape(self):
with self.test_session():
t_value = [[0, 42], [24, 0]]
self.assertAllEqual((2, 2), tf.shape(t_value).eval())
self.assertEqual(4, tf.size(t_value).eval())
self.assertEqual(2, tf.rank(t_value).eval())
t = tf.constant(t_value)
self.assertAllEqual((2, 2), tf.shape(t).eval())
self.assertEqual(4, tf.size(t).eval())
self.assertEqual(2, tf.rank(t).eval())
def testSparseShape(self):
with self.test_session():
sp_value = tf.SparseTensorValue(
indices=((0, 1), (1, 0)),
values=(42, 24),
dense_shape=(2, 2))
self.assertAllEqual((2, 2), tf.shape(sp_value).eval())
self.assertEqual(4, tf.size(sp_value).eval())
self.assertEqual(2, tf.rank(sp_value).eval())
sp = tf.SparseTensor.from_value(sp_value)
self.assertAllEqual((2, 2), tf.shape(sp).eval())
self.assertEqual(4, tf.size(sp).eval())
self.assertEqual(2, tf.rank(sp).eval())
class SequenceMaskTest(test_util.TensorFlowTestCase):
def testExceptions(self):
with self.test_session():
with self.assertRaisesRegexp(ValueError, "lengths must be 1D"):
tf.sequence_mask([[10, 20]], [10, 20])
with self.assertRaisesRegexp(ValueError, "maxlen must be scalar"):
tf.sequence_mask([10, 20], [10, 20])
def testNormal(self):
with self.test_session():
res = tf.sequence_mask(tf.constant([1, 3, 2]), 5)
self.assertAllEqual(res.get_shape(), [3, 5])
self.assertAllEqual(res.eval(), [[True, False, False, False, False],
[True, True, True, False, False],
[True, True, False, False, False]])
# test dtype and default maxlen:
res = tf.sequence_mask(tf.constant([0, 1, 4]), dtype=tf.float32)
self.assertAllEqual(res.get_shape().as_list(), [3, None])
self.assertAllEqual(res.eval(), [[0.0, 0.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 1.0]])
def testDtypes(self):
def check_dtypes(lengths_dtype, maxlen_dtype):
res = tf.sequence_mask(tf.constant([1, 3, 2], dtype=lengths_dtype),
tf.constant(5, dtype=maxlen_dtype))
self.assertAllEqual(res.get_shape(), [3, 5])
self.assertAllEqual(res.eval(), [[True, False, False, False, False],
[True, True, True, False, False],
[True, True, False, False, False]])
with self.test_session():
check_dtypes(tf.int32, tf.int32)
check_dtypes(tf.int32, tf.int64)
check_dtypes(tf.int64, tf.int32)
check_dtypes(tf.int64, tf.int64)
if __name__ == "__main__":
tf.test.main()
|
the-stack_0_21581 | from pandas_profiling.config import Settings
from pandas_profiling.report.formatters import fmt, fmt_bytesize, fmt_percent
from pandas_profiling.report.presentation.core import (
HTML,
Container,
Table,
VariableInfo,
)
def render_generic(config: Settings, summary: dict) -> dict:
info = VariableInfo(
anchor_id=summary["varid"],
alerts=summary["alerts"],
var_type="Unsupported",
var_name=summary["varname"],
description=summary["description"],
)
table = Table(
[
{
"name": "Missing",
"value": fmt(summary["n_missing"]),
"alert": "n_missing" in summary["alert_fields"],
},
{
"name": "Missing (%)",
"value": fmt_percent(summary["p_missing"]),
"alert": "p_missing" in summary["alert_fields"],
},
{
"name": "Memory size",
"value": fmt_bytesize(summary["memory_size"]),
"alert": False,
},
]
)
return {
"top": Container([info, table, HTML("")], sequence_type="grid"),
"bottom": None,
}
|
the-stack_0_21583 | # Copyright 2018-2022 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
from typing import cast
import streamlit
from streamlit.proto.IFrame_pb2 import IFrame as IFrameProto
class IframeMixin:
def _iframe(
self,
src,
width=None,
height=None,
scrolling=False,
):
"""Load a remote URL in an iframe.
Parameters
----------
src : str
The URL of the page to embed.
width : int
The width of the frame in CSS pixels. Defaults to the app's
default element width.
height : int
The height of the frame in CSS pixels. Defaults to 150.
scrolling : bool
If True, show a scrollbar when the content is larger than the iframe.
Otherwise, do not show a scrollbar. Defaults to False.
"""
iframe_proto = IFrameProto()
marshall(
iframe_proto,
src=src,
width=width,
height=height,
scrolling=scrolling,
)
return self.dg._enqueue("iframe", iframe_proto)
def _html(
self,
html,
width=None,
height=None,
scrolling=False,
):
"""Display an HTML string in an iframe.
Parameters
----------
html : str
The HTML string to embed in the iframe.
width : int
The width of the frame in CSS pixels. Defaults to the app's
default element width.
height : int
The height of the frame in CSS pixels. Defaults to 150.
scrolling : bool
If True, show a scrollbar when the content is larger than the iframe.
Otherwise, do not show a scrollbar. Defaults to False.
"""
iframe_proto = IFrameProto()
marshall(
iframe_proto,
srcdoc=html,
width=width,
height=height,
scrolling=scrolling,
)
return self.dg._enqueue("iframe", iframe_proto)
@property
def dg(self) -> "streamlit.delta_generator.DeltaGenerator":
"""Get our DeltaGenerator."""
return cast("streamlit.delta_generator.DeltaGenerator", self)
def marshall(
proto,
src: Optional[str] = None,
srcdoc: Optional[str] = None,
width: Optional[int] = None,
height: Optional[int] = None,
scrolling: bool = False,
) -> None:
"""Marshalls data into an IFrame proto.
These parameters correspond directly to <iframe> attributes, which are
described in more detail at
https://developer.mozilla.org/en-US/docs/Web/HTML/Element/iframe.
Parameters
----------
proto : IFrame protobuf
The protobuf object to marshall data into.
src : str
The URL of the page to embed.
srcdoc : str
Inline HTML to embed. Overrides src.
width : int
The width of the frame in CSS pixels. Defaults to the app's
default element width.
height : int
The height of the frame in CSS pixels. Defaults to 150.
scrolling : bool
If true, show a scrollbar when the content is larger than the iframe.
Otherwise, never show a scrollbar.
"""
if src is not None:
proto.src = src
if srcdoc is not None:
proto.srcdoc = srcdoc
if width is not None:
proto.width = width
proto.has_width = True
if height is not None:
proto.height = height
else:
proto.height = 150
proto.scrolling = scrolling
|
the-stack_0_21584 | # -*- coding: utf-8 -*-
import os
import random
from jasper import plugin
def get_jokes(language='en-US'):
filename = os.path.join(os.path.dirname(__file__),
'data',
'%s.txt' % language)
jokes = []
found = []
with open(filename, "r") as f:
for line in f:
line = line.strip()
if not line:
continue
found.append(line)
if len(found) == 2:
jokes.append(tuple(found))
found = []
return jokes
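# The data files are expected to pair consecutive non-blank lines into
# (setup, punchline) tuples; e.g. a hypothetical en-US.txt snippet
#
# Boo.
# Don't cry, it's only a joke!
#
# would be returned as [("Boo.", "Don't cry, it's only a joke!")].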
class JokePlugin(plugin.SpeechHandlerPlugin):
def __init__(self, *args, **kwargs):
super(JokePlugin, self).__init__(*args, **kwargs)
try:
language = self.profile['language']
except KeyError:
language = 'en-US'
try:
self._jokes = get_jokes(language)
except IOError as e:
if e.errno == 2:
self._jokes = []
else:
raise e
if len(self._jokes) == 0:
raise ValueError('Unsupported language!')
def get_phrases(self):
return [self.gettext("JOKE")]
def handle(self, text, mic):
"""
Responds to user-input, typically speech text, by telling a joke.
Arguments:
text -- user-input, typically transcribed speech
mic -- used to interact with the user (for both input and output)
"""
joke = random.choice(self._jokes)
mic.say(self.gettext("Knock knock"))
mic.active_listen()
mic.say(joke[0])
mic.active_listen()
mic.say(joke[1])
def is_valid(self, text):
"""
Returns True if the input is related to jokes/humor.
Arguments:
text -- user-input, typically transcribed speech
"""
return (self.gettext('JOKE').upper() in text.upper())
|
the-stack_0_21586 |
import sys
import textwrap
import argparse
import pandas as pd
# Way to call script:
def verify_synonym(input_file, output_file, synonym_file,
usagekeycol='gbifapi_usageKey',
acceptedkeycol='gbifapi_acceptedKey',
taxonomicstatuscol='gbifapi_status',
outputcol='nameMatchValidation'):
"""verify if more information on the synonyms is already present
Find out which of the synonyms were already registered as defined by the
synonym_file, by checking the match between the usageKey AND acceptedKey
as provided by GBIF. When a match is found, the status as registered in the
synonym-file is provided.
Parameters:
--------
    input_file: str (filepath) | pd.DataFrame
        input file (or pandas DataFrame) in which to check the synonym values
    output_file: str (filepath) | None
        output file to write the result to; if None, no output file is created
    synonym_file: str
        relative path to the synonym file used for the verification
usagekeycol: str (default: gbifapi_usageKey)
column name with the usagekey for input_file
acceptedkeycol: str (default: gbifapi_acceptedKey)
column name with the acceptedKey for input_file
taxonomicstatuscol: str (default: gbif_apistatus)
column name with the API status of GBIF for input_file, NOT status
outputcol: str
column name to put the remarks of the verification of the input file
Remarks:
--------
For the synonym_file, the names of the usagekey, acceptedkey and status
columns are fixed and should be equal to respectively `gbifapi_usageKey`,
`gbifapi_acceptedKey` and 'status'
"""
if taxonomicstatuscol == "status":
raise Exception('Change name of the status column of your input file')
if isinstance(input_file, str):
# Reading in the files (csv or tsv)
if input_file.endswith('tsv'):
delimiter = '\t'
else:
delimiter = ','
input_file = pd.read_csv(input_file, sep=delimiter,
encoding='utf-8', dtype=object)
elif isinstance(input_file, pd.DataFrame):
delimiter = ',' # Patch: set a delimiter for the output file
input_file = input_file.copy()
else:
raise Exception('Input datatype not supported, use either str or a \
pandas DataFrame')
# read the synonyms file
synonyms = pd.read_csv(synonym_file, sep='\t', dtype=object)
#extract useful columns from synonym file (expected to be fixed)
synonyms_subset = synonyms[["gbifapi_usageKey", "gbifapi_acceptedKey", "status"]]
# Check the matches by doing a join of the inputfile with the synonym
verified = pd.merge(input_file, synonyms_subset, how='left',
left_on=[usagekeycol, acceptedkeycol],
right_on=["gbifapi_usageKey", "gbifapi_acceptedKey"])
# overwrite for SYNONYM values when already present
if outputcol in verified.columns:
verified.loc[verified[taxonomicstatuscol] == "SYNONYM", outputcol] = \
verified.loc[verified[taxonomicstatuscol] == "SYNONYM", 'status']
verified = verified.drop('status', axis=1)
else:
verified = verified.rename(columns={'status' : outputcol})
    if output_file is not None and isinstance(output_file, str):
verified.to_csv(output_file, sep=delimiter, index=False,
encoding='utf-8')
return verified
def main(argv=None):
"""
Use usagekeycol and acceptedkeycol to lookup a match in the synonymlist
* If match found and status = ok:
populate statuscol with: ok: SYNONYM verified
* If match found and status <> ok:
populate statuscol with: verify: SYNONYM <status>
* If no match found:
do nothing
"""
parser = argparse.ArgumentParser(description="""Lookup a match in the
synonymlist. If match and status is ok, the record is enlisted as
SYNONYM verified; if match but status is not ok, the record is
provided of a verify status. If no match is found, nothing is done.
""")
parser.add_argument('input_file', type=str,
help='the relative path and filename containing the usage and acceptedkey col')
parser.add_argument('output_file', action='store', default=None,
help='output file name, can be same as input')
parser.add_argument('--synonym_file', type=str,
action='store', default=None,
help='relative path and filename to the file containing the synonym status information')
parser.add_argument('--usagekeycol', type=str,
action='store', default='gbifapi_usageKey',
help='column name of the input file containing the gbif usage keys (default when not provided: `gbifapi_usageKey`)')
parser.add_argument('--acceptedkeycol', type=str,
action='store', default='gbifapi_acceptedKey',
help='column name of the input file containing the gbif accepted keys (default when not provided: `gbifapi_acceptedKey`)')
parser.add_argument('--taxonomicstatuscol', type=str,
action='store', default='gbifapi_status',
help='column name of the input file containing the gbif taxonomic matchin status information, e.g. SYNONYM (default when not provided: `gbifapi_status`)')
parser.add_argument('--outputcol', type=str,
action='store', default='nameMatchValidation',
help='column name of the output file to provide the information about the synonym status (default when not provided: `nameMatchValidation`)')
args = parser.parse_args()
print("Verification of the synonym names...")
print(textwrap.dedent("""\
Using {} as input file and searching matches
        with the synonyms enlisted in {}
""".format(args.input_file,
args.synonym_file)))
print(textwrap.dedent("""\
Columns of usage_key and accepted_key as provided by
gbif in the input file are named respectively {}
and {}. The columns with the taxonomicstatus
(SYNONYM,...) is named {}
""".format(args.usagekeycol,
args.acceptedkeycol,
args.taxonomicstatuscol)))
print(textwrap.dedent("""\
Writing verification information to column {}
""".format(args.outputcol)))
verify_synonym(args.input_file, args.output_file,
args.synonym_file,
args.usagekeycol,
args.acceptedkeycol,
args.taxonomicstatuscol,
args.outputcol
)
print("".join(["saving to file", args.output_file, "...done!"]))
if __name__ == "__main__":
sys.exit(main())
|
the-stack_0_21587 | from __future__ import print_function
import os
import re
import burlap
from burlap import ContainerSatchel
from burlap.constants import *
from burlap.decorators import task
fabfile_template = os.path.join(
os.path.dirname(burlap.__file__),
'templates',
'burlap',
'fabfile.py.template',
)
def md(d):
if os.path.isdir(d):
return
os.makedirs(d)
def to_camelcase(value):
value = re.sub(r'[^a-zA-Z0-9]+', ' ', value).strip()
return ''.join(x.capitalize() for x in value.split(' '))
def init_dj(project_name, default_roles, virtualenv_dir='.env', version=None, **kwargs):
site_name = project_name
print('Installing Django...')
if version:
os.system('%s/bin/pip install Django==%s' % (virtualenv_dir, version))
else:
os.system('%s/bin/pip install Django' % virtualenv_dir)
print('Initializing Django project...')
if not os.path.isdir('src/%s' % site_name):
print('Initializing base django project...')
os.system('. %s/bin/activate; django-admin.py startproject %s src; deactivate' % (virtualenv_dir, site_name,))
_settings_fn = os.path.abspath('src/%s/settings.py' % project_name)
_content = open(_settings_fn, 'r').read()
_sites = '''SITE_{name_upper} = "{name_lower}"
SITES = (
SITE_{name_upper},
)
'''.format(
name_upper=project_name.upper(),
name_lower=project_name.lower(),
)
_top = []
for _role in default_roles:
_top.append("ROLE_%s = '%s'" % (_role.upper(), _role.lower()))
_top.append('ROLES = (')
for _role in default_roles:
_top.append(" ROLE_%s," % (_role.upper(),))
_top.append(')')
_index = _content.find('"""\n\n')+4
bottom_args = dict(
app_name=project_name,
app_name_title=project_name.title() + ' Administration',
app_name_simple=project_name.title()
)
_bottom = '''
PROJECT_DIR = os.path.abspath(os.path.join(os.path.split(__file__)[0], '..', '..'))
STATIC_ROOT = os.path.join(PROJECT_DIR, 'static')
MEDIA_ROOT = os.path.join(PROJECT_DIR, 'media')
MEDIA_URL = '/media/'
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
TEMPLATE_DIRS = (
'%s/src/{app_name}/templates' % PROJECT_DIR,
)
# https://docs.djangoproject.com/en/1.11/ref/settings/#templates
TEMPLATES = [
{{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': TEMPLATE_DIRS,
'APP_DIRS': True,
'OPTIONS': {{
#'loaders': TEMPLATE_LOADERS, # Unnecessary if we're using APP_DIRS.
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
}},
}},
]
ADMIN_TITLE = '{app_name_title}'
ADMIN_TITLE_SIMPLE = '{app_name_simple}'
'''.format(**bottom_args)
open(_settings_fn, 'w').write(_content[:_index]+_sites+('\n'.join(_top))+_content[_index:]+_bottom)
print('Creating Django helper scripts...')
open('src/manage', 'w').write('''#!/bin/bash
# Helper script for ensuring we use the Python binary in our local
# virtual environment when calling management commands.
# Otherwise, we'd have to always run `. ../.env/bin/activate`, which can be
# annoying.
# Be sure to run `fab <role> pip.init` first in order to set up
# the target role's Python virtual environment.
DIR=`dirname $0`;
cd $DIR;
../.env/bin/python manage.py $@''')
open('src/runserver', 'w').write('''#!/bin/bash
# Helper script for running the local dev server, ensuring
# our virtual environment is used.
#set -e
#script_dir=`dirname $0`
#cd $script_dir
if [ -z "$PORT" ]; then
export PORT=8111
fi
if [ -z "$ROLE" ]; then
export ROLE=dev
fi
. ~/.bash_aliases
./manage runserver localhost:$PORT''')
open('src/shell', 'w').write(r'''#!/bin/bash
# Creates a local PIP-aware shell.
#set -e
if [ $_ == $0 ]
then
echo "Please source this script. Do not execute."
exit 1
fi
#script_dir=`dirname $0`
#cd $script_dir
. .env/bin/activate
PS1="\u@\h:\W(fab)\$ "''')
md('media')
md('static')
os.system('chmod +x src/shell')
os.system('chmod +x src/manage')
os.system('chmod +x src/runserver')
# Create the primary app for containing models/urls/views.
if not os.path.isdir('src/%s' % project_name):
os.system('cd src; ./manage startapp %s' % (project_name,))
os.system('cd src; ./manage syncdb')
class ProjectSatchel(ContainerSatchel):
name = 'project'
def set_defaults(self):
pass
def update_settings(self, d, role, path='roles/{role}/settings.yaml'):
"""
Writes a key/value pair to a settings file.
"""
try:
import ruamel.yaml
load_func = ruamel.yaml.round_trip_load
dump_func = ruamel.yaml.round_trip_dump
except ImportError:
            print('Warning: ruamel.yaml not available, reverting to yaml package, possible loss of formatting may occur.')
import yaml
load_func = yaml.load
dump_func = yaml.dump
settings_fn = path.format(role=role)
data = load_func(open(settings_fn))
data.update(d)
settings_str = dump_func(data)
open(settings_fn, 'w').write(settings_str)
@task
def create_skeleton(self, project_name, roles='', components='', pip_requirements='', virtualenv_dir='.env', **kwargs):
assert project_name, 'Specify project name.'
site_name = project_name
app_name = project_name
default_roles = [_ for _ in roles.split(',') if _.strip()]
default_components = [_.strip().lower() for _ in components.split(',') if _.strip()]
print('Creating folders...')
md('roles/all')
for _role in default_roles:
md('roles/%s' % _role)
md('src')
print('Creating roles...')
open('roles/all/settings.yaml', 'w').write(
self.render_to_string(
'burlap/all_settings.yaml.template',
extra=dict(project_name=project_name, site_name=site_name, app_name=app_name)))
for _role in default_roles:
open('roles/%s/settings.yaml' % _role, 'w').write(
self.render_to_string(
'burlap/role_settings.yaml.template',
extra=dict(project_name=project_name, site_name=site_name, role=_role)))
default_packages = pip_requirements.split(',')
if default_packages:
open('roles/all/pip-requirements.txt', 'w').write('\n'.join(default_packages))
print('Adding global apt-requirements.txt...')
open('roles/all/apt-requirements.txt', 'w').write('')
print('Adding fabfile...')
content = open(fabfile_template, 'r').read()
content = content.format(project_name=project_name)
open('fabfile.py', 'w').write(content.strip()+'\n')
print('Initializing local development virtual environment...')
os.system('virtualenv --no-site-packages %s' % virtualenv_dir)
for package in default_packages:
os.system('. %s/bin/activate; pip install %s; deactivate' % (virtualenv_dir, package))
# Install burlap dependencies.
burlap_pip_requirements = os.path.join(os.path.dirname(burlap.__file__), 'fixtures/requirements.txt')
print('burlap_pip_requirements:', burlap_pip_requirements)
assert os.path.exists(burlap_pip_requirements), 'Missing requirements file: %s' % burlap_pip_requirements
for package in open(burlap_pip_requirements, 'r').readlines():
if not package.strip():
continue
cmd = '%s/bin/pip install %s' % (virtualenv_dir, package)
print('cmd:', cmd)
assert not os.system(cmd)
print('Adding bash setup...')
open('setup.bash', 'w').write(self.render_to_string('burlap/setup.bash.template'))
print('Adding gitignore...')
open('.gitignore', 'w').write(self.render_to_string('burlap/gitignore.template'))
args = kwargs.copy()
args['project_name'] = project_name
args['roles'] = roles
args['default_roles'] = default_roles
args['components'] = components
args['pip_requirements'] = pip_requirements
args['virtualenv_dir'] = virtualenv_dir
for component in default_components:
print('Setting up component %s...' % component)
# Get component-specific settings.
component_kwargs = dict(args)
for _k, _v in kwargs.items():
_key = component+'_'
if _k.startswith(_key):
component_kwargs[_k[len(_key):]] = _v
del component_kwargs[_k]
print('component_kwargs:', component_kwargs)
try:
globals()['init_%s' % component](**component_kwargs)
except KeyError:
pass
print('='*80)
print()
print('Skeleton created for project %s!' % (project_name.title(),))
print()
@task
def add_roles(self, roles):
for role in roles:
_role = role.strip().lower()
fn = 'roles/%s/settings.yaml' % _role
if os.path.isfile(fn):
continue
fn_dir = os.path.split(fn)[0]
if not os.path.isdir(fn_dir):
os.makedirs(fn_dir)
open(fn, 'w').write(
self.render_to_string('burlap/role_settings.yaml.template', extra=dict(role=_role)))
print('Added role %s!' % role)
@task
def create_satchel(self, name):
name_simple = re.sub(r'[^a-z0-9]+', '', name.lower())
content = self.render_to_string(
'burlap/satchel.py.template',
extra=dict(
name_camelcase=to_camelcase(name),
name_simple=name_simple,
))
if not os.path.isdir('satchels'):
os.makedirs('satchels')
os.system('touch satchels/__init__.py')
satchel_fn = 'satchels/%s.py' % name_simple
open(satchel_fn, 'w').write(content.strip()+'\n')
print('Wrote %s.' % satchel_fn)
project = ProjectSatchel()
|
the-stack_0_21590 | from collections import OrderedDict
from bactopia.parse import parse_bactopia_directory
def summarize(path: str) -> dict:
"""
Creates summary reports for a Bactopia directory.
Args:
path (str): Path to a directory containing Bactopia results
Returns:
        dict: parsed per-sample results from the Bactopia directory
"""
    results = parse_bactopia_directory(path)
    return results
def get_rank(cutoff: dict, coverage: float, quality: float, length: int, contigs: int, genome_size: int, is_paired: bool) -> list:
"""
Determine the rank (gold, silver, bronze, fail) based on user cutoffs.
Args:
cutoff (dict): Cutoffs set by users to determine rank
coverage (float): Estimated coverage of the sample
quality (float): Per-read average quality
length (int): Median length of reads
contigs (int): Total number of contigs
genome_size (int): Genome size of sample used in analysis
is_paired (bool): Sample used paired-end reads
Returns:
list: the rank and reason for the ranking
"""
rank = None
reason = []
coverage = float(f'{float(coverage):.2f}')
quality = float(f'{float(quality):.2f}')
length = round(float(f'{float(length):.2f}'))
contigs = int(contigs)
genome_size = int(genome_size)
gold = cutoff['gold']
silver = cutoff['silver']
bronze = cutoff['bronze']
if coverage >= gold['coverage'] and quality >= gold['quality'] and length >= gold['length'] and contigs <= gold['contigs'] and is_paired:
reason.append('passed all cutoffs')
rank = 'gold'
elif coverage >= silver['coverage'] and quality >= silver['quality'] and length >= silver['length'] and contigs <= silver['contigs'] and is_paired:
if coverage < gold['coverage']:
reason.append(f"Low coverage ({coverage:.2f}x, expect >= {gold['coverage']}x)")
if quality < gold['quality']:
reason.append(f"Poor read quality (Q{quality:.2f}, expect >= Q{gold['quality']})")
if length < gold['length']:
reason.append(f"Short read length ({length}bp, expect >= {gold['length']} bp)")
if contigs > gold['contigs']:
reason.append(f"Too many contigs ({contigs}, expect <= {gold['contigs']})")
rank = 'silver'
elif coverage >= bronze['coverage'] and quality >= bronze['quality'] and length >= bronze['length'] and contigs <= bronze['contigs']:
if coverage < silver['coverage']:
reason.append(f"Low coverage ({coverage:.2f}x, expect >= {silver['coverage']}x)")
if quality < silver['quality']:
reason.append(f"Poor read quality (Q{quality:.2f}, expect >= Q{silver['quality']})")
if length < silver['length']:
reason.append(f"Short read length ({length}bp, expect >= {silver['length']} bp)")
if contigs > silver['contigs']:
reason.append(f"Too many contigs ({contigs}, expect <= {silver['contigs']})")
if not is_paired:
reason.append(f"Single-end reads")
rank = 'bronze'
if not rank:
rank = 'exclude'
if coverage < bronze['coverage']:
reason.append(f"Low coverage ({coverage:.2f}x, expect >= {bronze['coverage']}x)")
if quality < bronze['quality']:
reason.append(f"Poor read quality (Q{quality:.2f}, expect >= Q{bronze['quality']})")
if length < bronze['length']:
reason.append(f"Short read length ({length:.2f}bp, expect >= {bronze['length']} bp)")
if contigs > bronze['contigs']:
reason.append(f"Too many contigs ({contigs}, expect <= {bronze['contigs']})")
    if cutoff['min-assembled-size']:
        if genome_size < cutoff['min-assembled-size']:
            reason.append(f"Assembled size is too small ({genome_size} bp, expect >= {cutoff['min-assembled-size']})")
    if cutoff['max-assembled-size']:
        if genome_size > cutoff['max-assembled-size']:
            reason.append(f"Assembled size is too large ({genome_size} bp, expect <= {cutoff['max-assembled-size']})")
reason = ";".join(sorted(reason))
return [rank, reason]
def print_failed(failed: list, spaces: int = 8) -> str:
"""
Format the strings of samples that failed
Args:
failed (list): A list of samples that failed for a particular reason
spaces (int, optional): Total number of spaces to indent. Defaults to 8.
Returns:
str: The set of formatted strings
"""
lines = []
for key, val in sorted(failed.items()):
if key != 'failed-cutoff':
lines.append(f'{spaces * " "}{key.replace("-", " ").title()}: {len(val)}')
return "\n".join(lines) if lines else ""
def print_cutoffs(cutoffs: list, spaces: int = 8) -> str:
"""
Format strings for samples that failed a cutoff.
Args:
cutoffs (list): A list of samples that failed for a cutoff
spaces (int, optional): Total number of spaces to indent. Defaults to 8.
Returns:
str: The set of formatted strings
"""
lines = []
for key, val in sorted(cutoffs.items()):
lines.append(f'{spaces * " "}{key}: {val}')
return "\n".join(lines) if lines else ""
def gather_results(sample: dict, rank: str, reason: str) -> dict:
"""
Aggregate results into an unnested dictionary.
Args:
sample (dict): The results associated with a sample
rank (str): The rank of a sample
reason (str): The reason for the given rank
Returns:
dict: An unnested dictionary of results
"""
results = OrderedDict((
('sample', sample['sample']),
('is_paired', sample['is_paired']),
('rank', rank),
('reason', reason),
('estimated_genome_size', sample['genome_size'])
))
results.update(_remove_keys(sample['results']['assembly']['stats'], ['fasta']))
if sample['results']['assembly']['checkm']:
results['checkm_lineage'] = sample['results']['assembly']["checkm"]["Marker lineage"]
results['checkm_completeness'] = sample['results']['assembly']["checkm"]["Completeness"]
results['checkm_contamination'] = sample['results']['assembly']["checkm"]["Contamination"]
results['checkm_heterogeneity'] = sample['results']['assembly']["checkm"]["Strain heterogeneity"]
results.update(_prefix_keys(sample['results']['quality-control']['original']['qc_stats'], 'original'))
results.update(_prefix_keys(sample['results']['quality-control']['final']['qc_stats'], 'final'))
results.update(_remove_keys(sample['results']['annotation']['stats'], ['organism', 'contigs', 'bases']))
results.update(_add_minmers(sample['results']['minmers']))
results.update(_add_mlst(sample['results']['mlst']))
return results
def _add_mlst(mlsts: dict) -> dict:
"""
Read through MLST results and create column each schema.
Args:
mlsts (dict): The MLST results associated with a sample
Returns:
dict: Per schema MLST hits
"""
results = OrderedDict()
for key, vals in mlsts.items():
schema, tool = key.split('-')
prefix = f"mlst_{tool}" if schema == "default" else f"mlst_{schema}_{tool}"
if tool == "blast":
results[f"{prefix}_st"] = vals['ST']['st']
results[f"{prefix}_loci"] = len(vals) - 1
results[f"{prefix}_perfect_matches"] = vals['ST']['perfect_matches']
else:
for k, v in vals.items():
results[f"{prefix}_{k.lower()}"] = v
return results
def _add_minmers(minmers: dict) -> dict:
"""
Read through minmer results and create column for top hit.
Args:
minmers (dict): Mash and Sourmash results against RefSeq and GenBank
Returns:
dict: Top hit description for each set of databases
"""
results = OrderedDict()
for key in ['refseq-k21', 'genbank-k21', 'genbank-k31', 'genbank-k51']:
if key in minmers:
prefix = key.replace('-', '_')
if len(minmers[key]):
if key.startswith('genbank'):
# Sourmash keys: "overlap", "p_query", "p_match", "match"
if minmers[key]['matches']:
results[f'{prefix}_match'] = minmers[key]['matches'][0]['match'].split("(")[0].rstrip()
results[f'{prefix}_overlap'] = minmers[key]['matches'][0]['overlap']
results[f'{prefix}_p_query'] = minmers[key]['matches'][0]['p_query']
results[f'{prefix}_p_match'] = minmers[key]['matches'][0]['p_match']
else:
results[f'{prefix}_match'] = None
results[f'{prefix}_overlap'] = None
results[f'{prefix}_p_query'] = None
results[f'{prefix}_p_match'] = None
results[f'{prefix}_no_assignment'] = minmers[key]['no_assignment']
results[f'{prefix}_total'] = len(minmers[key]['matches'])
else:
# Mash keys: "identity", "shared-hashes", "median-multiplicity", "p-value", "query-ID", "query-comment"
results[f'{prefix}_id'] = minmers[key][0]['query-ID']
results[f'{prefix}_identity'] = minmers[key][0]['identity']
results[f'{prefix}_shared_hashes'] = minmers[key][0]['shared-hashes']
results[f'{prefix}_median_multiplicity'] = minmers[key][0]['median-multiplicity']
results[f'{prefix}_p_value'] = minmers[key][0]['p-value']
results[f'{prefix}_comment'] = minmers[key][0]['query-comment']
results[f'{prefix}_total'] = len(minmers[key])
return results
def _remove_keys(results: dict, remove: list) -> dict:
"""
Remove a set of keys from a dictionary.
Args:
results (dict): The dictionary of results
remove (list): the keys to remove
Returns:
dict: The altered dictionary
"""
removed = {}
for key, val in results.items():
if key not in remove:
removed[key] = val
return removed
def _prefix_keys(results: dict, prefix: str) -> dict:
"""
Add a prefix to existing keys
Args:
results (dict): The dictionary of results
prefix (str): A string to prefix each key with
Returns:
dict: The result dictionary with prefixed keys.
"""
prefixed = {}
for key, val in results.items():
prefixed[f'{prefix}_{key}'] = val
return prefixed
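# Illustrative call of get_rank() (not part of the original module); the cutoff
# thresholds below are placeholder values, only the dictionary keys mirror what
# get_rank() actually reads.
if __name__ == '__main__':
    example_cutoff = {
        'gold': {'coverage': 100, 'quality': 30, 'length': 95, 'contigs': 100},
        'silver': {'coverage': 50, 'quality': 20, 'length': 75, 'contigs': 200},
        'bronze': {'coverage': 20, 'quality': 12, 'length': 49, 'contigs': 500},
        'min-assembled-size': None,
        'max-assembled-size': None,
    }
    rank, reason = get_rank(example_cutoff, coverage=85.0, quality=32.1,
                            length=150, contigs=75, genome_size=2800000,
                            is_paired=True)
    print(rank, reason)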
|
the-stack_0_21594 | # qubit number=4
# total number=9
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
prog = Program() # circuit begin
prog += H(0) # number=1
prog += H(1) # number=2
prog += H(2) # number=3
prog += X(2) # number=6
prog += H(3) # number=4
prog += Y(3) # number=5
prog += Y(3) # number=7
prog += Y(3) # number=8
# circuit end
return prog
def summrise_results(bitstrings) -> dict:
d = {}
for l in bitstrings:
if d.get(l) is None:
d[l] = 1
else:
d[l] = d[l] + 1
return d
if __name__ == '__main__':
prog = make_circuit()
qvm = get_qc('4q-qvm')
results = qvm.run_and_measure(prog,1024)
bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
bitstrings = [''.join(map(str, l)) for l in bitstrings]
writefile = open("../data/startPyquil47.csv","w")
print(summrise_results(bitstrings),file=writefile)
writefile.close()
|
the-stack_0_21595 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
import requests
import json
import pymysql
import time, datetime
from decimal import Decimal
import sys
import os
sys.path.append("..")
import config
connection = pymysql.connect(host=config.host, port=config.port, user=config.user, password=config.password, db=config.db)
def Delete():
with connection.cursor() as cursor:
cursor.execute("delete from Vote;")
connection.commit()
print("Delete OK")
def Insert(max_id):
with connection.cursor() as cursor:
sql = "INSERT Vote(addr,a1) SELECT `to` as addr,SUM(amount) as a1 from Tx where left(`to`,4) != '20w0' and id <= %d GROUP BY `to`" % max_id
cursor.execute(sql)
connection.commit()
print("Insert OK")
def Update(max_id):
with connection.cursor() as cursor:
sql = "SELECT form,SUM(amount+free) as a2 from Tx where left(form,4) != '20w0' and form != '000000000000000000000000000000000000000000000000000000000' and id <= %d GROUP BY form" % max_id
cursor.execute(sql)
rows = cursor.fetchall()
A2 = {}
for r in rows:
A2[r[0]] = r[1]
print("out amout...")
sql = "SELECT client_in,SUM(amount) as v1 from Tx where client_in is not null and id <= %d GROUP BY client_in" % max_id
cursor.execute(sql)
rows = cursor.fetchall()
V1 = {}
for r in rows:
V1[r[0]] = r[1]
print("in vote...")
sql = "SELECT client_out,SUM(amount+free) as v2 from Tx where client_out is not null and id <= %d GROUP BY client_out" % max_id
cursor.execute(sql)
rows = cursor.fetchall()
V2 = {}
for r in rows:
V2[r[0]] = r[1]
print("out vote...")
cursor.execute("select * from Vote")
rows = cursor.fetchall()
for r in rows:
a2 = 0
if r[1] in A2:
a2 = A2[r[1]]
v1 = 0
if r[1] in V1:
v1 = V1[r[1]]
v2 = 0
if r[1] in V2:
v2 = V2[r[1]]
sql = "update Vote set a2 = %s,v1 = %s,v2 = %s where id = %s"
cursor.execute(sql,[a2,v1,v2,r[0]])
connection.commit()
print("update OK")
def Export(height):
data = []
f = open(('./%d.json' % height), 'w')
with connection.cursor() as cursor:
cursor.execute("select addr,(a1 - a2) as a,(v1 - v2) as v from Vote")
rows = cursor.fetchall()
for r in rows:
if r[1] > 0 or r[2] > 0:
data.append({
"address": r[0],
"balance": float(r[1]),
"vote": float(r[2])})
if r[1] < 0 or r[2] < 0:
print("err:",r)
sys.exit()
f.write(json.dumps(data))
f.close()
print("Export OK")
def GetID(height):
with connection.cursor() as cursor:
sql = "select hash from Block where height = %d and is_useful = 1" % height
cursor.execute(sql)
sql = "select max(id) as id from Tx where block_hash = '%s'" % cursor.fetchone()[0]
cursor.execute(sql)
return cursor.fetchone()[0]
if __name__ == '__main__':
height = 592443
max_id = GetID(height)
Delete()
Insert(max_id)
Update(max_id)
Export(height) |
the-stack_0_21597 | import pygal
from pygal.style import RotateStyle
from jinja2.filters import do_filesizeformat
# Formatting functions
number_formatter = lambda v: '{:,}'.format(v)
bytes_formatter = lambda v: do_filesizeformat(v, True)
def tables_piechart(db, by_field, value_formatter):
'''
Generate a pie chart of the top n tables in the database.
`db` - the database instance
`by_field` - the field name to sort by
`value_formatter` - a function to use for formatting the numeric values
'''
Tables = db.get_model_for_table('tables', system_table=True)
qs = Tables.objects_in(db).filter(database=db.db_name, is_temporary=False).exclude(engine='Buffer')
tuples = [(getattr(table, by_field), table.name) for table in qs]
return _generate_piechart(tuples, value_formatter)
def columns_piechart(db, tbl_name, by_field, value_formatter):
'''
Generate a pie chart of the top n columns in the table.
`db` - the database instance
`tbl_name` - the table name
`by_field` - the field name to sort by
`value_formatter` - a function to use for formatting the numeric values
'''
ColumnsTable = db.get_model_for_table('columns', system_table=True)
qs = ColumnsTable.objects_in(db).filter(database=db.db_name, table=tbl_name)
tuples = [(getattr(col, by_field), col.name) for col in qs]
return _generate_piechart(tuples, value_formatter)
def _get_top_tuples(tuples, n=15):
'''
Given a list of tuples (value, name), this function sorts
the list and returns only the top n results. All other tuples
are aggregated to a single "others" tuple.
'''
non_zero_tuples = [t for t in tuples if t[0]]
sorted_tuples = sorted(non_zero_tuples, reverse=True)
if len(sorted_tuples) > n:
others = (sum(t[0] for t in sorted_tuples[n:]), 'others')
sorted_tuples = sorted_tuples[:n] + [others]
return sorted_tuples
def _generate_piechart(tuples, value_formatter):
'''
Generates a pie chart.
`tuples` - a list of (value, name) tuples to include in the chart
`value_formatter` - a function to use for formatting the values
'''
style = RotateStyle('#9e6ffe', background='white', legend_font_family='Roboto', legend_font_size=18, tooltip_font_family='Roboto', tooltip_font_size=24)
chart = pygal.Pie(style=style, margin=0, title=' ', value_formatter=value_formatter, truncate_legend=-1)
for t in _get_top_tuples(tuples):
chart.add(t[1], t[0])
return chart.render(is_unicode=True, disable_xml_declaration=True)
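# Illustrative sketch (not part of the original module): rendering a chart from
# plain (value, name) tuples with the helper above; the numbers are made up.
if __name__ == '__main__':
    sizes = [(10 ** 9, 'events'), (2 * 10 ** 8, 'transactions'), (5 * 10 ** 6, 'sessions')]
    svg = _generate_piechart(sizes, bytes_formatter)
    with open('example_piechart.svg', 'w') as f:
        f.write(svg)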
|
the-stack_0_21600 | import glob
import os
from surrogateGeneration import SurrogateGeneration
import threading
import configparser
# threading
class PipeThread(threading.Thread):
def __init__(self, threadName, subset, sg):
threading.Thread.__init__(self)
self.threadName = threadName
self.subset = subset
self.sg = sg
# process files
def run(self):
print("starting {} with {} files".format(self.threadName, len(self.subset)))
self.sg.collectFiles(self.subset, self.threadName)
print('Exiting {}'.format(self.threadName))
# run surrogate generation for subsets of files
def runSurrogateGeneration(parameters):
sg = SurrogateGeneration(parameters)
files = glob.glob(os.path.join(parameters['settings']['path_input'], '**', '*.ann'), recursive=True)
print('{} files to process'.format(len(files)))
threadNr = int(parameters['settings']['threads'])
threads = []
for i in range(0, threadNr):
thread = PipeThread("Thread-{}".format(str(i)), files[i::threadNr], sg)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
print('{} files processed'.format(sg.nrFiles))
# get configuration
def getConfig():
config = configparser.ConfigParser()
config.read('param.conf')
return config
if __name__ == '__main__':
runSurrogateGeneration(getConfig()) |
the-stack_0_21601 | import sys
import os
import shutil
def show_help():
sys.stderr.write('To use this script, attach files to the email and send with the subject savefiles')
sys.exit(0)
if __name__ == '__main__':
if sys.argv[4].upper().find('HELP') != -1:
show_help()
    if len(sys.argv) != 6:
        sys.stderr.write('No files were attached. Nothing was saved.\n')
        sys.exit(1)
files = eval(sys.argv[5])
for name, path in files:
new_location = os.path.join(os.environ['HOME'] + '/Downloads', name)
shutil.move(path, new_location)
        sys.stderr.write('{0} was saved to {1}\n'.format(name, new_location))
|
the-stack_0_21602 | """
You have a long flowerbed in which some of the plots are planted,
and some are not. However, flowers cannot be planted in adjacent plots.
Given an integer array flowerbed containing 0's and 1's, where 0 means
empty and 1 means not empty, and an integer n, return if n new flowers
can be planted in the flowerbed without violating
the no-adjacent-flowers rule.
Example:
Input: flowerbed = [1,0,0,0,1], n = 1
Output: true
Example:
Input: flowerbed = [1,0,0,0,1], n = 2
Output: false
Constraints:
- 1 <= flowerbed.length <= 2 * 10^4
- flowerbed[i] is 0 or 1.
- There are no two adjacent flowers in flowerbed.
- 0 <= n <= flowerbed.length
"""
#Difficulty: Easy
#123 / 123 test cases passed.
#Runtime: 168 ms
#Memory Usage: 14.5 MB
#Runtime: 168 ms, faster than 41.51% of Python3 online submissions for Can Place Flowers.
#Memory Usage: 14.5 MB, less than 66.37% of Python3 online submissions for Can Place Flowers.
from typing import List
class Solution:
def canPlaceFlowers(self, flowerbed: List[int], n: int) -> bool:
count = 0
flowerbed = [0] + flowerbed + [0]
for x in flowerbed:
if x == 0:
count += 1
else:
count = 0
if count == 3:
n -= 1
count = 1
if not n:
break
return not n
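# Illustrative check (not part of the original submission): the two examples
# from the problem statement above.
if __name__ == '__main__':
    s = Solution()
    assert s.canPlaceFlowers([1, 0, 0, 0, 1], 1)
    assert not s.canPlaceFlowers([1, 0, 0, 0, 1], 2)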
|
the-stack_0_21604 | #%% IMPORT MODULES
from __future__ import division
import pickle
import argparse
import json
from os.path import exists
import numpy as np
import grr.GIF_network as gfn
from grr.Tools import check_dict_fields
from ezephys.stimtools import BiexponentialSynapticKernel
#%% PARSE COMMANDLINE ARGUMENTS
parser = argparse.ArgumentParser()
parser.add_argument(
'--sermods',
type=str,
required=True,
help='Pickled serotonin neuron models.',
)
parser.add_argument(
'--gabamods', type=str, required=True, help='Pickled GABA neuron models.'
)
parser.add_argument(
'--prefix',
type=str,
required=True,
help='Path to save GIF_network models.',
)
parser.add_argument(
'--opts', type=str, required=True, help='Path to opts JSON file.'
)
parser.add_argument(
'-r',
'--replicates',
default=1,
type=int,
help='No. of randomized models to generate.',
)
parser.add_argument(
'--seed', type=int, default=42, help='Random seed (default 42).'
)
parser.add_argument(
'--overwrite', action='store_true', help='Overwrite existing models.'
)
parser.add_argument(
'-v',
'--verbose',
action='store_true',
help='Print information about progress.',
)
args = parser.parse_args()
# Parse JSON opts file.
with open(args.opts, 'r') as f:
opts = json.load(f)
f.close()
# Ensure JSON object contains required fields.
required_fields = {
'dt': None,
'propagation_delay': None,
'gaba_input': {
'tau_rise': None,
'tau_decay': None,
'amplitude': None,
'reversal': None,
'duration': None,
},
'no_ser_neurons': None,
'no_gaba_neurons': None,
'connection_probability': None,
'fixed_IA_conductance': None,
'output_model_suffixes': {
'base': None,
'noIA': None,
'fixedIA': None,
'adaptation_swap': None,
'homogenous_adaptation_swap': None,
'homogenous': None,
'homogenous_GABA_only': None,
},
}
check_dict_fields(opts, required_fields)
#%% LOAD GIF MODELS
if args.verbose:
print('Loading 5HT models from {}'.format(args.sermods))
with open(args.sermods, 'rb') as f:
sergifs = pickle.load(f)
f.close()
if args.verbose:
print('Loading GABA models from {}'.format(args.gabamods))
with open(args.gabamods, 'rb') as f:
somgifs = pickle.load(f)
f.close()
if args.verbose:
print('Done loading single cell models!')
# SET RANDOM SEED
np.random.seed(args.seed)
# HELPER FUNCTION
def construct_file_name(number, kind):
"""Get file name for saving gifnet.
Filename format is:
`<args.prefix>_<number>_<kindsuffix>`
"""
if kind not in opts['output_model_suffixes'].keys():
raise ValueError('Unrecognized model kind {}'.format(kind))
fname = '_'.join(
[args.prefix, str(number), opts['output_model_suffixes'][kind]]
)
return fname
def safe_export(builder, number, model_kind):
"""Only export if the model doesn't already exist, or overwrite is set.
Parameters
----------
builder : GIFnetBuilder
model_kind : str
Used for the file name.
"""
file_name = construct_file_name(number, model_kind)
if args.overwrite:
if exists(file_name):
print('Model {} already exists. Overwriting.'.format(file_name))
builder.export_to_file(file_name)
elif not exists(file_name):
builder.export_to_file(file_name)
elif args.verbose:
print('Model {} already exists. Skipping.'.format(file_name))
# GENERATE MODELS
gaba_kernel = BiexponentialSynapticKernel(
size=opts['gaba_input']['amplitude'],
tau_rise=opts['gaba_input']['tau_rise'],
tau_decay=opts['gaba_input']['tau_decay'],
size_method='amplitude',
duration=opts['gaba_input']['duration'],
dt=opts['dt'],
front_padded=True,
)
subsample_builder = gfn.SubsampleGIFnetBuilder(
sergifs,
somgifs,
opts['no_ser_neurons'],
opts['no_gaba_neurons'],
opts['propagation_delay'],
gaba_kernel,
opts['gaba_input']['reversal'],
opts['connection_probability'],
opts['dt'],
'base',
)
homogenous_builder = gfn.HomogenousGIFnetBuilder(
sergifs,
somgifs,
opts['no_ser_neurons'],
opts['no_gaba_neurons'],
opts['propagation_delay'],
gaba_kernel,
opts['gaba_input']['reversal'],
opts['connection_probability'],
opts['dt'],
'homogenous',
)
for i in range(args.replicates):
if args.verbose:
print(
'Assembling GIFnet model set {} of {}.'.format(
i + 1, args.replicates
)
)
# Vanilla model.
subsample_builder.random_build()
safe_export(subsample_builder, i, 'base')
# Model with 5HT DV replaced by GABA value
swapped_dv_builder = gfn.SwappedDVGIFnetBuilder(
subsample_builder, opts['dt'], 'dv_swap_ser_only'
)
swapped_dv_builder.graft_gaba_dv_onto_ser()
safe_export(swapped_dv_builder, i, 'dv_swap_ser_only')
# Fixed IA.
fixedIA_builder = gfn.FixedIAGIFnetBuilder(
subsample_builder, opts['dt'], 'fixedIA'
)
fixedIA_builder.fix_IA(opts['fixed_IA_conductance'], None)
safe_export(fixedIA_builder, i, 'fixedIA')
# IA knockout.
fixedIA_builder.label = 'noIA'
fixedIA_builder.fix_IA(0.0, None)
safe_export(fixedIA_builder, i, 'noIA')
# Model with 5HT adaptation replaced by GABA adaptation.
swapped_adaptation_builder = gfn.SwappedAdaptationGIFnetBuilder(
subsample_builder, opts['dt'], 'adaptation_swap_ser_only'
)
swapped_adaptation_builder.swap_adaptation(
gaba_onto_ser=True, ser_onto_gaba=False
)
safe_export(swapped_adaptation_builder, i, 'adaptation_swap_ser_only')
# Model with 5HT adaptation AND DV replaced by GABA values
swapped_dv_adaptation_builder = gfn.SwappedDVGIFnetBuilder(
swapped_adaptation_builder, opts['dt'], 'dv_adaptation_swap_ser_only'
)
swapped_dv_adaptation_builder.graft_gaba_dv_onto_ser()
safe_export(
swapped_dv_adaptation_builder, i, 'dv_adaptation_swap_ser_only'
)
# Model with 5HT adaptation replaced by GABA AND VICE VERSA
swapped_adaptation_builder.swap_adaptation()
safe_export(swapped_adaptation_builder, i, 'adaptation_swap')
# Model with homogenous 5HT and GABA.
homogenous_builder.homogenous_build(
homogenous_5HT=True, homogenous_GABA=True
)
safe_export(homogenous_builder, i, 'homogenous')
# Model with homogenous 5HT and GABA and swapped adaptation.
homogenous_swapped_builder = gfn.SwappedAdaptationGIFnetBuilder(
homogenous_builder, opts['dt'], 'homogenous_adaptation_swap'
)
homogenous_swapped_builder.swap_adaptation()
safe_export(homogenous_builder, i, 'homogenous_adaptation_swap')
# Model with homogenous GABA and heterogenous 5HT.
homogenous_builder.homogenous_build(
homogenous_5HT=False, homogenous_GABA=True
)
safe_export(homogenous_builder, i, 'homogenous_GABA_only')
if args.verbose:
print('Finished! Exiting.')
|
the-stack_0_21606 | import numpy as np
# Here we define a number of functions used by FeTa.
# 1) FastNetTrim: contains initializations of the main variables and the outer loops of the algorithm.
# 2) minibatch: generates a minibatch from the data.
# 3) gradient_h: computes the gradient of the concave part of the objective.
# 4) SVRG_minibatch_acc: performs the SVRG algorithm from A PROXIMAL STOCHASTIC GRADIENT METHOD WITH PROGRESSIVE VARIANCE REDUCTION, with acceleration.
# 5) objective: evaluates the data fidelity term of the objective using the current value of U.
# 6) prox_l1: performs the proximal step for sparsifying the matrix U.
# 7) cal_metrics: evaluates the sparsity of the matrix U.
# 8) grad_apr: approximates the gradient of the rectifier. Note that q(x)=exp(x)/(1+exp(x)) is numerically unstable for large values of x (overflow), so it is replaced with 1 for x>=10, 0 for x<=-10, and exp(x)/(1+exp(x)) for the remaining values.
# 9) add_bias: adds a row of ones to the latent representations. This is needed to compute a layer bias.
#Inputs:
# Xtr: The latent input representations for the training set.
# Ytr: The latent output representations for the training set.
# Xval: The latent input representations for the validation set.
# Yval: The latent output representations for the validation set.
# params: A parameter vector for controlling the optimisation.
#Outputs:
# U: The pruned layer, the first row U[0,:] is the layer bias.
# obj: The values of the objective per iteration.
# Usparsity: The sparsity level per iteration.
def FastNetTrim(Xtr,Ytr,Xval,Yval,params):
#initialize obj
obj = np.empty([int(params[0])])
Usparsity = np.empty([int(params[0])])
Xtr = add_bias(Xtr)
Xval = add_bias(Xval)
dimX = Xtr.shape[0]
dimY = Ytr.shape[0]
#initialize U
U = np.random.normal(0,0.001,(dimX,dimY)) #Concerning matrix dimensions, U.T*X is a valid multiplication.
for i in range(0,int(params[0]) ):
# Compute gradient of concave part Maybe redundant??
grad_H = gradient_h(U,Xtr,Ytr,params)
# Perform gradient step on linearized objective
U = SVRG_minibatch_acc(U,grad_H,Xtr,Ytr,params)
# Compute Objective at the given location
obj[i] = objective(U,Xval,Yval,params)
Usparsity[i] = cal_metrics(U,params)
print("Iteration:", i,":: obj:",obj[i],":: sparsity:",Usparsity[i])
return(U,obj,Usparsity)
def minibatch(Xts,Yts,params):
indices = np.random.randint(0,Xts.shape[1],int(params[4]))
X = Xts[:,indices]
Y = Yts[:, indices]
return(X,Y)
def gradient_h(U,X,Y,params):
theta = params[1]
grad_H = 2 * (np.maximum(Y, 0)) * grad_apr(theta*(U.T @ X ) ) @ X.T * (1 / params[7])
return(grad_H)
def SVRG_minibatch_acc(U,grad_H,Xts,Yts,params):
s_size = 3
hta = params[5]
theta = params[1]
X_til_s = U
b = params[4]
n = params[7]
m = int(n/b)
beta = 0.95
for s in range(0,s_size):
print("Outer Iteration: ",s)
X_til = X_til_s
grad_til = 2 * ((1/theta)*np.log( 1+np.exp( theta*(X_til.T @ Xts) ) )) * grad_apr(theta*(X_til.T @ Xts) ) @ Xts.T * (1/params[7])
X_i = X_til
Y_i = X_til
for i in range(0,m):
X, Y = minibatch(Xts, Yts, params)
com1 = 2 * ((1 / theta) * np.log(1 + np.exp(theta * (Y_i.T @ X)))) * grad_apr(theta * (Y_i.T @ X))*(1/params[4])
com2 = 2 * ((1/theta)*np.log( 1+np.exp( theta*(X_til.T @ X) ) )) * grad_apr(theta*(X_til.T @ X) )*(1/params[4])
grad_i = (com1-com2)@ X.T+grad_til
tmp = Y_i-hta*(grad_i.T-grad_H.T)
X_i_1 = prox_l1(tmp,params[8])
Y_i = X_i_1 + beta*(X_i_1-X_i)
X_i = X_i_1
X_til_s = X_i
return X_til_s
def objective(U,X,Y,params):
obj = np.linalg.norm( (1/params[1])*np.log(1+params[1]*np.exp(U.T @ X) ) - Y)
obj = (obj*obj)/X.shape[1]
return obj
def prox_l1(U,spL):
U = np.maximum(0, np.abs(U) - spL) * np.sign(U)
return U
def cal_metrics(U, params):
loc = np.where(np.abs(U) == 0)
Usparsity = (loc[0].shape[0])/(U.shape[0]*U.shape[1])*100
return(Usparsity)
def grad_apr(x):
pos_1 = np.where(x>=10)
pos_0 = np.where(x<=-10)
    pos_other = np.where((x < 10) & (x > -10))
#Set ones
x[pos_1]=1
#Set zeros
x[pos_0]=0
#Set other values
x[pos_other]= np.exp(x[pos_other])*np.reciprocal(1+ np.exp(x[pos_other]) )
return x
def add_bias(X):
X = np.vstack( ( np.ones((1,X.shape[1])) ,X ) )
return(X)
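# Illustrative usage sketch (not from the original source). The layout of the
# `params` vector below is inferred from how the functions above index it and
# may not match the authors' intended ordering exactly:
#   params[0] outer iterations, params[1] theta, params[4] minibatch size,
#   params[5] SVRG step size, params[7] number of training samples,
#   params[8] l1 proximal threshold; the remaining slots are unused here.
if __name__ == '__main__':
    n_train, n_val, d_in, d_out = 1000, 200, 64, 32
    Xtr = np.random.randn(d_in, n_train)
    Ytr = np.maximum(np.random.randn(d_out, n_train), 0)
    Xval = np.random.randn(d_in, n_val)
    Yval = np.maximum(np.random.randn(d_out, n_val), 0)
    params = [3, 10.0, 0, 0, 100, 1e-4, 0, n_train, 1e-3]
    U, obj, Usparsity = FastNetTrim(Xtr, Ytr, Xval, Yval, params)
    print(obj, Usparsity)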
|
the-stack_0_21607 | #!/usr/bin/env python
import rospy
import sys
import time
import math
import rosnode
from geometry_msgs.msg import Pose, PoseStamped, PoseWithCovarianceStamped, Twist
from actionlib_msgs.msg import GoalStatusArray
from robotics.msg import Task, Bid, AuctionResult
from std_msgs.msg import Int16
wait_time_check_state = 2.0 # Time to wait before changing navigation state
class Robot():
def __init__(self, robot_ns):
self.robot_ns = robot_ns # namespace
self.pose = Pose()
self.velocity = 0.4 # max velocity (overridden by planner ros params)
self.state = -1 # not active
self.ready = 0 # not ready
# Published topics
self.bid_pub = rospy.Publisher('bid', Bid, queue_size=1)
self.move_pub = rospy.Publisher('move_base_simple/goal', PoseStamped, queue_size=1)
self.ready_pub = rospy.Publisher('bidder_ready', Int16, queue_size=1)
# Subscribed topics
rospy.Subscriber('amcl_pose', PoseWithCovarianceStamped, self.pose_callback)
rospy.Subscriber('/auction', Task, self.task_callback)
rospy.Subscriber('/auction_result', AuctionResult, self.result_callback)
rospy.Subscriber('move_base/status', GoalStatusArray, self.state_callback)
def pose_callback(self, msg): # Update robot pose
self.pose = msg.pose.pose
def task_callback(self, msg): # Distance-bid calculation if ready
if self.ready:
distance = (math.sqrt((msg.x2-msg.x1)**2+(msg.y2-msg.y1)**2) +
math.sqrt((msg.x1-self.pose.position.x)**2+(msg.y1-self.pose.position.y)**2))
self.bid_pub.publish(distance/self.velocity)
def result_callback(self, msg):
        if msg.robot_id == self.robot_ns: # If robot is chosen by auctioneer:
self.ready = 0
self.ready_pub.publish(0) # Temporarily not ready for future bids
goal = self.calculate_goal(msg.task.x1, msg.task.y1)
self.move_pub.publish(goal) # Publish initial goal to navigation planner
time.sleep(wait_time_check_state)
while self.state != 3: # While navigating, sleep auction process
time.sleep(0.2)
goal = self.calculate_goal(msg.task.x2, msg.task.y2)
self.move_pub.publish(goal) # Publish end goal
time.sleep(wait_time_check_state)
while self.state != 3:
time.sleep(0.2)
self.ready = 1
self.ready_pub.publish(1) # Ready again for next auction
def state_callback(self, msg): # Update robot state
if len(msg.status_list) == 0:
self.state = -1
else:
self.state = msg.status_list[-1].status
def calculate_goal(self, x, y): # Calculate goal for navigation planner
goal = PoseStamped()
goal.header.frame_id = "map"
goal.header.stamp = rospy.Time.now()
goal.pose.position.x = x
goal.pose.position.y = y
goal.pose.position.z = 0
goal.pose.orientation.x = 0
goal.pose.orientation.y = 0
goal.pose.orientation.z = 0
goal.pose.orientation.w = 1
return goal
if __name__ == '__main__':
rospy.init_node('bidder') # Init node bidder (under namespace)
rate = rospy.Rate(2.0)
robot_ns = sys.argv[1] # namespace
robot = Robot(robot_ns)
started = False
while not rospy.is_shutdown():
if not started:
nodes = rosnode.get_node_names()
if '/auctioneer' in nodes: # If auctioneer ready, start
started = True
robot.velocity = rospy.get_param('move_base/TebLocalPlannerROS/max_vel_x')
robot.ready_pub.publish(1)
robot.ready = 1
if robot.ready: # Everything correct, robot is up and ready
robot.ready_pub.publish(1)
rate.sleep()
|
the-stack_0_21608 | # 3p
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
from scipy.interpolate import UnivariateSpline
import torch
class LRFinder:
"""Implements Leslie N. Smith's Learning rate finder method. Use `.find()` method to search for the best learning rate.
use `.plot()` method to plot the loss
"""
def __init__(self, model, optimizer, criterion, cuda=True):
"""
Arguments:
model {torch.nn.Module} -- Used model
optimizer {torch.optim.Optimizer} -- Used optimizer
criterion {torch.nn.Module)} -- Loss function
Keyword Arguments:
cuda {bool} -- Use cuda if available (default: {True})
"""
self.model = model
self.optimizer = optimizer
self.criterion = criterion
self.device = torch.device('cuda') if (cuda and torch.cuda.is_available()) else torch.device('cpu')
self.history = {"loss": [], "lr": []}
# set device
self.model.to(self.device)
self.criterion.to(self.device)
def find(self, train_loader, val_loader=None, num_iter=100, init_value=1e-6, final_value=10., div_th=5, beta=0.98):
"""Performs the learning rate range test.
Arguments:
train_loader {torch.utils.data.DataLoader} -- Training set dataloader
Keyword Arguments:
val_loader {torch.utils.data.DataLoader} -- Validation set dataloader. If None, range test will be performed only with train_loader (default: {None})
            num_iter {int} -- Maximum number of iterations. Determines the discretization of the interval (default: {100})
            init_value {float} -- Minimum learning rate to start with. (default: {1e-6})
            final_value {float} -- Maximum learning rate before stopping the range test (default: {10.})
div_th {int} -- Stop the range test if the loss attains div_th * min_loss (default: {5})
beta {float} -- Parameter used to smooth the loss. must be in [0, 1) (default: {0.98})
"""
best_loss = float("inf")
lr = init_value
self.optimizer.param_groups[0]['lr'] = lr
lr_update = (final_value / init_value) ** (1/num_iter) # we use an exponential step mode
avg_loss = 0
# iterate over training data
iterator = iter(train_loader)
for iteration in tqdm(range(num_iter)):
# Get a new set of inputs and labels
try:
inputs, labels = next(iterator)
except StopIteration:
iterator = iter(train_loader)
inputs, labels = next(iterator)
# train model using batch
if val_loader is None:
loss = self._train_val_model(inputs, labels, val_loader)
else:
loss = self._train_val_model(inputs, labels, val_loader, phase="val")
# Update the lr for the next step
self.history["lr"].append(lr)
lr *= lr_update
self.optimizer.param_groups[0]['lr'] = lr
# smooth loss and check for divergence
avg_loss = beta * avg_loss + (1-beta) * loss
smoothed_loss = avg_loss / (1 - beta**(iteration+1))
self.history["loss"].append(smoothed_loss)
if smoothed_loss > div_th * best_loss:
break
elif smoothed_loss < best_loss:
best_loss = smoothed_loss
print("LR Finder is complete. See the graph using `.plot()` method.")
def _train_val_model(self, inputs, labels, val_loader, phase="train"):
"""train the model for one mini-batch if phase==train, perform a validation step otherwise
Arguments:
inputs {torch.Tensor} -- Input data
labels {torch.Tensor} -- Labels of the input data
val_loader {torch.utils.data.DataLoader} -- Validation set dataloader.
Keyword Arguments:
phase {str} -- Either `train` or `val` (default: {"train"})
Returns:
{float} -- loss obtained
"""
if phase == 'train':
self.model.train() # Set model to training mode
else:
self.model.eval() # Set model to evaluate mode
running_loss = 0.0
# Iterate over data.
dataloader = [(inputs, labels)] if phase == 'train' else val_loader
for inputs, labels in dataloader:
inputs = inputs.to(self.device)
labels = labels.to(self.device)
# zero the parameter gradients
self.optimizer.zero_grad()
# forward
# track history if only in train
with torch.set_grad_enabled(phase == 'train'):
outputs = self.model(inputs)
predictions = torch.max(outputs, 1)[1]
loss = self.criterion(outputs, labels)
# backward + optimize only if in training phase
if phase == 'train':
loss.backward()
self.optimizer.step()
# statistics
running_loss += loss.item() * inputs.size(0)
epoch_loss = (running_loss / inputs.size(0)) if phase == 'train' else (running_loss / len(dataloader.dataset))
return epoch_loss
def plot(self, skip_start=10, skip_end=5, log_lr=True, smooth=True, save_plot_to=None):
"""Plot the learning rate range test.
Keyword Arguments:
skip_start {int} -- number of batches to trim from the start (default: {10})
skip_end {int} -- number of batches to trim from the end (default: {5})
log_lr {bool} -- True to plot the learning rate in a logarithmic scale (default: {True})
smooth {bool} -- True to smooth the loss function using UnivariateSpline smoother (default: {True})
save_plot_to {[type]} -- Path to where to save the figure. None to disable saving. (default: {None})
"""
assert (skip_start >= 0 and skip_end >= 0), "skip_start and skip_end must be>=0!"
lrs = self.history["lr"][skip_start:-skip_end] if skip_end > 0 else self.history["lr"][skip_start:]
losses = self.history["loss"][skip_start:-skip_end] if skip_end > 0 else self.history["loss"][skip_start:]
if smooth:
spl = UnivariateSpline(lrs, losses)
losses = spl(lrs)
# get minimum lr over loss and gradient
mg = (np.gradient(np.array(losses))).argmin()
ml = np.argmin(losses)
print(f"Min numerical gradient: {lrs[mg]}")
print(f"Min loss: {lrs[ml]}")
# Plot loss as a function of the learning rate
plt.plot(lrs, losses)
plt.plot(lrs[mg], losses[mg], markersize=10, marker='o', color='red')
plt.plot(lrs[ml], losses[ml], markersize=10, marker='x', color='green')
plt.legend(["Loss", "Min numerical gradient", "Min loss"])
if log_lr:
plt.xscale("log")
plt.xlabel("Learning rate")
plt.ylabel("Loss")
if save_plot_to is not None:
plt.savefig(save_plot_to)
plt.show()
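# Illustrative usage sketch (not part of the original module); the toy dataset
# and model below are placeholders, only the LRFinder calls mirror the API above.
if __name__ == '__main__':
    import torch.nn as nn
    from torch.utils.data import DataLoader, TensorDataset
    X = torch.randn(512, 20)
    y = torch.randint(0, 3, (512,))
    loader = DataLoader(TensorDataset(X, y), batch_size=32, shuffle=True)
    model = nn.Sequential(nn.Linear(20, 64), nn.ReLU(), nn.Linear(64, 3))
    optimizer = torch.optim.SGD(model.parameters(), lr=1e-6)
    criterion = nn.CrossEntropyLoss()
    finder = LRFinder(model, optimizer, criterion, cuda=False)
    finder.find(loader, num_iter=100)
    finder.plot(save_plot_to='lr_range_test.png')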
|
the-stack_0_21609 | # %%
# ======================== Create Tensor ======================== #
import torch
from torch.types import Number
scaler = torch.tensor(1)
vector = torch.tensor([1, 2, 3, 4, 5, 6])
matrix = torch.tensor([[1, 2, 3], [4, 5, 6]],dtype=torch.float32)
tensor3D = torch.tensor([
[[1, 2, 3, 4], [4, 5, 6, 7], [7, 8, 9, 10]],
[[10, 11, 12, 13], [13, 14, 15, 16], [16, 17, 18, 19]]
])
print(scaler, scaler.shape)
print('\n', vector, vector.shape)
print(f'\nvector.shape: {vector.shape}')
print(f'\nvector.size: {vector.size()}')
print('\n', matrix, matrix.shape)
print(f'\nmatrix.dtype: {matrix.dtype}')
print('\n', tensor3D, tensor3D.shape)
# %%
# ======================== 4D Tensor (2,3,4,2) ======================== #
tensor4D = torch.tensor([
[
[[1, 2], [3, 4], [5, 6], [7, 8]],
[[1, 2], [3, 4], [5, 6], [7, 8]],
[[1, 2], [3, 4], [5, 6], [7, 8]]
],
[
[[1, 2], [3, 4], [5, 6], [7, 8]],
[[1, 2], [3, 4], [5, 6], [7, 8]],
[[1, 2], [3, 4], [5, 6], [7, 8]]
]
])
print(tensor4D)
print(f"\ntensor4D.shape: {tensor4D.shape}")
# %%
# ======================== Questions? ======================== #
# What is the difference between torch.tensor and torch.Tensor?
'''
In PyTorch 'torch.Tensor' is the main tensor class. So all tensors are just instances of 'torch.Tensor'.
When you call 'torch.Tensor()' you will get an empty tensor without any data.
In contrast 'torch.tensor' is a function which returns a tensor. In the documentation it says:
torch.tensor(data, dtype=None, device=None, requires_grad=False) --> Tensor --> Constructs a tensor with data.
This also explains why it is no problem creating an empty tensor instance of `torch.Tensor` without `data` by calling:
'tensor_without_data = torch.Tensor()'
But on the other side:
'tensor_without_data = torch.tensor()'
Will lead to an error:
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-12-ebc3ceaa76d2> in <module>()
----> 1 torch.tensor()
TypeError: tensor() missing 1 required positional arguments: "data"
---------------------------------------------------------------------------
But in general there is no reason to choose `torch.Tensor` over `torch.tensor`. Also `torch.Tensor` lacks a docstring.
Similar behaviour for creating a tensor without data like with: torch.Tensor() can be achieved using:
'torch.tensor(())'
Output:
'tensor([])'
'''
# %%
# ======================== Slicing_Indexing_dtype ======================== #
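# Illustrative sketch (not part of the original cells): basic slicing, indexing
# and dtype handling using the tensors defined in the first cell above.
print(vector[2:5])                    # slice -> tensor([3, 4, 5])
print(matrix[0])                      # first row
print(matrix[:, 1])                   # second column
print(matrix[1, 2])                   # single element as a 0-dim tensor
print(matrix[1, 2].item())            # extract the plain Python number
print(matrix.to(torch.int64).dtype)   # cast float32 -> int64
print(matrix.long().dtype)            # equivalent shorthand cast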
|
the-stack_0_21614 | """ This python file contains tests for d3ploy's sharing capability
of one commodity between different facilities, for both
DemandDrivenDeploymentInst and SupplyDrivenDeploymentInst
archetypes.
"""
import json
import re
import subprocess
import os
import sqlite3 as lite
import pytest
import copy
import glob
import sys
import numpy as np
import d3ploy.tester as functions
from nose.tools import assert_in, assert_true, assert_equals
# Delete previously generated files
direc = os.listdir('./')
hit_list = glob.glob('*.sqlite') + glob.glob('*.json') + glob.glob('*.png')
for file in hit_list:
os.remove(file)
ENV = dict(os.environ)
ENV['PYTHONPATH'] = ".:" + ENV.get('PYTHONPATH', '')
TEMPLATE = {
"simulation": {
"archetypes": {
"spec": [
{"lib": "agents", "name": "NullRegion"},
{"lib": "agents", "name": "NullInst"},
{"lib": "cycamore", "name": "Source"},
{"lib": "cycamore", "name": "Reactor"},
{"lib": "cycamore", "name": "Sink"},
{"lib": "d3ploy.demand_driven_deployment_inst",
"name": "DemandDrivenDeploymentInst"},
{"lib": "d3ploy.supply_driven_deployment_inst",
"name": "SupplyDrivenDeploymentInst"}
]
},
"control": {"duration": "4", "startmonth": "1", "startyear": "2000"},
"facility": [
{
"config": {"Source": {"outcommod": "sourceout",
"outrecipe": "sourceoutrecipe",
"throughput": "1E5"}},
"name": "source"
},
{
"config": {"Sink": {"in_commods": {"val": "reactorout"},
"max_inv_size": "1E6"}},
"name": "sink"
},
{
"config": {
"Reactor": {
"assem_size": "100",
"cycle_time": "1",
"fuel_incommods": {"val": "sourceout"},
"fuel_inrecipes": {"val": "sourceoutrecipe"},
"fuel_outcommods": {"val": "reactorout"},
"fuel_outrecipes": {"val": "reactoroutrecipe"},
"n_assem_batch": "1",
"n_assem_core": "1",
"power_cap": "2",
"refuel_time": "0"
}
},
"name": "reactor1"
},
{
"config": {
"Reactor": {
"assem_size": "100",
"cycle_time": "1",
"fuel_incommods": {"val": "sourceout"},
"fuel_inrecipes": {"val": "sourceoutrecipe"},
"fuel_outcommods": {"val": "reactorout"},
"fuel_outrecipes": {"val": "reactoroutrecipe"},
"n_assem_batch": "1",
"n_assem_core": "1",
"power_cap": "3",
"refuel_time": "0"
}
},
"name": "reactor2"
}
],
"recipe": [
{
"basis": "mass",
"name": "sourceoutrecipe",
"nuclide": [{"comp": "0.711", "id": "U235"},
{"comp": "99.289", "id": "U238"}]
},
{
"basis": "mass",
"name": "reactoroutrecipe",
"nuclide": [{"comp": "50", "id": "Kr85"},
{"comp": "50", "id": "Cs137"}]
}
]
}
}
# ------------------------------------------------------------------ #
# Two prototypes of reactor are deployed. They produce 2 and 3 MW,
# respectively. The sharing percentages are 40 and 60%. The
# power demand increases by 10 MW every timestep. Then to meet
# the demand, two reactors of each type has to be deployed.
# This test will fail if at any time step two of each type of
# reactor are not deployed.
share_template = copy.deepcopy(TEMPLATE)
share_template["simulation"].update({"region": {
"config": {"NullRegion": "\n "},
"institution": [
{"config": {"NullInst": "\n "},
"initialfacilitylist": {
"entry": [
{"number": "1", "prototype": "source"},
{"number": "1", "prototype": "sink"}
]
},
"name": "sink_source_facilities"
},
{"config": {
"DemandDrivenDeploymentInst": {
"calc_method": "ma",
"facility_capacity": {
"item": [
{"capacity": "2", "facility": "reactor1"},
{"capacity": "3", "facility": "reactor2"}
]
},
"facility_commod": {
"item": [
{"commod": "POWER", "facility": "reactor1"},
{"commod": "POWER", "facility": "reactor2"}
]
},
"facility_sharing": {
"item": [
{"percentage": "40", "facility": "reactor1"},
{"percentage": "60", "facility": "reactor2"}
]
},
"demand_eq": "10*t",
}
},
"name": "reactor_inst"
}
],
"name": "SingleRegion"
}
})
def test_supply_buffer():
output_file_share = 'share.sqlite'
input_file_share = output_file_share.replace('.sqlite', '.json')
with open(input_file_share, 'w') as f:
json.dump(share_template, f)
s = subprocess.check_output(['cyclus',
'-o',
output_file_share,
input_file_share],
universal_newlines=True,
env=ENV)
# check number of reactors deployed
cur_share = functions.get_cursor(output_file_share)
reactors = cur_share.execute("select entertime, prototype from " +
"agententry where prototype = " +
"'reactor1' or prototype = " +
"'reactor2'").fetchall()
j = 0
count_errors = 0
for i in range(1, 4):
count_reactor1 = 0
count_reactor2 = 0
while int(reactors[j][0]) <= i:
if reactors[j][1] == 'reactor1':
count_reactor1 += 1
if reactors[j][1] == 'reactor2':
count_reactor2 += 1
j += 1
if j == len(reactors):
break
if count_reactor1 != count_reactor2:
count_errors += 1
assert(count_errors == 0)
|
the-stack_0_21615 | # Copyright (C) 2012 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import struct
import socket
from . import packet_base
from . import packet_utils
from . import ipv4
from ryu.ofproto import ether
class mpls(packet_base.PacketBase):
"""MPLS (RFC 3032) header encoder/decoder class.
NOTE: When decoding, this implementation assumes that the inner protocol
is IPv4.
An instance has the following attributes at least.
    Most of them are the same as the on-wire counterparts but in host byte order.
    __init__ takes the corresponding args in this order.
============== ====================
Attribute Description
============== ====================
label Label Value
exp Experimental Use
bsb Bottom of Stack
ttl Time To Live
============== ====================
"""
_PACK_STR = '!I'
_MIN_LEN = struct.calcsize(_PACK_STR)
def __init__(self, label=0, exp=0, bsb=1, ttl=255):
super(mpls, self).__init__()
self.label = label
self.exp = exp
self.bsb = bsb
self.ttl = ttl
@classmethod
def parser(cls, buf):
(label,) = struct.unpack_from(cls._PACK_STR, buf)
ttl = label & 0xff
bsb = (label >> 8) & 1
exp = (label >> 9) & 7
label = label >> 12
msg = cls(label, exp, bsb, ttl)
if bsb:
return msg, ipv4.ipv4, buf[msg._MIN_LEN:]
else:
return msg, mpls, buf[msg._MIN_LEN:]
def serialize(self, payload, prev):
val = self.label << 12 | self.exp << 9 | self.bsb << 8 | self.ttl
return struct.pack(mpls._PACK_STR, val)
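# Illustrative usage sketch (not part of the Ryu API): round-trip one MPLS shim
# header through serialize() and parser(). The label/exp/ttl values are made up.
def _demo_mpls_roundtrip():
    hdr = mpls(label=16000, exp=5, bsb=1, ttl=64)
    buf = hdr.serialize(payload=b'', prev=None)
    decoded, next_cls, rest = mpls.parser(buf)
    # bsb == 1, so parser() reports IPv4 as the inner protocol and no bytes remain.
    assert (decoded.label, decoded.exp, decoded.bsb, decoded.ttl) == (16000, 5, 1, 64)
    assert next_cls is ipv4.ipv4 and rest == b''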
|
the-stack_0_21617 | from typing import Tuple
import pandas as pd
import xarray as xr
import glob
import logging
import os
import datetime as dt
logger = logging.getLogger(__name__)
def discover_single_file_for_basin(data_dir: str, basin: str) -> str:
"""
Discovers a single dataset file for the specified basin. Discovery will be performed using the pattern
    '{data_dir}/*{basin}*', i.e. the basin ID has to be present somewhere in the file name. Note that
    basin ID '123', for example, will match the following file names: 123_streamflow.txt, 123.nc,
    00123456_daymet_v4_daily_na.nc4, streamflow_123.csv. Make sure your file names are unique, otherwise
    only the first occurrence will be returned.
Parameters
----------
data_dir: str
The data directory used for discovering a dataset file related to the specified basin
basin: str
ID of the basin
Returns
-------
str
Path of the file, which is related to the specified basin
"""
# TODO Think about more sophisticated file discovery using regex, such as (?<![0-9])basin(?![0-9])
files = glob.glob(f"{data_dir}/**/*{basin}*", recursive=True)
if len(files) == 0:
raise FileNotFoundError(f"Can't find file for basin {basin} within directory {data_dir}.")
if len(files) > 1:
logger.warning(f"Found multiple files for basin {basin} within directory {data_dir}. "
f"First one found will be returned.")
return files[0]
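# Illustrative usage sketch (hypothetical file name, throwaway directory):
# discovery only requires the basin ID to appear somewhere in the file name.
def _demo_discover_single_file_for_basin():
    import tempfile
    tmp_dir = tempfile.mkdtemp()
    open(os.path.join(tmp_dir, "01013500_streamflow_qc.txt"), "w").close()
    found = discover_single_file_for_basin(tmp_dir, "01013500")
    assert os.path.basename(found) == "01013500_streamflow_qc.txt"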
def discover_files_for_basins(data_dir: str, basins: list) -> list:
"""
    Discovers one dataset file per basin in the given list (see discover_single_file_for_basin).
    Parameters
----------
data_dir: str
The data directory used for discovering dataset files related to the specified basins
basins: list
List of basin IDs
Returns
-------
list
List that holds the dataset path to each basin
"""
return [discover_single_file_for_basin(data_dir, b) for b in basins]
def discover_daymet_files(root_data_dir: str, variables: list):
"""
Discovers all Daymet NetCDF files from a root directory for given variables.
    Folder structure and file naming must follow the convention {root_dir}/{variable}/*_daymet_v4_daily_na_{variable}_*.nc.
E.g. for given variables [var1, var2] the method will discover within the directory {root_dir}/var1 all NetCDF files
with filename *_daymet_v4_daily_na_var1_*.nc and within the directory {root_dir}/var2 all NetCDF files with filename
*_daymet_v4_daily_na_var2_*.nc. Files and directories that do not follow these conventions will be ignored.
Parameters
----------
root_data_dir: str
Root data dir
variables: list
List of variables that should be considered for file discovery
Returns
-------
List
List of NetCDF file paths
"""
file_list = []
for variable in variables:
data_dir = os.path.join(root_data_dir, variable)
files = glob.glob(f"{data_dir}/*_daymet_v4_daily_na_{variable}_*.nc")
if not files:
logger.warning(f"No files found in path {data_dir} for variable {variable}.")
else:
file_list.extend(files)
return file_list
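# Illustrative usage sketch (empty placeholder file in a throwaway directory):
# one sub-folder per variable, files named *_daymet_v4_daily_na_<variable>_*.nc.
def _demo_discover_daymet_files():
    import tempfile
    root = tempfile.mkdtemp()
    os.makedirs(os.path.join(root, "prcp"))
    open(os.path.join(root, "prcp", "11000_daymet_v4_daily_na_prcp_1980.nc"), "w").close()
    assert len(discover_daymet_files(root, ["prcp"])) == 1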
def discover_single_camels_us_forcings_file(data_dir: str, forcings_type: str, basin: str):
"""
Discovers a single CAMELS-US forcing file by using the pattern '{data_dir}/**/{basin}_lump_{forcings_type}_forcing_leap.txt'.
Parameters
----------
data_dir: str
Path to the CAMELS-US data directory for forcings.
forcings_type: str
Type of the forcings timeseries, i.e. one of 'daymet', 'maurer', or 'nldas'
basin: str
ID of the basin, the forcings file will be discovered for.
Returns
-------
str
Path to the discovered forcings file
"""
type_dict = {"daymet": "cida", "maurer": "maurer", "nldas": "nldas"}
if forcings_type in type_dict:
files = glob.glob(f"{data_dir}/**/{basin}_lump_{type_dict[forcings_type]}_forcing_leap.txt", recursive=True)
if len(files) == 0:
raise FileNotFoundError(f"Can't find file for basin {basin} within directory {data_dir}.")
if len(files) > 1:
logger.warning(f"Found multiple files for basin {basin} within directory {data_dir}. "
f"First one found will be returned.")
else:
raise ValueError(f"Invalid forcings type `{forcings_type}` specified.")
return files[0]
def discover_single_camels_us_streamflow_file(data_dir: str, basin: str):
"""
Discovers a single CAMELS-US streamflow file by using the pattern '{data_dir}/**/{basin}_streamflow_qc.txt'.
Parameters
----------
data_dir: str
Path to the CAMELS-US data directory for streamflow.
basin: str
ID of the basin, the streamflow file will be discovered for.
Returns
-------
str
Path to the discovered streamflow file
"""
files = glob.glob(f"{data_dir}/**/{basin}_streamflow_qc.txt", recursive=True)
if len(files) == 0:
raise FileNotFoundError(f"Can't find file for basin {basin} within directory {data_dir}.")
if len(files) > 1:
logger.warning(f"Found multiple files for basin {basin} within directory {data_dir}. "
f"First one found will be returned.")
return files[0]
def discover_multiple_camels_us_forcings_files(data_dir: str, forcings_type: str, basins: list = None):
"""
    Discovers multiple CAMELS-US forcings files. All files that follow the pattern
    '{data_dir}/**/*_lump_{forcings_type}_forcing_leap.txt' will be considered.
Parameters
----------
data_dir: str
Path to the CAMELS-US data directory for forcings.
forcings_type: str
Type of the forcing timeseries, i.e. one of 'daymet', 'maurer', or 'nldas'
basins: list
List of basins, the forcings files will be discovered for. If 'None', all present files will be considered
Returns
-------
list
List of forcing file paths for the specified basins.
"""
type_dict = {"daymet": "cida", "maurer": "maurer", "nldas": "nldas"}
if forcings_type in type_dict:
files = glob.glob(f"{data_dir}/**/*_lump_{type_dict[forcings_type]}_forcing_leap.txt", recursive=True)
if basins is not None:
files = [f for f in files if (any(basin == os.path.basename(f)[0:8] for basin in basins))]
else:
raise ValueError(f"Invalid forcings type `{forcings_type}` specified.")
return files
def discover_multiple_camels_us_streamflow_files(data_dir: str, basins: list = None):
"""
    Discovers multiple CAMELS-US streamflow files. All files that follow the pattern
    '{data_dir}/**/*_streamflow_qc.txt' will be considered.
Parameters
----------
data_dir: str
Path to the CAMELS-US data directory for streamflow
basins: list
List of basins, the streamflow files will be discovered for. If 'None', all present files will be considered.
Returns
-------
list
List of streamflow file paths for the specified basins.
"""
    files = glob.glob(f"{data_dir}/**/*_streamflow_qc.txt", recursive=True)
if basins is not None:
files = [f for f in files if (any(basin == os.path.basename(f)[0:8] for basin in basins))]
return files
def load_forcings(path: str, ds_type: str):
"""
Load a dataset that contains forcing data
Parameters
----------
path: str
Path to the forcings dataset
ds_type: str
Type of dataset. One of {camels-us, daymet-2d}
Returns
-------
    Dataset containing forcings timeseries data
"""
if ds_type == "camels-us":
return load_forcings_camels_us(path)
if ds_type == "daymet-2d":
return load_forcings_daymet_2d(path)
raise ValueError("Unsupported forcings dataset type '{}'".format(ds_type))
def load_forcings_camels_us(path: str) -> pd.DataFrame:
"""
Loads CAMELS forcing data from raw text files
Parameters
----------
path: str
Path to the raw text file containing forcing data for a certain basin
Returns
-------
pd.DataFrame
DataFrame containing DateTime indexed forcing data for a basin
"""
colnames = pd.read_csv(path, sep=' ', skiprows=3, nrows=1, header=None)
df = pd.read_csv(path, sep='\t', skiprows=4, header=None, decimal='.',
names=colnames.iloc[0, 3:])
dates = df.iloc[:, 0]
df = df.drop(columns=df.columns[0])
df["date"] = pd.to_datetime(dates.str.split(expand=True)
.drop([3], axis=1)
.rename(columns={0: "year", 1: "month", 2: "day"}))
df = df.set_index("date")
return df
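# Illustrative usage sketch (synthetic file, made-up numbers): the raw CAMELS-US
# forcings layout is three metadata lines, a space-separated header line, and
# tab-separated data rows whose first field holds "year month day hour".
def _demo_load_forcings_camels_us():
    import tempfile
    lines = ["45.5", "250.0", "1.5e9",
             "Year Mnth Day Hr prcp(mm/day) tmax(C)",
             "1980 01 01 12\t3.2\t10.5",
             "1980 01 02 12\t0.0\t12.1"]
    with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as f:
        f.write("\n".join(lines) + "\n")
        path = f.name
    df = load_forcings_camels_us(path)
    assert df.loc["1980-01-01", "prcp(mm/day)"] == 3.2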
def load_forcings_gauge_metadata(path: str) -> Tuple[float, float, float]:
"""
    Loads gauge metadata from the header of a CAMELS-US forcings file.
Parameters
----------
path: str
Path to the forcings file.
Returns
-------
tuple
(gauge latitude, gauge elevation, basin area [m²])
"""
with open(path, 'r') as file:
latitude = float(file.readline())
elevation = float(file.readline())
area = float(file.readline())
return latitude, elevation, area
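# Illustrative usage sketch (invented values): the gauge metadata header is just
# three numeric lines holding latitude, elevation and basin area in m².
def _demo_load_forcings_gauge_metadata():
    import tempfile
    with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as f:
        f.write("45.5\n250.0\n1.5e9\n")
        path = f.name
    assert load_forcings_gauge_metadata(path) == (45.5, 250.0, 1.5e9)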
def load_forcings_daymet_2d(path: str) -> xr.Dataset:
"""
Parameters
----------
path: str
Path to a Daymet NetCDF dataset
Returns
-------
xarray.Dataset
        Dataset that contains 2-dimensional Daymet forcings data
"""
with xr.open_dataset(path) as ds:
return ds
def load_multiple_forcings_daymet_2d(file_paths: list) -> xr.Dataset:
"""
Loads multiple Daymet forcings NetCDF files from a list of file paths as Dask arrays wrapped within a Dataset.
Parameters
----------
    file_paths: list
File paths to multiple Daymet NetCDF datasets
Returns
-------
xarray.Dataset
        Dataset that contains 2-dimensional Daymet forcings data
"""
with xr.open_mfdataset(file_paths, engine='h5netcdf') as ds:
return ds
def load_forcings_daymet_2d_from_zarr(store_dir: str) -> xr.Dataset:
"""
Loads Daymet forcings from a Zarr store as Dask arrays wrapped within a Dataset.
Parameters
----------
store_dir: str
Zarr store directory
Returns
-------
xarray.Dataset
        Dataset that contains 2-dimensional Daymet forcings data
"""
with xr.open_zarr(store_dir) as ds:
return ds
def load_streamflow(path: str, ds_type: str):
"""
Load streamflow data
Parameters
----------
path: str
Path to a streamflow dataset
ds_type: str
Type of the streamflow dataset. One of {camels-us}
Returns
-------
Dataset containing streamflow timeseries data
"""
if ds_type == "camels-us":
return load_streamflow_camels_us(path)
raise ValueError("Unsupported streamflow dataset type '{}'".format(ds_type))
def load_streamflow_camels_us(path: str) -> pd.DataFrame:
"""
Loads CAMELS streamflow data from raw text files
Parameters
----------
path: str
Path to the raw text file containing streamflow data for a certain basin
Returns
-------
pd.DataFrame
DataFrame containing DateTime indexed streamflow data for a basin
"""
df = pd.read_csv(path, delim_whitespace=True, header=None, decimal='.', na_values=["-999.00"],
names=["gauge_id", "year", "month", "day", "streamflow", "qc_flag"], dtype={"gauge_id": str})
df["date"] = pd.to_datetime(df[["year", "month", "day"]])
df = df.drop(columns=["year", "month", "day"]).set_index("date")
return df
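# Illustrative usage sketch (synthetic two-row file): "-999.00" becomes NaN and
# the year/month/day columns collapse into a DatetimeIndex.
def _demo_load_streamflow_camels_us():
    import tempfile
    content = ("01013500 1980 01 01 220.00 A\n"
               "01013500 1980 01 02 -999.00 M\n")
    with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as f:
        f.write(content)
        path = f.name
    df = load_streamflow_camels_us(path)
    assert df.loc["1980-01-01", "streamflow"] == 220.0
    assert pd.isna(df.loc["1980-01-02", "streamflow"])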
def load_camels_us_basin_physical_characteristics(path: str) -> pd.DataFrame:
"""
    Loads physical characteristics for CAMELS-US basins
Parameters
----------
path: str
Path to the metadata file
Returns
-------
pd.DataFrame
        DataFrame containing physical characteristics for CAMELS-US basins
"""
return pd.read_csv(path, delim_whitespace=True, decimal='.', dtype={"BASIN_ID": str})
def load_camels_us_gauge_information(path: str) -> pd.DataFrame:
"""
    Loads gauge information metadata for CAMELS-US basins
Parameters
----------
path: str
Path to the metadata file
Returns
-------
pd.DataFrame
        DataFrame containing gauge information for CAMELS-US basins
"""
return pd.read_csv(path, delim_whitespace=True, decimal='.', dtype={"HUC_02": str, "GAGE_ID": str})
def create_out_dir(output: str, name: str) -> str:
"""
    Creates a directory, named after the given run name and the current timestamp, inside the given output folder.
    It can be used for storing outputs such as logs, monitoring metrics or saved models.
Parameters
----------
output: str
Output directory
name: str
Name of the current run
Returns
-------
str
Path of the created directory
"""
timestamp = dt.datetime.now().strftime("%Y%m%d%H%M%S%z")
out_dir = os.path.join(output, f"{timestamp}_{name}")
if not os.path.exists(out_dir):
os.mkdir(out_dir)
logger.info(f"Created directory {out_dir} for storing outputs.")
else:
logger.warning(f"Directory {out_dir} already exists.")
return out_dir
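# Illustrative usage sketch (throwaway base folder, arbitrary run name): the run
# directory is created as "<timestamp>_<name>" below the output folder.
def _demo_create_out_dir():
    import tempfile
    run_dir = create_out_dir(tempfile.mkdtemp(), "camels-test-run")
    assert os.path.isdir(run_dir)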
|
the-stack_0_21618 | import os
from sympy import sin, cos
from sympy.external import import_module
from sympy.testing.pytest import skip
from sympy.parsing.autolev import parse_autolev
antlr4 = import_module("antlr4")
if not antlr4:
disabled = True
FILE_DIR = os.path.dirname(
os.path.dirname(os.path.abspath(os.path.realpath(__file__))))
def _test_examples(in_filename, out_filename, test_name=""):
in_file_path = os.path.join(FILE_DIR, 'autolev', 'test-examples',
in_filename)
correct_file_path = os.path.join(FILE_DIR, 'autolev', 'test-examples',
out_filename)
with open(in_file_path) as f:
generated_code = parse_autolev(f, include_numeric=True)
with open(correct_file_path) as f:
for idx, line1 in enumerate(f):
if line1.startswith("#"):
break
try:
line2 = generated_code.split('\n')[idx]
assert line1.rstrip() == line2.rstrip()
except Exception:
msg = 'mismatch in ' + test_name + ' in line no: {0}'
raise AssertionError(msg.format(idx+1))
def test_rule_tests():
l = ["ruletest1", "ruletest2", "ruletest3", "ruletest4", "ruletest5",
"ruletest6", "ruletest7", "ruletest8", "ruletest9", "ruletest10",
"ruletest11", "ruletest12"]
for i in l:
in_filepath = i + ".al"
out_filepath = i + ".py"
_test_examples(in_filepath, out_filepath, i)
def test_pydy_examples():
l = ["mass_spring_damper", "chaos_pendulum", "double_pendulum",
"non_min_pendulum"]
for i in l:
in_filepath = os.path.join("pydy-example-repo", i + ".al")
out_filepath = os.path.join("pydy-example-repo", i + ".py")
_test_examples(in_filepath, out_filepath, i)
def test_autolev_tutorial():
dir_path = os.path.join(FILE_DIR, 'autolev', 'test-examples',
'autolev-tutorial')
if os.path.isdir(dir_path):
l = ["tutor1", "tutor2", "tutor3", "tutor4", "tutor5", "tutor6",
"tutor7"]
for i in l:
in_filepath = os.path.join("autolev-tutorial", i + ".al")
out_filepath = os.path.join("autolev-tutorial", i + ".py")
_test_examples(in_filepath, out_filepath, i)
def test_dynamics_online():
dir_path = os.path.join(FILE_DIR, 'autolev', 'test-examples',
'dynamics-online')
if os.path.isdir(dir_path):
ch1 = ["1-4", "1-5", "1-6", "1-7", "1-8", "1-9_1", "1-9_2", "1-9_3"]
ch2 = ["2-1", "2-2", "2-3", "2-4", "2-5", "2-6", "2-7", "2-8", "2-9",
"circular"]
ch3 = ["3-1_1", "3-1_2", "3-2_1", "3-2_2", "3-2_3", "3-2_4", "3-2_5",
"3-3"]
ch4 = ["4-1_1", "4-2_1", "4-4_1", "4-4_2", "4-5_1", "4-5_2"]
chapters = [(ch1, "ch1"), (ch2, "ch2"), (ch3, "ch3"), (ch4, "ch4")]
for ch, name in chapters:
for i in ch:
in_filepath = os.path.join("dynamics-online", name, i + ".al")
out_filepath = os.path.join("dynamics-online", name, i + ".py")
_test_examples(in_filepath, out_filepath, i)
def test_output_01():
"""Autolev example calculates the position, velocity, and acceleration of a
    point and expresses them in a single reference frame::
(1) FRAMES C,D,F
(2) VARIABLES FD'',DC''
(3) CONSTANTS R,L
(4) POINTS O,E
(5) SIMPROT(F,D,1,FD)
-> (6) F_D = [1, 0, 0; 0, COS(FD), -SIN(FD); 0, SIN(FD), COS(FD)]
(7) SIMPROT(D,C,2,DC)
-> (8) D_C = [COS(DC), 0, SIN(DC); 0, 1, 0; -SIN(DC), 0, COS(DC)]
(9) W_C_F> = EXPRESS(W_C_F>, F)
-> (10) W_C_F> = FD'*F1> + COS(FD)*DC'*F2> + SIN(FD)*DC'*F3>
(11) P_O_E>=R*D2>-L*C1>
(12) P_O_E>=EXPRESS(P_O_E>, D)
-> (13) P_O_E> = -L*COS(DC)*D1> + R*D2> + L*SIN(DC)*D3>
(14) V_E_F>=EXPRESS(DT(P_O_E>,F),D)
-> (15) V_E_F> = L*SIN(DC)*DC'*D1> - L*SIN(DC)*FD'*D2> + (R*FD'+L*COS(DC)*DC')*D3>
(16) A_E_F>=EXPRESS(DT(V_E_F>,F),D)
-> (17) A_E_F> = L*(COS(DC)*DC'^2+SIN(DC)*DC'')*D1> + (-R*FD'^2-2*L*COS(DC)*DC'*FD'-L*SIN(DC)*FD'')*D2> + (R*FD''+L*COS(DC)*DC''-L*SIN(DC)*DC'^2-L*SIN(DC)*FD'^2)*D3>
"""
if not antlr4:
skip('Test skipped: antlr4 is not installed.')
autolev_input = """\
FRAMES C,D,F
VARIABLES FD'',DC''
CONSTANTS R,L
POINTS O,E
SIMPROT(F,D,1,FD)
SIMPROT(D,C,2,DC)
W_C_F>=EXPRESS(W_C_F>,F)
P_O_E>=R*D2>-L*C1>
P_O_E>=EXPRESS(P_O_E>,D)
V_E_F>=EXPRESS(DT(P_O_E>,F),D)
A_E_F>=EXPRESS(DT(V_E_F>,F),D)\
"""
sympy_input = parse_autolev(autolev_input)
g = {}
l = {}
exec(sympy_input, g, l)
w_c_f = l['frame_c'].ang_vel_in(l['frame_f'])
    # P_O_E> means "the position of point E with respect to point O"
p_o_e = l['point_e'].pos_from(l['point_o'])
v_e_f = l['point_e'].vel(l['frame_f'])
a_e_f = l['point_e'].acc(l['frame_f'])
# NOTE : The Autolev outputs above were manually transformed into
# equivalent SymPy physics vector expressions. Would be nice to automate
# this transformation.
expected_w_c_f = (l['fd'].diff()*l['frame_f'].x +
cos(l['fd'])*l['dc'].diff()*l['frame_f'].y +
sin(l['fd'])*l['dc'].diff()*l['frame_f'].z)
assert (w_c_f - expected_w_c_f).simplify() == 0
expected_p_o_e = (-l['l']*cos(l['dc'])*l['frame_d'].x +
l['r']*l['frame_d'].y +
l['l']*sin(l['dc'])*l['frame_d'].z)
assert (p_o_e - expected_p_o_e).simplify() == 0
expected_v_e_f = (l['l']*sin(l['dc'])*l['dc'].diff()*l['frame_d'].x -
l['l']*sin(l['dc'])*l['fd'].diff()*l['frame_d'].y +
(l['r']*l['fd'].diff() +
l['l']*cos(l['dc'])*l['dc'].diff())*l['frame_d'].z)
assert (v_e_f - expected_v_e_f).simplify() == 0
expected_a_e_f = (l['l']*(cos(l['dc'])*l['dc'].diff()**2 +
sin(l['dc'])*l['dc'].diff().diff())*l['frame_d'].x +
(-l['r']*l['fd'].diff()**2 -
2*l['l']*cos(l['dc'])*l['dc'].diff()*l['fd'].diff() -
l['l']*sin(l['dc'])*l['fd'].diff().diff())*l['frame_d'].y +
(l['r']*l['fd'].diff().diff() +
l['l']*cos(l['dc'])*l['dc'].diff().diff() -
l['l']*sin(l['dc'])*l['dc'].diff()**2 -
l['l']*sin(l['dc'])*l['fd'].diff()**2)*l['frame_d'].z)
assert (a_e_f - expected_a_e_f).simplify() == 0
|
the-stack_0_21619 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for training utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import multiprocessing.pool
import time
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import readers
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras import backend
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.engine import training_utils
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
class ModelInputsTest(test.TestCase):
def test_single_thing(self):
a = np.ones(10)
model_inputs = training_utils.ModelInputs(a)
self.assertEqual(['input_1'], model_inputs.get_input_names())
vals = model_inputs.get_symbolic_inputs()
self.assertTrue(tensor_util.is_tensor(vals))
vals = model_inputs.get_symbolic_inputs(return_single_as_list=True)
self.assertEqual(1, len(vals))
self.assertTrue(tensor_util.is_tensor(vals[0]))
self.assertEqual(backend.floatx(), vals[0].dtype)
def test_single_thing_eager(self):
with context.eager_mode():
a = np.ones(10, dtype=np.int32)
model_inputs = training_utils.ModelInputs(a)
self.assertEqual(['input_1'], model_inputs.get_input_names())
val = model_inputs.get_symbolic_inputs()
self.assertTrue(tf_utils.is_symbolic_tensor(val))
vals = model_inputs.get_symbolic_inputs(return_single_as_list=True)
self.assertEqual(1, len(vals))
self.assertTrue(tf_utils.is_symbolic_tensor(vals[0]))
self.assertEqual(dtypes.int32, vals[0].dtype)
def test_list(self):
a = [np.ones(10), np.ones(20)]
model_inputs = training_utils.ModelInputs(a)
self.assertEqual(['input_1', 'input_2'], model_inputs.get_input_names())
vals = model_inputs.get_symbolic_inputs()
self.assertTrue(tensor_util.is_tensor(vals[0]))
self.assertTrue(tensor_util.is_tensor(vals[1]))
def test_list_eager(self):
with context.eager_mode():
a = [np.ones(10), np.ones(20)]
model_inputs = training_utils.ModelInputs(a)
self.assertEqual(['input_1', 'input_2'], model_inputs.get_input_names())
vals = model_inputs.get_symbolic_inputs()
self.assertTrue(tf_utils.is_symbolic_tensor(vals[0]))
self.assertTrue(tf_utils.is_symbolic_tensor(vals[1]))
def test_dict(self):
a = {'b': np.ones(10), 'a': np.ones(20)}
model_inputs = training_utils.ModelInputs(a)
self.assertEqual(['a', 'b'], model_inputs.get_input_names())
vals = model_inputs.get_symbolic_inputs()
self.assertTrue(tensor_util.is_tensor(vals['a']))
self.assertTrue(tensor_util.is_tensor(vals['b']))
def test_dict_eager(self):
with context.eager_mode():
a = {'b': np.ones(10), 'a': np.ones(20)}
model_inputs = training_utils.ModelInputs(a)
self.assertEqual(['a', 'b'], model_inputs.get_input_names())
vals = model_inputs.get_symbolic_inputs()
self.assertTrue(tf_utils.is_symbolic_tensor(vals['a']))
self.assertTrue(tf_utils.is_symbolic_tensor(vals['b']))
class DatasetUtilsTest(test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
# pylint: disable=g-long-lambda
('Batch', lambda: dataset_ops.Dataset.range(5).batch(2), ValueError),
('Cache', lambda: dataset_ops.Dataset.range(5).cache()),
('Concatenate', lambda: dataset_ops.Dataset.range(5).concatenate(
dataset_ops.Dataset.range(5))),
('FlatMap', lambda: dataset_ops.Dataset.range(5).flat_map(
lambda _: dataset_ops.Dataset.from_tensors(0)), ValueError),
('Filter', lambda: dataset_ops.Dataset.range(5).filter(lambda _: True)),
('FixedLengthRecordDatasetV2',
lambda: readers.FixedLengthRecordDatasetV2([], 42)),
('FromTensors', lambda: dataset_ops.Dataset.from_tensors(0)),
('FromTensorSlices',
lambda: dataset_ops.Dataset.from_tensor_slices([0, 0, 0])),
('Interleave', lambda: dataset_ops.Dataset.range(5).interleave(
lambda _: dataset_ops.Dataset.from_tensors(0), cycle_length=1),
ValueError),
('ParallelInterleave', lambda: dataset_ops.Dataset.range(5).interleave(
lambda _: dataset_ops.Dataset.from_tensors(0),
cycle_length=1,
num_parallel_calls=1), ValueError),
('Map', lambda: dataset_ops.Dataset.range(5).map(lambda x: x)),
('Options',
lambda: dataset_ops.Dataset.range(5).with_options(dataset_ops.Options())
),
('PaddedBatch', lambda: dataset_ops.Dataset.range(5).padded_batch(2, []),
ValueError),
('ParallelMap', lambda: dataset_ops.Dataset.range(5).map(
lambda x: x, num_parallel_calls=1)),
('Prefetch', lambda: dataset_ops.Dataset.range(5).prefetch(1)),
('Range', lambda: dataset_ops.Dataset.range(0)),
('Repeat', lambda: dataset_ops.Dataset.range(0).repeat(0)),
('Shuffle', lambda: dataset_ops.Dataset.range(5).shuffle(1)),
('Skip', lambda: dataset_ops.Dataset.range(5).skip(2)),
('Take', lambda: dataset_ops.Dataset.range(5).take(2)),
('TextLineDataset', lambda: readers.TextLineDatasetV2([])),
('TFRecordDataset', lambda: readers.TFRecordDatasetV2([])),
('Window', lambda: dataset_ops.Dataset.range(5).window(2), ValueError),
('Zip', lambda: dataset_ops.Dataset.zip(dataset_ops.Dataset.range(5))),
# pylint: enable=g-long-lambda
)
def test_assert_not_batched(self, dataset_fn, expected_error=None):
if expected_error is None:
training_utils.assert_not_batched(dataset_fn())
else:
with self.assertRaises(expected_error):
training_utils.assert_not_batched(dataset_fn())
@parameterized.named_parameters(
# pylint: disable=g-long-lambda
('Batch', lambda: dataset_ops.Dataset.range(5).batch(2)),
('Cache', lambda: dataset_ops.Dataset.range(5).cache()),
('Concatenate', lambda: dataset_ops.Dataset.range(5).concatenate(
dataset_ops.Dataset.range(5))),
('FlatMap', lambda: dataset_ops.Dataset.range(5).flat_map(
lambda _: dataset_ops.Dataset.from_tensors(0)), ValueError),
('Filter', lambda: dataset_ops.Dataset.range(5).filter(lambda _: True)),
('FixedLengthRecordDatasetV2',
lambda: readers.FixedLengthRecordDatasetV2([], 42)),
('FromTensors', lambda: dataset_ops.Dataset.from_tensors(0)),
('FromTensorSlices',
lambda: dataset_ops.Dataset.from_tensor_slices([0, 0, 0])),
('Interleave', lambda: dataset_ops.Dataset.range(5).interleave(
lambda _: dataset_ops.Dataset.from_tensors(0), cycle_length=1),
ValueError),
('Map', lambda: dataset_ops.Dataset.range(5).map(lambda x: x)),
('Options',
lambda: dataset_ops.Dataset.range(5).with_options(dataset_ops.Options())
),
('PaddedBatch', lambda: dataset_ops.Dataset.range(5).padded_batch(2, [])),
('ParallelInterleave', lambda: dataset_ops.Dataset.range(5).interleave(
lambda _: dataset_ops.Dataset.from_tensors(0),
cycle_length=1,
num_parallel_calls=1), ValueError),
('ParallelMap', lambda: dataset_ops.Dataset.range(5).map(
lambda x: x, num_parallel_calls=1)),
('Prefetch', lambda: dataset_ops.Dataset.range(5).prefetch(1)),
('Range', lambda: dataset_ops.Dataset.range(0)),
('Repeat', lambda: dataset_ops.Dataset.range(0).repeat(0)),
('Shuffle', lambda: dataset_ops.Dataset.range(5).shuffle(1), ValueError),
('Skip', lambda: dataset_ops.Dataset.range(5).skip(2)),
('Take', lambda: dataset_ops.Dataset.range(5).take(2)),
('TextLineDataset', lambda: readers.TextLineDatasetV2([])),
('TFRecordDataset', lambda: readers.TFRecordDatasetV2([])),
('Window', lambda: dataset_ops.Dataset.range(5).window(2)),
('Zip', lambda: dataset_ops.Dataset.zip(dataset_ops.Dataset.range(5))),
# pylint: enable=g-long-lambda
)
def test_assert_not_shuffled(self, dataset_fn, expected_error=None):
if expected_error is None:
training_utils.assert_not_shuffled(dataset_fn())
else:
with self.assertRaises(expected_error):
training_utils.assert_not_shuffled(dataset_fn())
def test_verify_dataset_shuffled(self):
dataset = dataset_ops.Dataset.range(5)
training_utils.assert_not_shuffled(dataset)
with test.mock.patch.object(logging, 'warning') as mock_log:
training_utils.verify_dataset_shuffled(dataset)
self.assertRegexpMatches(
str(mock_log.call_args),
'input dataset `x` is not shuffled.')
shuffled_dataset = dataset.shuffle(10)
training_utils.verify_dataset_shuffled(shuffled_dataset)
class StandardizeWeightsTest(keras_parameterized.TestCase):
def test_sample_weights(self):
y = np.array([0, 1, 0, 0, 2])
sample_weights = np.array([0.5, 1., 1., 0., 2.])
weights = training_utils.standardize_weights(y, sample_weights)
self.assertAllClose(weights, sample_weights)
def test_class_weights(self):
y = np.array([0, 1, 0, 0, 2])
class_weights = {0: 0.5, 1: 1., 2: 1.5}
weights = training_utils.standardize_weights(y, class_weight=class_weights)
self.assertAllClose(weights, np.array([0.5, 1., 0.5, 0.5, 1.5]))
def test_sample_weights_and_class_weights(self):
y = np.array([0, 1, 0, 0, 2])
sample_weights = np.array([0.5, 1., 1., 0., 2.])
class_weights = {0: 0.5, 1: 1., 2: 1.5}
weights = training_utils.standardize_weights(y, sample_weights,
class_weights)
expected = sample_weights * np.array([0.5, 1., 0.5, 0.5, 1.5])
self.assertAllClose(weights, expected)
def test_dataset_with_class_weight(self):
model = testing_utils.get_small_functional_mlp(1, 4, input_dim=3)
model.compile('rmsprop', 'mse')
inputs = np.zeros((10, 3), np.float32)
targets = np.zeros((10, 4), np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat(100)
dataset = dataset.batch(10)
class_weight_np = np.array([0.25, 0.25, 0.25, 0.25])
class_weight = dict(enumerate(class_weight_np))
model.fit(
dataset,
epochs=1,
steps_per_epoch=2,
verbose=1,
class_weight=class_weight)
class MonitoredPool(multiprocessing.pool.ThreadPool):
def __init__(self, *args, **kwargs):
self._apply_counter = 0
self._func_wrapper = None
super(MonitoredPool, self).__init__(*args, **kwargs)
def apply_async(self, func, *args, **kwargs):
self._apply_counter += 1
if self._func_wrapper:
func = self._func_wrapper(func) # pylint: disable=not-callable
return super(MonitoredPool, self).apply_async(func, *args, **kwargs)
def add_sleep(f):
@functools.wraps(f)
def wrapped(*args, **kwargs):
time.sleep(1.)
return f(*args, **kwargs)
return wrapped
def cause_error(f):
@functools.wraps(f)
def wrapped(batch_element, batch_start, batch_end, is_finished): # pylint: disable=unused-argument
# Induce a TypeError during assignment.
return f(None, None, None, is_finished)
return wrapped
_TEST_DATA = np.array((
(3, 1, 3, 1, 2, 0, 3, 3, 1, 2),
(0, 1, 2, 1, 3, 0, 0, 1, 3, 0),
(3, 2, 1, 1, 1, 1, 1, 3, 2, 3),
(2, 2, 0, 1, 0, 3, 3, 2, 1, 1),
(3, 0, 3, 3, 3, 2, 1, 0, 0, 1),
(1, 0, 3, 3, 3, 2, 1, 2, 3, 1),))
class AggregationTest(keras_parameterized.TestCase):
def setUp(self):
super(AggregationTest, self).setUp()
self._old_pool = training_utils._COPY_POOL
self._old_threshold = training_utils.SliceAggregator._BINARY_SIZE_THRESHOLD
self._old_timeout = training_utils.SliceAggregator._MAX_COPY_SECONDS
training_utils._COPY_POOL = MonitoredPool(training_utils._COPY_THREADS)
def tearDown(self):
super(AggregationTest, self).tearDown()
training_utils._COPY_POOL = self._old_pool
training_utils.SliceAggregator._BINARY_SIZE_THRESHOLD = self._old_threshold
training_utils.SliceAggregator._MAX_COPY_SECONDS = self._old_timeout
def _run_with_steps(self):
aggregator = training_utils.OutputsAggregator(use_steps=True)
for i, batch in enumerate(np.array_split(_TEST_DATA, 4)):
if i == 0:
aggregator.create(batch)
aggregator.aggregate(batch)
assert len(aggregator.results) == 1
assert isinstance(aggregator.results[0], training_utils.ConcatAggregator)
aggregator.finalize()
return aggregator.results
def _run_without_steps(self):
aggregator = training_utils.OutputsAggregator(
use_steps=False, num_samples=6)
batch_start = 0
for i, batch in enumerate(np.array_split(_TEST_DATA, 4)):
if i == 0:
aggregator.create(batch)
batch_end = batch_start + batch.shape[0]
aggregator.aggregate(batch, batch_start, batch_end)
batch_start = batch_end
assert len(aggregator.results) == 1
assert isinstance(aggregator.results[0], training_utils.SliceAggregator)
aggregator.finalize()
return aggregator.results
def test_with_steps(self):
self.assertAllEqual(self._run_with_steps(), _TEST_DATA)
def test_without_steps(self):
self.assertAllEqual(self._run_without_steps(), _TEST_DATA)
def test_nested_aggregation(self):
aggregator = training_utils.OutputsAggregator(
use_steps=False, num_samples=6)
batches = np.array_split(_TEST_DATA, 4)
batch_start = 0
for i, batch in enumerate(zip(batches, batches)):
if i == 0:
aggregator.create(batch)
batch_end = batch_start + batch[0].shape[0]
aggregator.aggregate(batch, batch_start, batch_end)
batch_start = batch_end
assert len(aggregator.results) == 2
aggregator.finalize()
self.assertAllEqual(aggregator.results, (_TEST_DATA, _TEST_DATA))
def test_concat_single_batch(self):
aggregator = training_utils.OutputsAggregator(use_steps=True)
data = _TEST_DATA.copy()
aggregator.create(data)
assert len(aggregator.results) == 1
assert isinstance(aggregator.results[0], training_utils.ConcatAggregator)
aggregator.aggregate(data)
aggregator.finalize()
assert aggregator.results is data # No copy.
def test_slice_single_batch(self):
aggregator = training_utils.OutputsAggregator(
use_steps=False, num_samples=6)
data = _TEST_DATA.copy()
aggregator.create(data)
assert len(aggregator.results) == 1
assert isinstance(aggregator.results[0], training_utils.SliceAggregator)
aggregator.aggregate(data, 0, 6)
aggregator.finalize()
assert aggregator.results is data # No copy.
def test_async_copy(self):
training_utils.SliceAggregator._BINARY_SIZE_THRESHOLD = 15
self.assertAllEqual(self._run_without_steps(), _TEST_DATA)
# Two of the four batches will have 20 elements and two will have 10.
self.assertEqual(training_utils._COPY_POOL._apply_counter, 2)
def test_async_copy_timeout(self):
training_utils.SliceAggregator._BINARY_SIZE_THRESHOLD = 15
training_utils.SliceAggregator._MAX_COPY_SECONDS = 0.1
training_utils._COPY_POOL._func_wrapper = add_sleep
with self.assertRaisesRegexp(ValueError, 'Timed out waiting for copy'):
self._run_without_steps()
def test_async_copy_reraise(self):
training_utils.SliceAggregator._BINARY_SIZE_THRESHOLD = 15
training_utils.SliceAggregator._MAX_COPY_SECONDS = 1.
training_utils._COPY_POOL._func_wrapper = cause_error
with self.assertRaisesRegexp(TypeError, 'NoneType'):
self._run_without_steps()
if __name__ == '__main__':
test.main()
|
the-stack_0_21620 | import unittest
import warnings
from collections import OrderedDict
import numpy as np
import numpy.testing as np_test
from pgmpy.extern.six.moves import range
from pgmpy.factors.discrete import DiscreteFactor
from pgmpy.factors.discrete import JointProbabilityDistribution as JPD
from pgmpy.factors import factor_divide
from pgmpy.factors import factor_product
from pgmpy.factors.discrete.CPD import TabularCPD
from pgmpy.independencies import Independencies
from pgmpy.models import BayesianModel
from pgmpy.models import MarkovModel
class TestFactorInit(unittest.TestCase):
def test_class_init(self):
phi = DiscreteFactor(['x1', 'x2', 'x3'], [2, 2, 2], np.ones(8))
self.assertEqual(phi.variables, ['x1', 'x2', 'x3'])
np_test.assert_array_equal(phi.cardinality, np.array([2, 2, 2]))
np_test.assert_array_equal(phi.values, np.ones(8).reshape(2, 2, 2))
def test_class_init1(self):
phi = DiscreteFactor([1, 2, 3], [2, 3, 2], np.arange(12))
self.assertEqual(phi.variables, [1, 2, 3])
np_test.assert_array_equal(phi.cardinality, np.array([2, 3, 2]))
np_test.assert_array_equal(phi.values, np.arange(12).reshape(2, 3, 2))
def test_class_init_sizeerror(self):
self.assertRaises(ValueError, DiscreteFactor, ['x1', 'x2', 'x3'], [2, 2, 2], np.ones(9))
def test_class_init_typeerror(self):
self.assertRaises(TypeError, DiscreteFactor, 'x1', [3], [1, 2, 3])
self.assertRaises(ValueError, DiscreteFactor, ['x1', 'x1', 'x3'], [2, 3, 2], range(12))
def test_init_size_var_card_not_equal(self):
self.assertRaises(ValueError, DiscreteFactor, ['x1', 'x2'], [2], np.ones(2))
class TestFactorMethods(unittest.TestCase):
def setUp(self):
self.phi = DiscreteFactor(['x1', 'x2', 'x3'], [2, 2, 2], np.random.uniform(5, 10, size=8))
self.phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
self.phi2 = DiscreteFactor([('x1', 0), ('x2', 0), ('x3', 0)], [2, 3, 2], range(12))
# This larger factor (phi3) caused a bug in reduce
card3 = [3, 3, 3, 2, 2, 2, 2, 2, 2]
self.phi3 = DiscreteFactor(['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I'],
card3, np.arange(np.prod(card3), dtype=np.float))
self.tup1 = ('x1', 'x2')
self.tup2 = ('x2', 'x3')
self.tup3 = ('x3', (1, 'x4'))
self.phi4 = DiscreteFactor([self.tup1, self.tup2, self.tup3], [2, 3, 4], np.random.uniform(3, 10, size=24))
self.phi5 = DiscreteFactor([self.tup1, self.tup2, self.tup3], [2, 3, 4], range(24))
self.card6 = [4, 2, 1, 3, 5, 6]
self.phi6 = DiscreteFactor([self.tup1, self.tup2, self.tup3, self.tup1 + self.tup2,
self.tup2 + self.tup3, self.tup3 + self.tup1], self.card6,
np.arange(np.prod(self.card6), dtype=np.float))
self.var1 = 'x1'
self.var2 = ('x2', 1)
self.var3 = frozenset(['x1', 'x2'])
self.phi7 = DiscreteFactor([self.var1, self.var2], [3, 2], [3, 2, 4, 5, 9, 8])
self.phi8 = DiscreteFactor([self.var2, self.var3], [2, 2], [2, 1, 5, 6])
self.phi9 = DiscreteFactor([self.var1, self.var3], [3, 2], [3, 2, 4, 5, 9, 8])
self.phi10 = DiscreteFactor([self.var3], [2], [3, 6])
def test_scope(self):
self.assertListEqual(self.phi.scope(), ['x1', 'x2', 'x3'])
self.assertListEqual(self.phi1.scope(), ['x1', 'x2', 'x3'])
self.assertListEqual(self.phi4.scope(), [self.tup1, self.tup2, self.tup3])
def test_assignment(self):
self.assertListEqual(self.phi.assignment([0]), [[('x1', 0), ('x2', 0), ('x3', 0)]])
self.assertListEqual(self.phi.assignment([4, 5, 6]), [[('x1', 1), ('x2', 0), ('x3', 0)],
[('x1', 1), ('x2', 0), ('x3', 1)],
[('x1', 1), ('x2', 1), ('x3', 0)]])
self.assertListEqual(self.phi1.assignment(np.array([4, 5, 6])), [[('x1', 0), ('x2', 2), ('x3', 0)],
[('x1', 0), ('x2', 2), ('x3', 1)],
[('x1', 1), ('x2', 0), ('x3', 0)]])
self.assertListEqual(self.phi4.assignment(np.array([11, 12, 23])),
[[(self.tup1, 0), (self.tup2, 2), (self.tup3, 3)],
[(self.tup1, 1), (self.tup2, 0), (self.tup3, 0)],
[(self.tup1, 1), (self.tup2, 2), (self.tup3, 3)]])
def test_assignment_indexerror(self):
self.assertRaises(IndexError, self.phi.assignment, [10])
self.assertRaises(IndexError, self.phi.assignment, [1, 3, 10, 5])
self.assertRaises(IndexError, self.phi.assignment, np.array([1, 3, 10, 5]))
self.assertRaises(IndexError, self.phi4.assignment, [2, 24])
self.assertRaises(IndexError, self.phi4.assignment, np.array([24, 2, 4, 30]))
def test_get_cardinality(self):
self.assertEqual(self.phi.get_cardinality(['x1']), {'x1': 2})
self.assertEqual(self.phi.get_cardinality(['x2']), {'x2': 2})
self.assertEqual(self.phi.get_cardinality(['x3']), {'x3': 2})
self.assertEqual(self.phi.get_cardinality(['x1', 'x2']), {'x1': 2, 'x2': 2})
self.assertEqual(self.phi.get_cardinality(['x1', 'x3']), {'x1': 2, 'x3': 2})
self.assertEqual(self.phi.get_cardinality(['x1', 'x2', 'x3']), {'x1': 2, 'x2': 2, 'x3': 2})
self.assertEqual(self.phi4.get_cardinality([self.tup1, self.tup3]),
{self.tup1: 2, self.tup3: 4})
def test_get_cardinality_scopeerror(self):
self.assertRaises(ValueError, self.phi.get_cardinality, ['x4'])
self.assertRaises(ValueError, self.phi4.get_cardinality, [('x1', 'x4')])
self.assertRaises(ValueError, self.phi4.get_cardinality, [('x3', (2, 'x4'))])
def test_get_cardinality_typeerror(self):
self.assertRaises(TypeError, self.phi.get_cardinality, 'x1')
def test_marginalize(self):
self.phi1.marginalize(['x1'])
np_test.assert_array_equal(self.phi1.values, np.array([[6, 8],
[10, 12],
[14, 16]]))
self.phi1.marginalize(['x2'])
np_test.assert_array_equal(self.phi1.values, np.array([30, 36]))
self.phi1.marginalize(['x3'])
np_test.assert_array_equal(self.phi1.values, np.array(66))
self.phi5.marginalize([self.tup1])
np_test.assert_array_equal(self.phi5.values, np.array([[12, 14, 16, 18],
[20, 22, 24, 26],
[28, 30, 32, 34]]))
self.phi5.marginalize([self.tup2])
np_test.assert_array_equal(self.phi5.values, np.array([60, 66, 72, 78]))
self.phi5.marginalize([self.tup3])
np_test.assert_array_equal(self.phi5.values, np.array([276]))
def test_marginalize_scopeerror(self):
self.assertRaises(ValueError, self.phi.marginalize, ['x4'])
self.phi.marginalize(['x1'])
self.assertRaises(ValueError, self.phi.marginalize, ['x1'])
self.assertRaises(ValueError, self.phi4.marginalize, [('x1', 'x3')])
self.phi4.marginalize([self.tup2])
self.assertRaises(ValueError, self.phi4.marginalize, [self.tup2])
def test_marginalize_typeerror(self):
self.assertRaises(TypeError, self.phi.marginalize, 'x1')
def test_marginalize_shape(self):
values = ['A', 'D', 'F', 'H']
phi3_mar = self.phi3.marginalize(values, inplace=False)
# Previously a sorting error caused these to be different
np_test.assert_array_equal(phi3_mar.values.shape, phi3_mar.cardinality)
phi6_mar = self.phi6.marginalize([self.tup1, self.tup2], inplace=False)
np_test.assert_array_equal(phi6_mar.values.shape, phi6_mar.cardinality)
self.phi6.marginalize([self.tup1, self.tup3 + self.tup1], inplace=True)
np_test.assert_array_equal(self.phi6.values.shape, self.phi6.cardinality)
def test_normalize(self):
self.phi1.normalize()
np_test.assert_almost_equal(self.phi1.values,
np.array([[[0, 0.01515152],
[0.03030303, 0.04545455],
[0.06060606, 0.07575758]],
[[0.09090909, 0.10606061],
[0.12121212, 0.13636364],
[0.15151515, 0.16666667]]]))
self.phi5.normalize()
np_test.assert_almost_equal(self.phi5.values,
[[[0., 0.00362319, 0.00724638, 0.01086957],
[0.01449275, 0.01811594, 0.02173913, 0.02536232],
[0.02898551, 0.0326087, 0.03623188, 0.03985507]],
[[0.04347826, 0.04710145, 0.05072464, 0.05434783],
[0.05797101, 0.0615942, 0.06521739, 0.06884058],
[0.07246377, 0.07608696, 0.07971014, 0.08333333]]])
def test_reduce(self):
self.phi1.reduce([('x1', 0), ('x2', 0)])
np_test.assert_array_equal(self.phi1.values, np.array([0, 1]))
self.phi5.reduce([(self.tup1, 0), (self.tup3, 1)])
np_test.assert_array_equal(self.phi5.values, np.array([1, 5, 9]))
def test_reduce1(self):
self.phi1.reduce([('x2', 0), ('x1', 0)])
np_test.assert_array_equal(self.phi1.values, np.array([0, 1]))
self.phi5.reduce([(self.tup3, 1), (self.tup1, 0)])
np_test.assert_array_equal(self.phi5.values, np.array([1, 5, 9]))
def test_reduce_shape(self):
values = [('A', 0), ('D', 0), ('F', 0), ('H', 1)]
phi3_reduced = self.phi3.reduce(values, inplace=False)
# Previously a sorting error caused these to be different
np_test.assert_array_equal(phi3_reduced.values.shape, phi3_reduced.cardinality)
values = [(self.tup1, 2), (self.tup3, 0)]
phi6_reduced = self.phi6.reduce(values, inplace=False)
np_test.assert_array_equal(phi6_reduced.values.shape, phi6_reduced.cardinality)
self.phi6.reduce(values, inplace=True)
np_test.assert_array_equal(self.phi6.values.shape, self.phi6.cardinality)
def test_complete_reduce(self):
self.phi1.reduce([('x1', 0), ('x2', 0), ('x3', 1)])
np_test.assert_array_equal(self.phi1.values, np.array([1]))
np_test.assert_array_equal(self.phi1.cardinality, np.array([]))
np_test.assert_array_equal(self.phi1.variables, OrderedDict())
self.phi5.reduce([(('x1', 'x2'), 1), (('x2', 'x3'), 0), (('x3', (1, 'x4')), 3)])
np_test.assert_array_equal(self.phi5.values, np.array([15]))
np_test.assert_array_equal(self.phi5.cardinality, np.array([]))
np_test.assert_array_equal(self.phi5.variables, OrderedDict())
def test_reduce_typeerror(self):
self.assertRaises(TypeError, self.phi1.reduce, 'x10')
self.assertRaises(TypeError, self.phi1.reduce, ['x10'])
self.assertRaises(TypeError, self.phi1.reduce, [('x1', 'x2')])
self.assertRaises(TypeError, self.phi1.reduce, [(0, 'x1')])
self.assertRaises(TypeError, self.phi1.reduce, [(0.1, 'x1')])
self.assertRaises(TypeError, self.phi1.reduce, [(0.1, 0.1)])
self.assertRaises(TypeError, self.phi1.reduce, [('x1', 0.1)])
self.assertRaises(TypeError, self.phi5.reduce, [(('x1', 'x2'), 0), (('x2', 'x3'), 0.2)])
def test_reduce_scopeerror(self):
self.assertRaises(ValueError, self.phi1.reduce, [('x4', 1)])
self.assertRaises(ValueError, self.phi5.reduce, [((('x1', 0.1), 0))])
def test_reduce_sizeerror(self):
self.assertRaises(IndexError, self.phi1.reduce, [('x3', 5)])
self.assertRaises(IndexError, self.phi5.reduce, [(('x2', 'x3'), 3)])
def test_identity_factor(self):
identity_factor = self.phi.identity_factor()
self.assertEqual(list(identity_factor.variables), ['x1', 'x2', 'x3'])
np_test.assert_array_equal(identity_factor.cardinality, [2, 2, 2])
np_test.assert_array_equal(identity_factor.values, np.ones(8).reshape(2, 2, 2))
identity_factor1 = self.phi5.identity_factor()
self.assertEqual(list(identity_factor1.variables), [self.tup1, self.tup2, self.tup3])
np_test.assert_array_equal(identity_factor1.cardinality, [2, 3, 4])
np_test.assert_array_equal(identity_factor1.values, np.ones(24).reshape(2, 3, 4))
def test_factor_product(self):
phi = DiscreteFactor(['x1', 'x2'], [2, 2], range(4))
phi1 = DiscreteFactor(['x3', 'x4'], [2, 2], range(4))
prod = factor_product(phi, phi1)
expected_factor = DiscreteFactor(['x1', 'x2', 'x3', 'x4'], [2, 2, 2, 2],
[0, 0, 0, 0, 0, 1, 2, 3, 0, 2, 4, 6, 0, 3, 6, 9])
self.assertEqual(prod, expected_factor)
self.assertEqual(sorted(prod.variables), ['x1', 'x2', 'x3', 'x4'])
phi = DiscreteFactor(['x1', 'x2'], [3, 2], range(6))
phi1 = DiscreteFactor(['x2', 'x3'], [2, 2], range(4))
prod = factor_product(phi, phi1)
expected_factor = DiscreteFactor(['x1', 'x2', 'x3'], [3, 2, 2],
[0, 0, 2, 3, 0, 2, 6, 9, 0, 4, 10, 15])
self.assertEqual(prod, expected_factor)
self.assertEqual(prod.variables, expected_factor.variables)
prod = factor_product(self.phi7, self.phi8)
expected_factor = DiscreteFactor([self.var1, self.var2, self.var3], [3, 2, 2],
[6, 3, 10, 12, 8, 4, 25, 30, 18, 9, 40, 48])
self.assertEqual(prod, expected_factor)
self.assertEqual(prod.variables, expected_factor.variables)
def test_product(self):
phi = DiscreteFactor(['x1', 'x2'], [2, 2], range(4))
phi1 = DiscreteFactor(['x3', 'x4'], [2, 2], range(4))
prod = phi.product(phi1, inplace=False)
expected_factor = DiscreteFactor(['x1', 'x2', 'x3', 'x4'], [2, 2, 2, 2],
[0, 0, 0, 0, 0, 1, 2, 3, 0, 2, 4, 6, 0, 3, 6, 9])
self.assertEqual(prod, expected_factor)
self.assertEqual(sorted(prod.variables), ['x1', 'x2', 'x3', 'x4'])
phi = DiscreteFactor(['x1', 'x2'], [3, 2], range(6))
phi1 = DiscreteFactor(['x2', 'x3'], [2, 2], range(4))
prod = phi.product(phi1, inplace=False)
expected_factor = DiscreteFactor(['x1', 'x2', 'x3'], [3, 2, 2],
[0, 0, 2, 3, 0, 2, 6, 9, 0, 4, 10, 15])
self.assertEqual(prod, expected_factor)
self.assertEqual(sorted(prod.variables), ['x1', 'x2', 'x3'])
phi7_copy = self.phi7
phi7_copy.product(self.phi8, inplace=True)
expected_factor = DiscreteFactor([self.var1, self.var2, self.var3], [3, 2, 2],
[6, 3, 10, 12, 8, 4, 25, 30, 18, 9, 40, 48])
self.assertEqual(expected_factor, phi7_copy)
self.assertEqual(phi7_copy.variables, [self.var1, self.var2, self.var3])
def test_factor_product_non_factor_arg(self):
self.assertRaises(TypeError, factor_product, 1, 2)
def test_factor_mul(self):
phi = DiscreteFactor(['x1', 'x2'], [2, 2], range(4))
phi1 = DiscreteFactor(['x3', 'x4'], [2, 2], range(4))
prod = phi * phi1
sorted_vars = ['x1', 'x2', 'x3', 'x4']
for axis in range(prod.values.ndim):
exchange_index = prod.variables.index(sorted_vars[axis])
prod.variables[axis], prod.variables[exchange_index] = prod.variables[exchange_index], prod.variables[axis]
prod.values = prod.values.swapaxes(axis, exchange_index)
np_test.assert_almost_equal(prod.values.ravel(),
np.array([0, 0, 0, 0, 0, 1, 2, 3,
0, 2, 4, 6, 0, 3, 6, 9]))
self.assertEqual(prod.variables, ['x1', 'x2', 'x3', 'x4'])
def test_factor_divide(self):
phi1 = DiscreteFactor(['x1', 'x2'], [2, 2], [1, 2, 2, 4])
phi2 = DiscreteFactor(['x1'], [2], [1, 2])
expected_factor = phi1.divide(phi2, inplace=False)
phi3 = DiscreteFactor(['x1', 'x2'], [2, 2], [1, 2, 1, 2])
self.assertEqual(phi3, expected_factor)
self.phi9.divide(self.phi10, inplace=True)
np_test.assert_array_almost_equal(self.phi9.values, np.array([1.000000, 0.333333, 1.333333,
0.833333, 3.000000, 1.333333]).reshape(3, 2))
self.assertEqual(self.phi9.variables, [self.var1, self.var3])
def test_factor_divide_truediv(self):
phi1 = DiscreteFactor(['x1', 'x2'], [2, 2], [1, 2, 2, 4])
phi2 = DiscreteFactor(['x1'], [2], [1, 2])
div = phi1 / phi2
phi3 = DiscreteFactor(['x1', 'x2'], [2, 2], [1, 2, 1, 2])
self.assertEqual(phi3, div)
self.phi9 = self.phi9 / self.phi10
np_test.assert_array_almost_equal(self.phi9.values, np.array([1.000000, 0.333333, 1.333333,
0.833333, 3.000000, 1.333333]).reshape(3, 2))
self.assertEqual(self.phi9.variables, [self.var1, self.var3])
def test_factor_divide_invalid(self):
phi1 = DiscreteFactor(['x1', 'x2'], [2, 2], [1, 2, 3, 4])
phi2 = DiscreteFactor(['x1'], [2], [0, 2])
div = phi1.divide(phi2, inplace=False)
np_test.assert_array_equal(div.values.ravel(), np.array([np.inf, np.inf, 1.5, 2]))
def test_factor_divide_no_common_scope(self):
phi1 = DiscreteFactor(['x1', 'x2'], [2, 2], [1, 2, 3, 4])
phi2 = DiscreteFactor(['x3'], [2], [0, 2])
self.assertRaises(ValueError, factor_divide, phi1, phi2)
phi2 = DiscreteFactor([self.var3], [2], [2, 1])
self.assertRaises(ValueError, factor_divide, self.phi7, phi2)
def test_factor_divide_non_factor_arg(self):
self.assertRaises(TypeError, factor_divide, 1, 1)
def test_eq(self):
self.assertFalse(self.phi == self.phi1)
self.assertTrue(self.phi == self.phi)
self.assertTrue(self.phi1 == self.phi1)
self.assertTrue(self.phi5 == self.phi5)
self.assertFalse(self.phi5 == self.phi6)
self.assertTrue(self.phi6 == self.phi6)
def test_eq1(self):
phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 4, 3], range(24))
phi2 = DiscreteFactor(['x2', 'x1', 'x3'], [4, 2, 3],
[0, 1, 2, 12, 13, 14, 3, 4, 5, 15, 16, 17, 6, 7,
8, 18, 19, 20, 9, 10, 11, 21, 22, 23])
self.assertTrue(phi1 == phi2)
self.assertEqual(phi2.variables, ['x2', 'x1', 'x3'])
phi3 = DiscreteFactor([self.tup1, self.tup2, self.tup3], [2, 4, 3], range(24))
phi4 = DiscreteFactor([self.tup2, self.tup1, self.tup3], [4, 2, 3],
[0, 1, 2, 12, 13, 14, 3, 4, 5, 15, 16, 17,
6, 7, 8, 18, 19, 20, 9, 10, 11, 21, 22, 23])
self.assertTrue(phi3 == phi4)
def test_hash(self):
phi1 = DiscreteFactor(['x1', 'x2'], [2, 2], [1, 2, 3, 4])
phi2 = DiscreteFactor(['x2', 'x1'], [2, 2], [1, 3, 2, 4])
self.assertEqual(hash(phi1), hash(phi2))
phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 2, 2], range(8))
phi2 = DiscreteFactor(['x3', 'x1', 'x2'], [2, 2, 2], [0, 2, 4, 6, 1, 3, 5, 7])
self.assertEqual(hash(phi1), hash(phi2))
var1 = TestHash(1, 2)
phi3 = DiscreteFactor([var1, self.var2, self.var3], [2, 4, 3], range(24))
phi4 = DiscreteFactor([self.var2, var1, self.var3], [4, 2, 3],
[0, 1, 2, 12, 13, 14, 3, 4, 5, 15, 16, 17,
6, 7, 8, 18, 19, 20, 9, 10, 11, 21, 22, 23])
self.assertEqual(hash(phi3), hash(phi4))
var1 = TestHash(2, 3)
var2 = TestHash('x2', 1)
phi3 = DiscreteFactor([var1, var2, self.var3], [2, 2, 2], range(8))
phi4 = DiscreteFactor([self.var3, var1, var2], [2, 2, 2], [0, 2, 4, 6, 1, 3, 5, 7])
self.assertEqual(hash(phi3), hash(phi4))
def test_maximize_single(self):
self.phi1.maximize(['x1'])
self.assertEqual(self.phi1, DiscreteFactor(['x2', 'x3'], [3, 2], [6, 7, 8, 9, 10, 11]))
self.phi1.maximize(['x2'])
self.assertEqual(self.phi1, DiscreteFactor(['x3'], [2], [10, 11]))
self.phi2 = DiscreteFactor(['x1', 'x2', 'x3'], [3, 2, 2], [0.25, 0.35, 0.08, 0.16, 0.05, 0.07,
0.00, 0.00, 0.15, 0.21, 0.08, 0.18])
self.phi2.maximize(['x2'])
self.assertEqual(self.phi2, DiscreteFactor(['x1', 'x3'], [3, 2], [0.25, 0.35, 0.05,
0.07, 0.15, 0.21]))
self.phi5.maximize([('x1', 'x2')])
self.assertEqual(self.phi5, DiscreteFactor([('x2', 'x3'), ('x3', (1, 'x4'))], [3, 4],
[12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23]))
self.phi5.maximize([('x2', 'x3')])
self.assertEqual(self.phi5, DiscreteFactor([('x3', (1, 'x4'))], [4], [20, 21, 22, 23]))
def test_maximize_list(self):
self.phi1.maximize(['x1', 'x2'])
self.assertEqual(self.phi1, DiscreteFactor(['x3'], [2], [10, 11]))
self.phi5.maximize([('x1', 'x2'), ('x2', 'x3')])
self.assertEqual(self.phi5, DiscreteFactor([('x3', (1, 'x4'))], [4], [20, 21, 22, 23]))
def test_maximize_shape(self):
values = ['A', 'D', 'F', 'H']
phi3_max = self.phi3.maximize(values, inplace=False)
# Previously a sorting error caused these to be different
np_test.assert_array_equal(phi3_max.values.shape, phi3_max.cardinality)
phi = DiscreteFactor([self.var1, self.var2, self.var3], [3, 2, 2], [3, 2, 4, 5, 9, 8, 3, 2, 4, 5, 9, 8])
phi_max = phi.marginalize([self.var1, self.var2], inplace=False)
np_test.assert_array_equal(phi_max.values.shape, phi_max.cardinality)
def test_maximize_scopeerror(self):
self.assertRaises(ValueError, self.phi.maximize, ['x10'])
def test_maximize_typeerror(self):
self.assertRaises(TypeError, self.phi.maximize, 'x1')
def tearDown(self):
del self.phi
del self.phi1
del self.phi2
del self.phi3
del self.phi4
del self.phi5
del self.phi6
del self.phi7
del self.phi8
del self.phi9
del self.phi10
class TestHash:
# Used to check the hash function of DiscreteFactor class.
def __init__(self, x, y):
self.x = x
self.y = y
def __hash__(self):
return hash(str(self.x) + str(self.y))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.x == other.x and self.y == other.y
class TestTabularCPDInit(unittest.TestCase):
def test_cpd_init(self):
cpd = TabularCPD('grade', 3, [[0.1, 0.1, 0.1]])
self.assertEqual(cpd.variable, 'grade')
self.assertEqual(cpd.variable_card, 3)
self.assertEqual(list(cpd.variables), ['grade'])
np_test.assert_array_equal(cpd.cardinality, np.array([3]))
np_test.assert_array_almost_equal(cpd.values, np.array([0.1, 0.1, 0.1]))
values = [[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.8, 0.8, 0.8, 0.8, 0.8, 0.8]]
evidence = ['intel', 'diff']
evidence_card = [3, 2]
valid_value_inputs = [values, np.asarray(values)]
valid_evidence_inputs = [evidence, set(evidence), np.asarray(evidence)]
valid_evidence_card_inputs = [evidence_card, np.asarray(evidence_card)]
for value in valid_value_inputs:
for evidence in valid_evidence_inputs:
for evidence_card in valid_evidence_card_inputs:
cpd = TabularCPD('grade', 3, values, evidence=['intel', 'diff'], evidence_card=[3, 2])
self.assertEqual(cpd.variable, 'grade')
self.assertEqual(cpd.variable_card, 3)
np_test.assert_array_equal(cpd.cardinality, np.array([3, 3, 2]))
self.assertListEqual(list(cpd.variables), ['grade', 'intel', 'diff'])
np_test.assert_array_equal(cpd.values, np.array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1,
0.1, 0.1, 0.1, 0.1, 0.1, 0.1,
0.8, 0.8, 0.8, 0.8, 0.8, 0.8]).reshape(3, 3, 2))
cpd = TabularCPD('grade', 3, [[0.1, 0.1],
[0.1, 0.1],
[0.8, 0.8]],
evidence=['evi1'], evidence_card=[2.0])
self.assertEqual(cpd.variable, 'grade')
self.assertEqual(cpd.variable_card, 3)
np_test.assert_array_equal(cpd.cardinality, np.array([3, 2]))
self.assertListEqual(list(cpd.variables), ['grade', 'evi1'])
np_test.assert_array_equal(cpd.values, np.array([0.1, 0.1,
0.1, 0.1,
0.8, 0.8]).reshape(3, 2))
def test_cpd_init_event_card_not_int(self):
self.assertRaises(TypeError, TabularCPD, 'event', '2', [[0.1, 0.9]])
def test_cpd_init_cardinality_not_specified(self):
self.assertRaises(ValueError, TabularCPD, 'event', 3, [[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.8, 0.8, 0.8, 0.8, 0.8, 0.8]],
['evi1', 'evi2'], [5])
self.assertRaises(ValueError, TabularCPD, 'event', 3, [[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.8, 0.8, 0.8, 0.8, 0.8, 0.8]],
['evi1', 'evi2'], [5.0])
self.assertRaises(ValueError, TabularCPD, 'event', 3, [[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.8, 0.8, 0.8, 0.8, 0.8, 0.8]],
['evi1'], [5, 6])
self.assertRaises(TypeError, TabularCPD, 'event', 3, [[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.8, 0.8, 0.8, 0.8, 0.8, 0.8]],
'evi1', [5, 6])
def test_cpd_init_value_not_2d(self):
self.assertRaises(TypeError, TabularCPD, 'event', 3, [[[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.8, 0.8, 0.8, 0.8, 0.8, 0.8]]],
['evi1', 'evi2'], [5, 6])
class TestTabularCPDMethods(unittest.TestCase):
def setUp(self):
self.cpd = TabularCPD('grade', 3, [[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.8, 0.8, 0.8, 0.8, 0.8, 0.8]],
evidence=['intel', 'diff'], evidence_card=[3, 2])
self.cpd2 = TabularCPD('J', 2, [[0.9, 0.3, 0.9, 0.3, 0.8, 0.8, 0.4, 0.4],
[0.1, 0.7, 0.1, 0.7, 0.2, 0.2, 0.6, 0.6]],
evidence=['A', 'B', 'C'], evidence_card=[2, 2, 2])
def test_marginalize_1(self):
self.cpd.marginalize(['diff'])
self.assertEqual(self.cpd.variable, 'grade')
self.assertEqual(self.cpd.variable_card, 3)
self.assertListEqual(list(self.cpd.variables), ['grade', 'intel'])
np_test.assert_array_equal(self.cpd.cardinality, np.array([3, 3]))
np_test.assert_array_equal(self.cpd.values.ravel(), np.array([0.1, 0.1, 0.1,
0.1, 0.1, 0.1,
0.8, 0.8, 0.8]))
def test_marginalize_2(self):
self.assertRaises(ValueError, self.cpd.marginalize, ['grade'])
def test_marginalize_3(self):
copy_cpd = self.cpd.copy()
copy_cpd.marginalize(['intel', 'diff'])
self.cpd.marginalize(['intel'])
self.cpd.marginalize(['diff'])
np_test.assert_array_almost_equal(self.cpd.values, copy_cpd.values)
def test_normalize(self):
cpd_un_normalized = TabularCPD('grade', 2, [[0.7, 0.2, 0.6, 0.2], [0.4, 0.4, 0.4, 0.8]],
['intel', 'diff'], [2, 2])
cpd_un_normalized.normalize()
np_test.assert_array_almost_equal(cpd_un_normalized.values, np.array([[[0.63636364, 0.33333333],
[0.6, 0.2]],
[[0.36363636, 0.66666667],
[0.4, 0.8]]]))
def test_normalize_not_in_place(self):
cpd_un_normalized = TabularCPD('grade', 2, [[0.7, 0.2, 0.6, 0.2], [0.4, 0.4, 0.4, 0.8]],
['intel', 'diff'], [2, 2])
np_test.assert_array_almost_equal(cpd_un_normalized.normalize(inplace=False).values,
np.array([[[0.63636364, 0.33333333],
[0.6, 0.2]],
[[0.36363636, 0.66666667],
[0.4, 0.8]]]))
def test_normalize_original_safe(self):
cpd_un_normalized = TabularCPD('grade', 2, [[0.7, 0.2, 0.6, 0.2], [0.4, 0.4, 0.4, 0.8]],
['intel', 'diff'], [2, 2])
cpd_un_normalized.normalize(inplace=False)
np_test.assert_array_almost_equal(cpd_un_normalized.values, np.array([[[0.7, 0.2], [0.6, 0.2]],
[[0.4, 0.4], [0.4, 0.8]]]))
def test__repr__(self):
grade_cpd = TabularCPD('grade', 3, [[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.8, 0.8, 0.8, 0.8, 0.8, 0.8]],
evidence=['intel', 'diff'], evidence_card=[3, 2])
intel_cpd = TabularCPD('intel', 3, [[0.5], [0.3], [0.2]])
diff_cpd = TabularCPD('grade', 3, [[0.1, 0.1], [0.1, 0.1], [0.8, 0.8]], evidence=['diff'], evidence_card=[2])
self.assertEqual(repr(grade_cpd), '<TabularCPD representing P(grade:3 | intel:3, diff:2) at {address}>'
.format(address=hex(id(grade_cpd))))
self.assertEqual(repr(intel_cpd), '<TabularCPD representing P(intel:3) at {address}>'
.format(address=hex(id(intel_cpd))))
self.assertEqual(repr(diff_cpd), '<TabularCPD representing P(grade:3 | diff:2) at {address}>'
.format(address=hex(id(diff_cpd))))
def test_copy(self):
copy_cpd = self.cpd.copy()
np_test.assert_array_equal(self.cpd.get_values(), copy_cpd.get_values())
def test_copy_original_safe(self):
copy_cpd = self.cpd.copy()
copy_cpd.reorder_parents(['diff', 'intel'])
np_test.assert_array_equal(self.cpd.get_values(),
np.array([[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.8, 0.8, 0.8, 0.8, 0.8, 0.8]]))
def test_reduce_1(self):
self.cpd.reduce([('diff', 0)])
np_test.assert_array_equal(self.cpd.get_values(), np.array([[0.1, 0.1, 0.1],
[0.1, 0.1, 0.1],
[0.8, 0.8, 0.8]]))
def test_reduce_2(self):
self.cpd.reduce([('intel', 0)])
np_test.assert_array_equal(self.cpd.get_values(), np.array([[0.1, 0.1],
[0.1, 0.1],
[0.8, 0.8]]))
def test_reduce_3(self):
self.cpd.reduce([('intel', 0), ('diff', 0)])
np_test.assert_array_equal(self.cpd.get_values(), np.array([[0.1],
[0.1],
[0.8]]))
def test_reduce_4(self):
self.assertRaises(ValueError, self.cpd.reduce, [('grade', 0)])
def test_reduce_5(self):
copy_cpd = self.cpd.copy()
copy_cpd.reduce([('intel', 2), ('diff', 1)])
self.cpd.reduce([('intel', 2)])
self.cpd.reduce([('diff', 1)])
np_test.assert_array_almost_equal(self.cpd.values, copy_cpd.values)
def test_get_values(self):
np_test.assert_array_equal(self.cpd.get_values(),
np.array([[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.8, 0.8, 0.8, 0.8, 0.8, 0.8]]))
def test_reorder_parents_inplace(self):
new_vals = self.cpd2.reorder_parents(['B', 'A', 'C'])
np_test.assert_array_equal(new_vals, np.array([[0.9, 0.3, 0.8, 0.8, 0.9, 0.3, 0.4, 0.4],
[0.1, 0.7, 0.2, 0.2, 0.1, 0.7, 0.6, 0.6]]))
np_test.assert_array_equal(self.cpd2.get_values(),
np.array([[0.9, 0.3, 0.8, 0.8, 0.9, 0.3, 0.4, 0.4],
[0.1, 0.7, 0.2, 0.2, 0.1, 0.7, 0.6, 0.6]]))
def test_reorder_parents(self):
new_vals = self.cpd2.reorder_parents(['B', 'A', 'C'])
np_test.assert_array_equal(new_vals, np.array([[0.9, 0.3, 0.8, 0.8, 0.9, 0.3, 0.4, 0.4],
[0.1, 0.7, 0.2, 0.2, 0.1, 0.7, 0.6, 0.6]]))
def test_reorder_parents_no_effect(self):
self.cpd2.reorder_parents(['C', 'A', 'B'], inplace=False)
np_test.assert_array_equal(self.cpd2.get_values(),
np.array([[0.9, 0.3, 0.9, 0.3, 0.8, 0.8, 0.4, 0.4],
[0.1, 0.7, 0.1, 0.7, 0.2, 0.2, 0.6, 0.6]]))
def test_reorder_parents_warning(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
self.cpd2.reorder_parents(['A', 'B', 'C'], inplace=False)
assert("Same ordering provided as current" in str(w[-1].message))
np_test.assert_array_equal(self.cpd2.get_values(),
np.array([[0.9, 0.3, 0.9, 0.3, 0.8, 0.8, 0.4, 0.4],
[0.1, 0.7, 0.1, 0.7, 0.2, 0.2, 0.6, 0.6]]))
def tearDown(self):
del self.cpd
class TestJointProbabilityDistributionInit(unittest.TestCase):
def test_jpd_init(self):
jpd = JPD(['x1', 'x2', 'x3'], [2, 3, 2], np.ones(12) / 12)
np_test.assert_array_equal(jpd.cardinality, np.array([2, 3, 2]))
np_test.assert_array_equal(jpd.values, np.ones(12).reshape(2, 3, 2) / 12)
self.assertEqual(jpd.get_cardinality(['x1', 'x2', 'x3']), {'x1': 2, 'x2': 3, 'x3': 2})
def test_jpd_init_exception(self):
self.assertRaises(ValueError, JPD, ['x1', 'x2', 'x3'], [2, 2, 2], np.ones(8))
class TestJointProbabilityDistributionMethods(unittest.TestCase):
def setUp(self):
self.jpd = JPD(['x1', 'x2', 'x3'], [2, 3, 2], values=np.ones(12) / 12)
self.jpd1 = JPD(['x1', 'x2', 'x3'], [2, 3, 2], values=np.ones(12) / 12)
self.jpd2 = JPD(['x1', 'x2', 'x3'], [2, 2, 3],
[0.126, 0.168, 0.126, 0.009, 0.045, 0.126, 0.252, 0.0224, 0.0056, 0.06, 0.036, 0.024])
self.jpd3 = JPD(['x1', 'x2', 'x3'], [2, 2, 2],
[5.0e-04, 5.225e-04, 0.00, 8.9775e-03, 9.9e-03, 5.39055e-02, 0.00, 9.261945e-01])
def test_jpd_marginal_distribution_list(self):
self.jpd.marginal_distribution(['x1', 'x2'])
np_test.assert_array_almost_equal(self.jpd.values,
np.array([[0.16666667, 0.16666667, 0.16666667],
[0.16666667, 0.16666667, 0.16666667]]))
np_test.assert_array_equal(self.jpd.cardinality, np.array([2, 3]))
dic = {'x1': 2, 'x2': 3}
self.assertEqual(self.jpd.get_cardinality(['x1', 'x2']), dic)
self.assertEqual(self.jpd.scope(), ['x1', 'x2'])
np_test.assert_almost_equal(np.sum(self.jpd.values), 1)
new_jpd = self.jpd1.marginal_distribution(['x1', 'x2'], inplace=False)
self.assertTrue(self.jpd1 != self.jpd)
self.assertTrue(new_jpd == self.jpd)
def test_marginal_distribution_str(self):
self.jpd.marginal_distribution('x1')
np_test.assert_array_almost_equal(self.jpd.values, np.array([0.5, 0.5]))
np_test.assert_array_equal(self.jpd.cardinality, np.array([2]))
self.assertEqual(self.jpd.scope(), ['x1'])
np_test.assert_almost_equal(np.sum(self.jpd.values), 1)
new_jpd = self.jpd1.marginal_distribution('x1', inplace=False)
self.assertTrue(self.jpd1 != self.jpd)
self.assertTrue(self.jpd == new_jpd)
def test_conditional_distribution_list(self):
self.jpd = self.jpd1.copy()
self.jpd.conditional_distribution([('x1', 1), ('x2', 0)])
np_test.assert_array_almost_equal(self.jpd.values, np.array([0.5, 0.5]))
np_test.assert_array_equal(self.jpd.cardinality, np.array([2]))
self.assertEqual(self.jpd.scope(), ['x3'])
np_test.assert_almost_equal(np.sum(self.jpd.values), 1)
new_jpd = self.jpd1.conditional_distribution([('x1', 1), ('x2', 0)], inplace=False)
self.assertTrue(self.jpd1 != self.jpd)
self.assertTrue(self.jpd == new_jpd)
def test_check_independence(self):
self.assertTrue(self.jpd2.check_independence(['x1'], ['x2']))
self.assertRaises(TypeError, self.jpd2.check_independence, 'x1', ['x2'])
self.assertRaises(TypeError, self.jpd2.check_independence, ['x1'], 'x2')
self.assertRaises(TypeError, self.jpd2.check_independence, ['x1'], ['x2'], 'x3')
self.assertFalse(self.jpd2.check_independence(['x1'], ['x2'], ('x3',), condition_random_variable=True))
self.assertFalse(self.jpd2.check_independence(['x1'], ['x2'], [('x3', 0)]))
self.assertTrue(self.jpd1.check_independence(['x1'], ['x2'], ('x3',), condition_random_variable=True))
self.assertTrue(self.jpd1.check_independence(['x1'], ['x2'], [('x3', 1)]))
self.assertTrue(self.jpd3.check_independence(['x1'], ['x2'], ('x3',), condition_random_variable=True))
def test_get_independencies(self):
independencies = Independencies(['x1', 'x2'], ['x2', 'x3'], ['x3', 'x1'])
independencies1 = Independencies(['x1', 'x2'])
self.assertEqual(self.jpd1.get_independencies(), independencies)
self.assertEqual(self.jpd2.get_independencies(), independencies1)
self.assertEqual(self.jpd1.get_independencies([('x3', 0)]), independencies1)
self.assertEqual(self.jpd2.get_independencies([('x3', 0)]), Independencies())
def test_minimal_imap(self):
bm = self.jpd1.minimal_imap(order=['x1', 'x2', 'x3'])
self.assertEqual(sorted(bm.edges()), sorted([('x1', 'x3'), ('x2', 'x3')]))
bm = self.jpd1.minimal_imap(order=['x2', 'x3', 'x1'])
self.assertEqual(sorted(bm.edges()), sorted([('x2', 'x1'), ('x3', 'x1')]))
bm = self.jpd2.minimal_imap(order=['x1', 'x2', 'x3'])
self.assertEqual(list(bm.edges()), [])
bm = self.jpd2.minimal_imap(order=['x1', 'x2'])
self.assertEqual(list(bm.edges()), [])
def test_repr(self):
self.assertEqual(repr(self.jpd1), '<Joint Distribution representing P(x1:2, x2:3, x3:2) at {address}>'.format(
address=hex(id(self.jpd1))))
def test_is_imap(self):
G1 = BayesianModel([('diff', 'grade'), ('intel', 'grade')])
diff_cpd = TabularCPD('diff', 2, [[0.2], [0.8]])
intel_cpd = TabularCPD('intel', 3, [[0.5], [0.3], [0.2]])
grade_cpd = TabularCPD('grade', 3,
[[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.8, 0.8, 0.8, 0.8, 0.8, 0.8]],
evidence=['diff', 'intel'],
evidence_card=[2, 3])
G1.add_cpds(diff_cpd, intel_cpd, grade_cpd)
val = [0.01, 0.01, 0.08, 0.006, 0.006, 0.048, 0.004, 0.004, 0.032,
0.04, 0.04, 0.32, 0.024, 0.024, 0.192, 0.016, 0.016, 0.128]
jpd = JPD(['diff', 'intel', 'grade'], [2, 3, 3], val)
self.assertTrue(jpd.is_imap(G1))
self.assertRaises(TypeError, jpd.is_imap, MarkovModel())
def tearDown(self):
del self.jpd
del self.jpd1
del self.jpd2
del self.jpd3
#
# class TestTreeCPDInit(unittest.TestCase):
# def test_init_single_variable_nodes(self):
# tree = TreeCPD([('B', DiscreteFactor(['A'], [2], [0.8, 0.2]), 0),
# ('B', 'C', 1),
# ('C', DiscreteFactor(['A'], [2], [0.1, 0.9]), 0),
# ('C', 'D', 1),
# ('D', DiscreteFactor(['A'], [2], [0.9, 0.1]), 0),
# ('D', DiscreteFactor(['A'], [2], [0.4, 0.6]), 1)])
#
# self.assertTrue('B' in tree.nodes())
# self.assertTrue('C' in tree.nodes())
# self.assertTrue('D' in tree.nodes())
# self.assertTrue(DiscreteFactor(['A'], [2], [0.8, 0.2]) in tree.nodes())
# self.assertTrue(DiscreteFactor(['A'], [2], [0.1, 0.9]) in tree.nodes())
# self.assertTrue(DiscreteFactor(['A'], [2], [0.9, 0.1]) in tree.nodes())
# self.assertTrue(DiscreteFactor(['A'], [2], [0.4, 0.6]) in tree.nodes())
#
# self.assertTrue(('B', DiscreteFactor(['A'], [2], [0.8, 0.2]) in tree.edges()))
# self.assertTrue(('B', DiscreteFactor(['A'], [2], [0.1, 0.9]) in tree.edges()))
# self.assertTrue(('B', DiscreteFactor(['A'], [2], [0.9, 0.1]) in tree.edges()))
# self.assertTrue(('B', DiscreteFactor(['A'], [2], [0.4, 0.6]) in tree.edges()))
# self.assertTrue(('C', 'D') in tree.edges())
# self.assertTrue(('B', 'C') in tree.edges())
#
# self.assertEqual(tree['B'][DiscreteFactor(['A'], [2], [0.8, 0.2])]['label'], 0)
# self.assertEqual(tree['B']['C']['label'], 1)
# self.assertEqual(tree['C'][DiscreteFactor(['A'], [2], [0.1, 0.9])]['label'], 0)
# self.assertEqual(tree['C']['D']['label'], 1)
# self.assertEqual(tree['D'][DiscreteFactor(['A'], [2], [0.9, 0.1])]['label'], 0)
# self.assertEqual(tree['D'][DiscreteFactor(['A'], [2], [0.4, 0.6])]['label'], 1)
#
# self.assertRaises(ValueError, tree.add_edges_from, [('F', 'G')])
#
# def test_init_self_loop(self):
# self.assertRaises(ValueError, TreeCPD, [('B', 'B', 0)])
#
# def test_init_cycle(self):
# self.assertRaises(ValueError, TreeCPD, [('A', 'B', 0), ('B', 'C', 1), ('C', 'A', 0)])
#
# def test_init_multi_variable_nodes(self):
# tree = TreeCPD([(('B', 'C'), DiscreteFactor(['A'], [2], [0.8, 0.2]), (0, 0)),
# (('B', 'C'), 'D', (0, 1)),
# (('B', 'C'), DiscreteFactor(['A'], [2], [0.1, 0.9]), (1, 0)),
# (('B', 'C'), 'E', (1, 1)),
# ('D', DiscreteFactor(['A'], [2], [0.9, 0.1]), 0),
# ('D', DiscreteFactor(['A'], [2], [0.4, 0.6]), 1),
# ('E', DiscreteFactor(['A'], [2], [0.3, 0.7]), 0),
# ('E', DiscreteFactor(['A'], [2], [0.8, 0.2]), 1)
# ])
#
# self.assertTrue(('B', 'C') in tree.nodes())
# self.assertTrue('D' in tree.nodes())
# self.assertTrue('E' in tree.nodes())
# self.assertTrue(DiscreteFactor(['A'], [2], [0.8, 0.2]) in tree.nodes())
# self.assertTrue(DiscreteFactor(['A'], [2], [0.9, 0.1]) in tree.nodes())
#
# self.assertTrue((('B', 'C'), DiscreteFactor(['A'], [2], [0.8, 0.2]) in tree.edges()))
# self.assertTrue((('B', 'C'), 'E') in tree.edges())
# self.assertTrue(('D', DiscreteFactor(['A'], [2], [0.4, 0.6])) in tree.edges())
# self.assertTrue(('E', DiscreteFactor(['A'], [2], [0.8, 0.2])) in tree.edges())
#
# self.assertEqual(tree[('B', 'C')][DiscreteFactor(['A'], [2], [0.8, 0.2])]['label'], (0, 0))
# self.assertEqual(tree[('B', 'C')]['D']['label'], (0, 1))
# self.assertEqual(tree['D'][DiscreteFactor(['A'], [2], [0.9, 0.1])]['label'], 0)
# self.assertEqual(tree['E'][DiscreteFactor(['A'], [2], [0.3, 0.7])]['label'], 0)
#
#
# class TestTreeCPD(unittest.TestCase):
# def setUp(self):
# self.tree1 = TreeCPD([('B', DiscreteFactor(['A'], [2], [0.8, 0.2]), '0'),
# ('B', 'C', '1'),
# ('C', DiscreteFactor(['A'], [2], [0.1, 0.9]), '0'),
# ('C', 'D', '1'),
# ('D', DiscreteFactor(['A'], [2], [0.9, 0.1]), '0'),
# ('D', DiscreteFactor(['A'], [2], [0.4, 0.6]), '1')])
#
# self.tree2 = TreeCPD([('C','A','0'),('C','B','1'),
# ('A', DiscreteFactor(['J'], [2], [0.9, 0.1]), '0'),
# ('A', DiscreteFactor(['J'], [2], [0.3, 0.7]), '1'),
# ('B', DiscreteFactor(['J'], [2], [0.8, 0.2]), '0'),
# ('B', DiscreteFactor(['J'], [2], [0.4, 0.6]), '1')])
#
# def test_add_edge(self):
# self.tree1.add_edge('yolo', 'yo', 0)
# self.assertTrue('yolo' in self.tree1.nodes() and 'yo' in self.tree1.nodes())
# self.assertTrue(('yolo', 'yo') in self.tree1.edges())
# self.assertEqual(self.tree1['yolo']['yo']['label'], 0)
#
# def test_add_edges_from(self):
# self.tree1.add_edges_from([('yolo', 'yo', 0), ('hello', 'world', 1)])
# self.assertTrue('yolo' in self.tree1.nodes() and 'yo' in self.tree1.nodes() and
# 'hello' in self.tree1.nodes() and 'world' in self.tree1.nodes())
# self.assertTrue(('yolo', 'yo') in self.tree1.edges())
# self.assertTrue(('hello', 'world') in self.tree1.edges())
# self.assertEqual(self.tree1['yolo']['yo']['label'], 0)
# self.assertEqual(self.tree1['hello']['world']['label'], 1)
#
# def test_to_tabular_cpd(self):
# tabular_cpd = self.tree1.to_tabular_cpd()
# self.assertEqual(tabular_cpd.evidence, ['D', 'C', 'B'])
# self.assertEqual(tabular_cpd.evidence_card, [2, 2, 2])
# self.assertEqual(list(tabular_cpd.variables), ['A', 'B', 'C', 'D'])
# np_test.assert_array_equal(tabular_cpd.values,
# np.array([0.8, 0.8, 0.8, 0.8, 0.1, 0.1, 0.9, 0.4,
# 0.2, 0.2, 0.2, 0.2, 0.9, 0.9, 0.1, 0.6]))
#
# tabular_cpd = self.tree2.to_tabular_cpd()
# self.assertEqual(tabular_cpd.evidence, ['A', 'B', 'C'])
# self.assertEqual(tabular_cpd.evidence_card, [2, 2, 2])
# self.assertEqual(list(tabular_cpd.variables), ['J', 'C', 'B', 'A'])
# np_test.assert_array_equal(tabular_cpd.values,
# np.array([ 0.9, 0.3, 0.9, 0.3, 0.8, 0.8, 0.4, 0.4,
# 0.1, 0.7, 0.1, 0.7, 0.2, 0.2, 0.6, 0.6]))
#
# @unittest.skip('Not implemented yet')
# def test_to_tabular_cpd_parent_order(self):
# tabular_cpd = self.tree1.to_tabular_cpd('A', parents_order=['D', 'C', 'B'])
# self.assertEqual(tabular_cpd.evidence, ['D', 'C', 'B'])
# self.assertEqual(tabular_cpd.evidence_card, [2, 2, 2])
# self.assertEqual(list(tabular_cpd.variables), ['A', 'D', 'C', 'B'])
# np_test.assert_array_equal(tabular_cpd.values,
# np.array([0.8, 0.1, 0.8, 0.9, 0.8, 0.1, 0.8, 0.4,
# 0.2, 0.9, 0.2, 0.1, 0.2, 0.9, 0.2, 0.6]))
#
# tabular_cpd = self.tree2.to_tabular_cpd('A', parents_order=['E', 'D', 'C', 'B'])
#
# @unittest.skip('Not implemented yet')
# def test_to_rule_cpd(self):
# rule_cpd = self.tree1.to_rule_cpd()
# self.assertEqual(rule_cpd.cardinality(), {'A': 2, 'B': 2, 'C': 2, 'D': 2})
# self.assertEqual(rule_cpd.scope(), {'A', 'B', 'C', 'D'})
# self.assertEqual(rule_cpd.variable, 'A')
# self.assertEqual(rule_cpd.rules, {('A_0', 'B_0'): 0.8,
# ('A_1', 'B_0'): 0.2,
# ('A_0', 'B_1', 'C_0'): 0.1,
# ('A_0', 'B_1', 'C_1', 'D_0'): 0.9,
# ('A_1', 'B_1', 'C_1', 'D_0'): 0.1,
# ('A_0', 'B_1', 'C_1', 'D_1'): 0.4,
# ('A_1', 'B_1', 'C_1', 'D_1'): 0.6})
#
# rule_cpd = self.tree2.to_rule_cpd()
# self.assertEqual(rule_cpd.cardinality(), {'A': 2, 'B': 2, 'C': 2, 'D': 2, 'E': 2})
# self.assertEqual(rule_cpd.scope(), {'A', 'B', 'C', 'D', 'E'})
# self.assertEqual(rule_cpd.variable, 'A')
# self.assertEqual(rule_cpd.rules, {('A_0', 'B_0', 'C_0'): 0.8,
# ('A_1', 'B_0', 'C_0'): 0.2,
# ('A_0', 'B_0', 'C_1', 'D_0'): 0.9,
# ('A_1', 'B_0', 'C_1', 'D_0'): 0.1,
# ('A_0', 'B_0', 'C_1', 'D_1'): 0.4,
# ('A_1', 'B_0', 'C_1', 'D_1'): 0.6,
# ('A_0', 'B_1', 'C_0'): 0.1,
# ('A_1', 'B_1', 'C_0'): 0.9,
# ('A_0', 'B_1', 'C_1', 'E_0'): 0.3,
# ('A_1', 'B_1', 'C_1', 'E_0'): 0.7,
# ('A_0', 'B_1', 'C_1', 'E_1'): 0.8,
# ('A_1', 'B_1', 'C_1', 'E_1'): 0.2})
#
#
# class TestRuleCPDInit(unittest.TestCase):
# def test_init_without_errors_rules_none(self):
# rule_cpd = RuleCPD('A')
# self.assertEqual(rule_cpd.variable, 'A')
#
# def test_init_without_errors_rules_not_none(self):
# rule_cpd = RuleCPD('A', {('A_0', 'B_0'): 0.8,
# ('A_1', 'B_0'): 0.2,
# ('A_0', 'B_1', 'C_0'): 0.4,
# ('A_1', 'B_1', 'C_0'): 0.6,
# ('A_0', 'B_1', 'C_1'): 0.9,
# ('A_1', 'B_1', 'C_1'): 0.1})
# self.assertEqual(rule_cpd.variable, 'A')
# self.assertEqual(rule_cpd.rules, {('A_0', 'B_0'): 0.8,
# ('A_1', 'B_0'): 0.2,
# ('A_0', 'B_1', 'C_0'): 0.4,
# ('A_1', 'B_1', 'C_0'): 0.6,
# ('A_0', 'B_1', 'C_1'): 0.9,
# ('A_1', 'B_1', 'C_1'): 0.1})
#
# def test_init_with_errors(self):
# self.assertRaises(ValueError, RuleCPD, 'A', {('A_0',): 0.5,
# ('A_0', 'B_0'): 0.8,
# ('A_1', 'B_0'): 0.2,
# ('A_0', 'B_1', 'C_0'): 0.4,
# ('A_1', 'B_1', 'C_0'): 0.6,
# ('A_0', 'B_1', 'C_1'): 0.9,
# ('A_1', 'B_1', 'C_1'): 0.1})
#
#
# class TestRuleCPDMethods(unittest.TestCase):
# def setUp(self):
# self.rule_cpd_with_rules = RuleCPD('A', {('A_0', 'B_0'): 0.8,
# ('A_1', 'B_0'): 0.2,
# ('A_0', 'B_1', 'C_0'): 0.4,
# ('A_1', 'B_1', 'C_0'): 0.6})
# self.rule_cpd_without_rules = RuleCPD('A')
#
# def test_add_rules_single(self):
# self.rule_cpd_with_rules.add_rules({('A_0', 'B_1', 'C_1'): 0.9})
# self.assertEqual(self.rule_cpd_with_rules.rules, {('A_0', 'B_0'): 0.8,
# ('A_1', 'B_0'): 0.2,
# ('A_0', 'B_1', 'C_0'): 0.4,
# ('A_1', 'B_1', 'C_0'): 0.6,
# ('A_0', 'B_1', 'C_1'): 0.9})
# self.assertEqual(self.rule_cpd_with_rules.variable, 'A')
# self.rule_cpd_without_rules.add_rules({('A_0', 'B_1', 'C_1'): 0.9})
# self.assertEqual(self.rule_cpd_without_rules.rules, {('A_0', 'B_1', 'C_1'): 0.9})
# self.assertEqual(self.rule_cpd_without_rules.variable, 'A')
#
# def test_add_rules_multiple(self):
# self.rule_cpd_with_rules.add_rules({('A_0', 'B_1', 'C_1'): 0.9,
# ('A_1', 'B_1', 'C_1'): 0.1})
# self.assertEqual(self.rule_cpd_with_rules.rules, {('A_0', 'B_0'): 0.8,
# ('A_1', 'B_0'): 0.2,
# ('A_0', 'B_1', 'C_0'): 0.4,
# ('A_1', 'B_1', 'C_0'): 0.6,
# ('A_0', 'B_1', 'C_1'): 0.9,
# ('A_1', 'B_1', 'C_1'): 0.1})
# self.assertEqual(self.rule_cpd_with_rules.variable, 'A')
# self.rule_cpd_without_rules.add_rules({('A_0', 'B_1', 'C_1'): 0.9,
# ('A_1', 'B_1', 'C_1'): 0.1})
# self.assertEqual(self.rule_cpd_without_rules.rules, {('A_0', 'B_1', 'C_1'): 0.9,
# ('A_1', 'B_1', 'C_1'): 0.1})
# self.assertEqual(self.rule_cpd_without_rules.variable, 'A')
#
# def test_add_rules_error(self):
# self.assertRaises(ValueError, self.rule_cpd_with_rules.add_rules, {('A_0',): 0.8})
#
# def test_scope(self):
# self.assertEqual(self.rule_cpd_with_rules.scope(), {'A', 'B', 'C'})
# self.assertEqual(self.rule_cpd_without_rules.scope(), set())
#
# def test_cardinality(self):
# self.assertEqual(self.rule_cpd_with_rules.cardinality(), {'A': 2, 'B': 2, 'C': 1})
# self.assertEqual(self.rule_cpd_without_rules.cardinality(), {})
#
# def tearDown(self):
# del self.rule_cpd_without_rules
#
|
the-stack_0_21621 | # pragma: no cover
from . import Scraper, _user_agent
import argparse
def scrapeshell():
# clear argv for IPython
import sys
orig_argv = sys.argv[1:]
sys.argv = sys.argv[:1]
try:
from IPython import embed
except ImportError:
print('scrapeshell requires ipython >= 0.11')
return
try:
import lxml.html
USE_LXML = True
except ImportError:
USE_LXML = False
parser = argparse.ArgumentParser(prog='scrapeshell',
description='interactive python shell for'
' scraping')
parser.add_argument('url', help="url to scrape")
parser.add_argument('--ua', dest='user_agent', default=_user_agent,
help='user agent to make requests with')
parser.add_argument('-p', '--postdata', dest='postdata',
default=None,
help="POST data (will make a POST instead of GET)")
args = parser.parse_args(orig_argv)
scraper = Scraper()
scraper.user_agent = args.user_agent
url = args.url
if args.postdata:
html = scraper.urlopen(args.url, 'POST', args.postdata)
else:
html = scraper.urlopen(args.url)
if USE_LXML:
doc = lxml.html.fromstring(html.bytes) # noqa
print('local variables')
print('---------------')
print('url: %s' % url)
print('html: `scrapelib.ResultStr` instance')
if USE_LXML:
print('doc: `lxml HTML element`')
else:
print('doc not available: lxml not installed')
embed()
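# Example invocation (hypothetical URL; assumes this module is run as the
# package's command-line entry point):
#   python -m scrapelib https://example.com --ua "my-agent/1.0"
# This drops into an IPython shell with `url`, `html` and, when lxml is
# available, `doc` already defined.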
if __name__ == '__main__':
    scrapeshell()
|
the-stack_0_21623 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import glob as gb
import numpy as np
import cv2
import argparse
from deepsort_tracker.reid_model import Extractor
from sklearn.manifold import TSNE
from matplotlib import pyplot as plt
class AppearanceFeature(object):
def __init__(self, model_path, use_cuda=True):
self.extractor = Extractor(model_path, use_cuda=use_cuda)
def update(self, output_results, img_file_name):
ori_img = cv2.imread(img_file_name)
self.height, self.width = ori_img.shape[:2]
bboxes = output_results[:, :4] # x1y1x2y2
bbox_xyxy = bboxes
bbox_tlwh = self._xyxy_to_tlwh_array(bbox_xyxy)
# generate detections
features = self._get_features(bbox_tlwh, ori_img)
return features
@staticmethod
def _xyxy_to_tlwh_array(bbox_xyxy):
if isinstance(bbox_xyxy, np.ndarray):
bbox_tlwh = bbox_xyxy.copy()
elif isinstance(bbox_xyxy, torch.Tensor):
bbox_tlwh = bbox_xyxy.clone()
bbox_tlwh[:, 2] = bbox_xyxy[:, 2] - bbox_xyxy[:, 0]
bbox_tlwh[:, 3] = bbox_xyxy[:, 3] - bbox_xyxy[:, 1]
return bbox_tlwh
def _tlwh_to_xyxy(self, bbox_tlwh):
"""
TODO:
Convert bbox from xtl_ytl_w_h to xc_yc_w_h
Thanks [email protected] for reporting this bug!
"""
x, y, w, h = bbox_tlwh
x1 = max(int(x), 0)
x2 = min(int(x+w), self.width - 1)
y1 = max(int(y), 0)
y2 = min(int(y+h), self.height - 1)
return x1, y1, x2, y2
def _xyxy_to_tlwh(self, bbox_xyxy):
x1, y1, x2, y2 = bbox_xyxy
t = x1
l = y1
w = int(x2 - x1)
h = int(y2 - y1)
return t, l, w, h
def _get_features(self, bbox_xywh, ori_img):
im_crops = []
for box in bbox_xywh:
x1, y1, x2, y2 = self._tlwh_to_xyxy(box)
im = ori_img[y1:y2, x1:x2]
im_crops.append(im)
if im_crops:
features = self.extractor(im_crops)
features = np.asarray(features) / np.linalg.norm(features, axis=1, keepdims=True)
else:
features = np.array([])
return features
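# Minimal sketch (illustration only, not part of the tracker): because
# _get_features returns L2-normalized rows, the dot product of two rows is
# their cosine similarity, so 1 - feats @ feats.T is the pairwise
# cosine-distance matrix used in the analysis loop below.
def _cosine_distance_example():
    feats = np.array([[1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
    feats = feats / np.linalg.norm(feats, axis=1, keepdims=True)
    dist = 1.0 - np.dot(feats, feats.T)
    # diagonal entries are 0 (each feature against itself);
    # orthogonal rows give a distance of 1
    return dist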
parser = argparse.ArgumentParser()
parser.add_argument('--start', default=0, type=int)
parser.add_argument('--end', default=25, type=int)
args = parser.parse_args()
# dataset = 'mot/val'
dataset = 'dancetrack/val'
val_pred = 'oracle_analysis/val_appearance'
if not os.path.exists(val_pred):
os.makedirs(val_pred)
video_cosine_dist_ret = []
val_seqs = sorted(os.listdir(dataset))[args.start:args.end+1]
for video_name in val_seqs:
print(video_name)
det_results = {}
with open(os.path.join(dataset, video_name, 'gt/gt.txt'), 'r') as f:
for line in f.readlines():
linelist = line.split(',')
img_id = linelist[0]
bbox = [float(linelist[2]),
float(linelist[3]),
float(linelist[2]) + float(linelist[4]),
float(linelist[3]) + float(linelist[5]),
float(linelist[1])]
if int(linelist[7]) == 1:
if int(img_id) in det_results:
det_results[int(img_id)].append(bbox)
else:
det_results[int(img_id)] = list()
det_results[int(img_id)].append(bbox)
f.close()
cosine_dist_ret = []
star_idx = len(gb.glob(os.path.join(dataset, video_name, 'img1') + "/*.jpg")) // 2 + 1
tracker = AppearanceFeature(model_path='ckpt.t7')
for frame_id in sorted(det_results.keys()):
dets = det_results[frame_id]
dets = np.array(dets)
# image_path = os.path.join(dataset, video_name, 'img1', '{:0>6d}.jpg'.format(frame_id + star_idx))
image_path = os.path.join(dataset, video_name, 'img1', '{:0>8d}.jpg'.format(frame_id))
appearance_feat = tracker.update(dets, image_path)
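        # appearance_feat rows are L2-normalized, so 1 - feat @ feat.T is a pairwise
        # cosine-distance matrix; averaging all N*N entries (including the zero
        # diagonal) yields one scalar per frame.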
cosine_dist_mat = 1. - np.dot(appearance_feat, appearance_feat.T)
cosine_dist = cosine_dist_mat.sum() / len(appearance_feat) / len(appearance_feat)
cosine_dist_ret.append(cosine_dist)
video_cosine_dist_ret.append(sum(cosine_dist_ret) / len(cosine_dist_ret))
print(video_cosine_dist_ret)
import matplotlib.pyplot as plt
mot = [0.289, 0.327, 0.240, 0.224, 0.301, 0.262, 0.269]
dancetrack = [0.173, 0.150, 0.181, 0.181, 0.216, 0.176, 0.186, 0.215, 0.227, 0.181,
0.214, 0.172, 0.206, 0.204, 0.200, 0.236, 0.176, 0.172, 0.221, 0.170,
0.212, 0.233, 0.207, 0.229, 0.140]
mot_x = range(len(mot))
dancetrack_x = range(len(mot), len(mot) + len(dancetrack))
fig, ax = plt.subplots(figsize=(15, 5))
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
plt.bar(x=mot_x, height=mot, alpha=0.3, color='blue', label='MOT17')
plt.bar(x=dancetrack_x, height=dancetrack, alpha=0.3, color='red', label='DanceTrack')
plt.legend(fontsize=16)
plt.xticks([])
plt.ylim((0.10, 0.35))
plt.title("Cosine distance of re-ID feature", fontsize=16)
plt.savefig('bar.pdf', bbox_inches='tight', dpi=100)
plt.close()
|
the-stack_0_21624 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import git
import math
from statsmodels.tsa.arima_model import ARIMA
from statsmodels.tsa.stattools import pacf
from pandas.plotting import autocorrelation_plot
from datetime import datetime
repo = git.Repo("./", search_parent_directories=True)
homedir = repo.working_dir
df_jhu = pd.read_csv(f"{homedir}/data/us/aggregate_jhu.csv")
# Get rid of the aggregate country data
df_jhu = df_jhu.drop([0])
# convert data into a string
df_jhu['FIPS'] = df_jhu['FIPS'].map(lambda f : str(f))
# make sure FIPS codes are all length 5 strings
def alter(fips):
if len(fips) == 4:
return '0' + fips
return fips
df_jhu['FIPS'] = df_jhu['FIPS'].map(alter)
df_jhu = df_jhu.set_index('FIPS')
df_jhu['fips'] = df_jhu.index.map(lambda s : int(s))
# gets list of all fips numbers
def get_fips():
Y = pd.read_csv(f"{homedir}/data/us/covid/nyt_us_counties_daily.csv")
return set(Y.fips.values)
# helper date function
def get_date(datestr, formatstr='%Y-%m-%d'):
return datetime.strptime(datestr, formatstr)
cum_deaths = pd.read_csv(f"{homedir}/data/us/covid/deaths.csv")
cum_deaths = cum_deaths.iloc[1:]
def get_cum_deaths(fips, clip_zeros=True):
""" function that returns cumulative death data for a county
Parameters
-----------
fips: int
FIPS code of county in question
clip_zeros: bool
When this is set to true, the function will only start reporting
when deaths start occurring
Returns
----------
(X, y) : (ndarray, ndarray)
X: array of number of days since Jan 1st
y: number of cumulative deaths
"""
idx = cum_deaths.index[cum_deaths['countyFIPS'] == fips].values[0]
county_deaths = cum_deaths.loc[cum_deaths['countyFIPS'] == fips]
dates = pd.to_datetime(county_deaths.columns[4:].values).map(lambda dt : str(dt))
X = np.array([(get_date(d[:10]) - get_date('2020-01-01')).days for d in dates])
y = []
for i in range(4, len(county_deaths.columns)):
y.append(county_deaths.loc[idx,county_deaths.columns[i]])
if not clip_zeros:
return X, y
for i in range(len(y)):
if y[i] != 0:
return X[i:], y[i:]
cum_cases = pd.read_csv(f"{homedir}/data/us/covid/confirmed_cases.csv")
cum_cases = cum_cases.iloc[1:]
cum_cases = cum_cases.iloc[:, :-1]
def get_cum_cases(fips,clip_zeros=False):
""" function that returns cumulative cases data for a county
Parameters
-----------
fips: int
FIPS code of county in question
clip_zeros: bool
When this is set to true, the function will only start reporting
when cases start occurring. WARNING: setting this to be true
could cause the return value to be None.
Returns
----------
(X, y) : (ndarray, ndarray)
X: array of number of days since Jan 1st
y: number of cumulative cases
"""
idx = cum_cases.index[cum_cases['countyFIPS'] == fips].values[0]
county_cases = cum_cases.loc[cum_cases['countyFIPS'] == fips]
dates = pd.to_datetime(county_cases.columns[4:].values).map(lambda dt : str(dt))
X = np.array([(get_date(d[:10]) - get_date('2020-01-01')).days for d in dates])
y = []
for i in range(4, len(county_cases.columns)):
y.append(county_cases.loc[idx,county_cases.columns[i]])
if not clip_zeros:
return X, y
for i in range(len(y)):
if y[i] != 0:
return X[i:], y[i:]
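# Hypothetical usage sketch (not part of the original pipeline): plot one
# county's cumulative death curve. The FIPS code default below is only an
# example value; any county present in the data works.
def _plot_cum_deaths_example(fips=1005):
    X, y = get_cum_deaths(fips)
    plt.plot(X, y)
    plt.xlabel('days since 2020-01-01')
    plt.ylabel('cumulative deaths')
    plt.title('Cumulative deaths, FIPS %d' % fips)
    plt.show()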
NYT_counties_daily = pd.read_csv(f"{homedir}/data/us/covid/nyt_us_counties_daily.csv")
Y_county = NYT_counties_daily.loc[NYT_counties_daily['fips'] == 1005, :]
def get_delta_deaths(fips, clip_zeros=False):
"""Returns the number of new deaths per day of a given county
Parameters
----------
fips: int
FIPS code of county in question
clip_zeros: bool
If set to true, it will only report data after the 1st death occurs
Returns
----------
(X, y): ndarray, ndarray
X: number of days since Jan 1st
y: number of deaths per day
"""
Y_county = NYT_counties_daily.loc[NYT_counties_daily['fips'] == fips, :]
Y_county.head()
start_date = '2020-01-01'
Y_county['time'] = Y_county['date'].map(lambda d : (get_date(d) - get_date(start_date)).days)
X, y = (Y_county.time.values, Y_county.deaths.values)
if not clip_zeros:
return X, y
for i in range(len(y)):
if y[i] != 0:
break
return X[i:], y[i:]
def get_delta_cases(fips, clip_zeros=False):
"""Returns the number of new cases per day of a given county
Parameters
----------
fips: int
FIPS code of county in question
clip_zeros: bool
If set to true, it will only report data after the 1st death occurs
Returns
----------
(X, y): ndarray, ndarray
X: number of days since Jan 1st
y: number of new cases per day
"""
Y_county = NYT_counties_daily.loc[NYT_counties_daily['fips'] == fips, :]
Y_county.head()
start_date = '2020-01-01'
Y_county['time'] = Y_county['date'].map(lambda d : (get_date(d) - get_date(start_date)).days)
X, y = (Y_county.time.values, Y_county.cases.values)
if not clip_zeros:
return X, y
for i in range(len(y)):
if y[i] != 0:
break
return X[i:], y[i:]
def get_delta_deaths_ratio(fips, clip_zeros=False, avg_period=5):
"""Returns the number of new deaths per day as a ratio over the running
average number of new deaths. When ratio is undefined, we set to 1
Parameters
----------
fips: int
FIPS code of county in question
clip_zeros: bool
If set to true, it will only report data after the 1st death occurs
avg_period: int
Length of running average to keep track of
Returns
----------
(X, y): ndarray, ndarray
X: number of days since Jan 1st
y: ratio number of deaths per day to the running average
"""
X_raw, y_raw = get_delta_deaths(fips, clip_zeros)
y = []
running_sum = 0.0
running_time = 0
for i in range(len(X_raw)):
if y_raw[i] == 0:
y.append(0)
elif running_sum == 0:
y.append(1) # if this is the first case we define the signal as 1
else:
avg = running_sum/running_time
y.append(y_raw[i]/avg)
if running_time == avg_period:
running_sum = running_sum + y_raw[i] - y_raw[i - avg_period]
else:
running_sum = running_sum + y_raw[i]
running_time = running_time + 1
if running_sum == 0:
running_time = 1
return (X_raw, np.array(y))
def get_delta_cases_ratio(fips, clip_zeros=False, avg_period=5):
"""Returns the number of new cases per day as a ratio over the running
average number of new cases
Parameters
----------
fips: int
FIPS code of county in question
clip_zeros: bool
If set to true, it will only report data after the 1st death occurs
avg_period: int
Length of running average to keep track of
Returns
----------
(X, y): ndarray, ndarray
X: number of days since Jan 1st
y: ratio number of cases per day to the running average
"""
X_raw, y_raw = get_delta_cases(fips, clip_zeros)
y = []
running_sum = 0.0
running_time = 0
for i in range(len(X_raw)):
if y_raw[i] == 0:
y.append(0)
elif running_sum == 0:
y.append(1) # if this is the first case we define the signal as 1
else:
avg = running_sum/running_time
y.append(y_raw[i]/avg)
if running_time == avg_period:
running_sum = running_sum + y_raw[i] - y_raw[i - avg_period]
else:
running_sum = running_sum + y_raw[i]
running_time = running_time + 1
if running_sum == 0:
running_time = 1
return (X_raw, np.array(y))
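# Worked example (illustration only): the running-average ratio used by the two
# *_ratio helpers above, applied to a synthetic daily series. With
# daily = (0, 2, 4, 3) and avg_period = 5 the output is [0, 1, 4.0, 1.5]:
# zero-count days give 0, the first nonzero day is defined as 1, and later days
# are divided by the running average accumulated so far.
def _delta_ratio_example(daily=(0, 2, 4, 3), avg_period=5):
    out = []
    running_sum = 0.0
    running_time = 0
    for i in range(len(daily)):
        if daily[i] == 0:
            out.append(0)
        elif running_sum == 0:
            out.append(1)
        else:
            out.append(daily[i] / (running_sum / running_time))
        if running_time == avg_period:
            running_sum = running_sum + daily[i] - daily[i - avg_period]
        else:
            running_sum = running_sum + daily[i]
            running_time = running_time + 1
        if running_sum == 0:
            running_time = 1
    return out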
def get_XY(features, delta_y, look_back_y, get_y):
"""
Quick-and-dirty helper that builds a feature table with lagged targets; it is rough and largely untested, so use with care.
"""
df = df_jhu[features]
df = df[df.fips % 1000 != 0]
df = df[df.State != 'PR'] # Puerto Rico has some weird data...
df = df[df.POP_ESTIMATE_2018 > 1000] # restrict to large counties since getting lots of data is difficult
# fill out missing data
df.at['02158', 'Area in square miles - Land area'] = 19673
df.at['02158', 'Density per square mile of land area - Population'] = 0.44
df.at['46102', 'Area in square miles - Land area'] = 2097
df.at['46102', 'Density per square mile of land area - Population'] = 6.5
n, d = df.shape
col_names = []
for i in range(look_back_y):
col_name = "y at t = -%d" %i
col_names.append(col_name)
df[col_name] = np.zeros(n)
Y = []
for fips in df.index:
X, ys = get_y(int(fips))
if len(ys) == 0:
Y.append(0)
continue
Y.append(ys[-1])
for i in range(look_back_y):
if i + delta_y < len(ys):
df.at[fips, col_names[i]] = ys[-1 - i - delta_y]
df['target'] = Y
return df
|
the-stack_0_21630 | """ Class for the Sequence to sequence model for ATIS."""
import torch
import torch.nn.functional as F
from . import torch_utils
import data_util.snippets as snippet_handler
import data_util.sql_util as sql_util
import data_util.vocabulary as vocab
from data_util.vocabulary import EOS_TOK, UNK_TOK
import data_util.tokenizers
from .token_predictor import construct_token_predictor
from .attention import Attention
from .model import ATISModel, encode_snippets_with_states, get_token_indices
from data_util.utterance import ANON_INPUT_KEY
from .encoder import Encoder, SchemaEncoder1
from .decoder import SequencePredictorWithSchema
from . import utils_bert
import data_util.atis_batch
from .gated_graph_conv import GatedGraphConv
from .encoder import Encoder, Encoder_Gnn
LIMITED_INTERACTIONS = {"raw/atis2/12-1.1/ATIS2/TEXT/TRAIN/SRI/QS0/1": 22,
"raw/atis3/17-1.1/ATIS3/SP_TRN/MIT/8K7/5": 14,
"raw/atis2/12-1.1/ATIS2/TEXT/TEST/NOV92/770/5": -1}
END_OF_INTERACTION = {"quit", "exit", "done"}
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class SchemaInteractionATISModel(ATISModel):
""" Interaction ATIS model, where an interaction is processed all at once.
"""
def __init__(self,
params,
input_vocabulary,
output_vocabulary,
output_vocabulary_schema,
anonymizer):
ATISModel.__init__(
self,
params,
input_vocabulary,
output_vocabulary,
output_vocabulary_schema,
anonymizer)
if self.params.use_schema_encoder:
# Create the schema encoder
schema_encoder_num_layer = 1
schema_encoder_input_size = params.input_embedding_size
schema_encoder_state_size = params.encoder_state_size
if params.use_bert:
schema_encoder_input_size = self.bert_config.hidden_size
self.schema_encoder = SchemaEncoder1(schema_encoder_num_layer, schema_encoder_input_size, schema_encoder_state_size)
if params.use_gnn and not params.use_bert:
# use bert to encoder nodes
self.model_bert, self.tokenizer, self.bert_config = utils_bert.get_bert(params)
encoder_input_size = self.bert_config.hidden_size
encoder_output_size = params.encoder_state_size
self.gnn = GatedGraphConv(encoder_output_size, 2, 3) #input_dim, num_timesteps, num_edge_types,
# two encoders: the second one summarizes from the BERT embeddings of the nodes, the first one generates one embedding for the schema
self.gnn_encoder1 = Encoder_Gnn(1, encoder_input_size, encoder_output_size) #num_layers, input_size, state_size
self.gnn_encoder2 = Encoder_Gnn(1, encoder_output_size, encoder_output_size)
# self-attention
if self.params.use_schema_self_attention:
self.schema2schema_attention_module = Attention(self.schema_attention_key_size, self.schema_attention_key_size, self.schema_attention_key_size)
# utterance level attention
if self.params.use_utterance_attention:
self.utterance_attention_module = Attention(self.params.encoder_state_size, self.params.encoder_state_size, self.params.encoder_state_size)
# Use attention module between input_hidden_states and schema_states
# schema_states: self.schema_attention_key_size x len(schema)
# input_hidden_states: self.utterance_attention_key_size x len(input)
if params.use_encoder_attention:
self.utterance2schema_attention_module = Attention(self.schema_attention_key_size, self.utterance_attention_key_size, self.utterance_attention_key_size)
self.schema2utterance_attention_module = Attention(self.utterance_attention_key_size, self.schema_attention_key_size, self.schema_attention_key_size)
new_attention_key_size = self.schema_attention_key_size + self.utterance_attention_key_size
self.schema_attention_key_size = new_attention_key_size
self.utterance_attention_key_size = new_attention_key_size
if self.params.use_schema_encoder_2:
self.schema_encoder_2 = Encoder(schema_encoder_num_layer, self.schema_attention_key_size, self.schema_attention_key_size)
self.utterance_encoder_2 = Encoder(params.encoder_num_layers, self.utterance_attention_key_size, self.utterance_attention_key_size)
self.token_predictor = construct_token_predictor(params,
output_vocabulary,
self.utterance_attention_key_size,
self.schema_attention_key_size,
self.final_snippet_size,
anonymizer)
# Use schema_attention in decoder
if params.use_schema_attention and params.use_query_attention:
decoder_input_size = params.output_embedding_size + self.utterance_attention_key_size + self.schema_attention_key_size + params.encoder_state_size
elif params.use_schema_attention:
decoder_input_size = params.output_embedding_size + self.utterance_attention_key_size + self.schema_attention_key_size
else:
decoder_input_size = params.output_embedding_size + self.utterance_attention_key_size
self.decoder = SequencePredictorWithSchema(params, decoder_input_size, self.output_embedder, self.column_name_token_embedder, self.token_predictor)
def predict_turn(self,
utterance_final_state,
input_hidden_states,
schema_states,
max_generation_length,
gold_query=None,
snippets=None,
input_sequence=None,
previous_queries=None,
previous_query_states=None,
input_schema=None,
feed_gold_tokens=False,
training=False):
""" Gets a prediction for a single turn -- calls decoder and updates loss, etc.
TODO: this can probably be split into two methods, one that just predicts
and another that computes the loss.
"""
predicted_sequence = []
fed_sequence = []
loss = None
token_accuracy = 0.
if self.params.use_encoder_attention:
schema_attention = self.utterance2schema_attention_module(torch.stack(schema_states,dim=0), input_hidden_states).vector # input_value_size x len(schema)
utterance_attention = self.schema2utterance_attention_module(torch.stack(input_hidden_states,dim=0), schema_states).vector # schema_value_size x len(input)
if schema_attention.dim() == 1:
schema_attention = schema_attention.unsqueeze(1)
if utterance_attention.dim() == 1:
utterance_attention = utterance_attention.unsqueeze(1)
new_schema_states = torch.cat([torch.stack(schema_states, dim=1), schema_attention], dim=0) # (input_value_size+schema_value_size) x len(schema)
schema_states = list(torch.split(new_schema_states, split_size_or_sections=1, dim=1))
schema_states = [schema_state.squeeze() for schema_state in schema_states]
new_input_hidden_states = torch.cat([torch.stack(input_hidden_states, dim=1), utterance_attention], dim=0) # (input_value_size+schema_value_size) x len(input)
input_hidden_states = list(torch.split(new_input_hidden_states, split_size_or_sections=1, dim=1))
input_hidden_states = [input_hidden_state.squeeze() for input_hidden_state in input_hidden_states]
# bi-lstm over schema_states and input_hidden_states (embedder is an identify function)
if self.params.use_schema_encoder_2:
final_schema_state, schema_states = self.schema_encoder_2(schema_states, lambda x: x, dropout_amount=self.dropout)
final_utterance_state, input_hidden_states = self.utterance_encoder_2(input_hidden_states, lambda x: x, dropout_amount=self.dropout)
if feed_gold_tokens:
decoder_results = self.decoder(utterance_final_state,
input_hidden_states,
schema_states,
max_generation_length,
gold_sequence=gold_query,
input_sequence=input_sequence,
previous_queries=previous_queries,
previous_query_states=previous_query_states,
input_schema=input_schema,
snippets=snippets,
dropout_amount=self.dropout)
all_scores = []
all_alignments = []
for prediction in decoder_results.predictions:
scores = F.softmax(prediction.scores, dim=0)
alignments = prediction.aligned_tokens
if self.params.use_previous_query and self.params.use_copy_switch and len(previous_queries) > 0:
query_scores = F.softmax(prediction.query_scores, dim=0)
copy_switch = prediction.copy_switch
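                    # Mix the generation and copy distributions with the predicted switch:
                    # P(token) = (1 - copy_switch) * P_gen(token) + copy_switch * P_copy(token),
                    # and extend the alignment list with the previous-query tokens accordingly.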
scores = torch.cat([scores * (1 - copy_switch), query_scores * copy_switch], dim=0)
alignments = alignments + prediction.query_tokens
all_scores.append(scores)
all_alignments.append(alignments)
# Compute the loss
gold_sequence = gold_query
loss = torch_utils.compute_loss(gold_sequence, all_scores, all_alignments, get_token_indices)
if not training:
predicted_sequence = torch_utils.get_seq_from_scores(all_scores, all_alignments)
token_accuracy = torch_utils.per_token_accuracy(gold_sequence, predicted_sequence)
fed_sequence = gold_sequence
else:
decoder_results = self.decoder(utterance_final_state,
input_hidden_states,
schema_states,
max_generation_length,
input_sequence=input_sequence,
previous_queries=previous_queries,
previous_query_states=previous_query_states,
input_schema=input_schema,
snippets=snippets,
dropout_amount=self.dropout)
predicted_sequence = decoder_results.sequence
fed_sequence = predicted_sequence
decoder_states = [pred.decoder_state for pred in decoder_results.predictions]
# fed_sequence contains EOS, which we don't need when encoding snippets.
# also ignore the first state, as it contains the BEG encoding.
for token, state in zip(fed_sequence[:-1], decoder_states[1:]):
if snippet_handler.is_snippet(token):
snippet_length = 0
for snippet in snippets:
if snippet.name == token:
snippet_length = len(snippet.sequence)
break
assert snippet_length > 0
decoder_states.extend([state for _ in range(snippet_length)])
else:
decoder_states.append(state)
return (predicted_sequence,
loss,
token_accuracy,
decoder_states,
decoder_results)
def encode_schema_bow_simple(self, input_schema):
schema_states = []
for column_name in input_schema.column_names_embedder_input:
schema_states.append(input_schema.column_name_embedder_bow(column_name, surface_form=False, column_name_token_embedder=self.column_name_token_embedder))
input_schema.set_column_name_embeddings(schema_states)
return schema_states
def encode_schema_self_attention(self, schema_states):
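        # Each column's state attends over all schema states; the attended vectors
        # are split back per column and added to the originals as a residual.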
schema_self_attention = self.schema2schema_attention_module(torch.stack(schema_states,dim=0), schema_states).vector
if schema_self_attention.dim() == 1:
schema_self_attention = schema_self_attention.unsqueeze(1)
residual_schema_states = list(torch.split(schema_self_attention, split_size_or_sections=1, dim=1))
residual_schema_states = [schema_state.squeeze() for schema_state in residual_schema_states]
new_schema_states = [schema_state+residual_schema_state for schema_state, residual_schema_state in zip(schema_states, residual_schema_states)]
return new_schema_states
def encode_schema(self, input_schema, dropout=False):
schema_states = []
for column_name_embedder_input in input_schema.column_names_embedder_input:
tokens = column_name_embedder_input.split()
if dropout:
final_schema_state_one, schema_states_one = self.schema_encoder(tokens, self.column_name_token_embedder, dropout_amount=self.dropout)
else:
final_schema_state_one, schema_states_one = self.schema_encoder(tokens, self.column_name_token_embedder)
# final_schema_state_one: 1 means hidden_states instead of cell_memories, -1 means last layer
schema_states.append(final_schema_state_one[1][-1])
input_schema.set_column_name_embeddings(schema_states)
# self-attention over schema_states
if self.params.use_schema_self_attention:
schema_states = self.encode_schema_self_attention(schema_states)
return schema_states
def get_gnn_encoding(self,input_schema, gnn,gnn_encoder1):
schema_states, column_names = utils_bert.get_gnn_encoding(self.tokenizer, self.model_bert,None, input_schema,gnn, gnn_encoder1,embedder=self.input_embedder,bert_input_version=self.params.bert_input_version) # tokenizer,input_sequence,input_schema,bert_input_version='v1',num_out_layers_h=1, max_seq_length=512,num_out_layers_n=1
input_schema.set_column_name_embeddings(schema_states,column_names=column_names)
# self-attention over schema_states
if self.params.use_schema_self_attention:
schema_states = self.encode_schema_self_attention(schema_states)
return schema_states
def get_bert_encoding(self, input_sequence, input_schema, discourse_state, dropout, gnn=None, use_gnn=False):
utterance_states, schema_token_states, relations = utils_bert.get_bert_encoding(self.bert_config, self.model_bert, self.tokenizer, input_sequence, input_schema, bert_input_version=self.params.bert_input_version, gnn=self.gnn,use_gnn=self.params.use_gnn ,num_out_layers_n=1, num_out_layers_h=1)
if self.params.discourse_level_lstm:
utterance_token_embedder = lambda x: torch.cat([x, discourse_state], dim=0)
else:
utterance_token_embedder = lambda x: x
if dropout:
final_utterance_state, utterance_states = self.utterance_encoder(
utterance_states,
utterance_token_embedder,
dropout_amount=self.dropout)
else:
final_utterance_state, utterance_states = self.utterance_encoder(
utterance_states,
utterance_token_embedder)
schema_states = []
for schema_token_states1 in schema_token_states:
if dropout:
final_schema_state_one, schema_states_one = self.schema_encoder(schema_token_states1, lambda x: x, dropout_amount=self.dropout)
else:
final_schema_state_one, schema_states_one = self.schema_encoder(schema_token_states1, lambda x: x)
# final_schema_state_one: 1 means hidden_states instead of cell_memories, -1 means last layer
schema_states.append(final_schema_state_one[1][-1])
if use_gnn:
# print(len(schema_states),schema_states[0].size())
relations = [torch.tensor(i, dtype=torch.long).to(device) for i in relations]
# print(333333,relations, all_encoder_layer.size())
schema_token_states = torch.cat([i.unsqueeze(0) for i in schema_states],0)
schema_token_states = [i for i in gnn(schema_token_states,relations)]
input_schema.set_column_name_embeddings(schema_states)
# self-attention over schema_states
if self.params.use_schema_self_attention:
schema_states = self.encode_schema_self_attention(schema_states)
return final_utterance_state, utterance_states, schema_states
def get_query_token_embedding(self, output_token, input_schema):
if input_schema:
if not (self.output_embedder.in_vocabulary(output_token) or input_schema.in_vocabulary(output_token, surface_form=True)):
output_token = 'value'
if self.output_embedder.in_vocabulary(output_token):
output_token_embedding = self.output_embedder(output_token)
else:
output_token_embedding = input_schema.column_name_embedder(output_token, surface_form=True)
else:
output_token_embedding = self.output_embedder(output_token)
return output_token_embedding
def get_utterance_attention(self, final_utterance_states_c, final_utterance_states_h, final_utterance_state, num_utterances_to_keep):
# self-attention between utterance_states
final_utterance_states_c.append(final_utterance_state[0][0])
final_utterance_states_h.append(final_utterance_state[1][0])
final_utterance_states_c = final_utterance_states_c[-num_utterances_to_keep:]
final_utterance_states_h = final_utterance_states_h[-num_utterances_to_keep:]
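        # Attend the most recent final utterance state over the kept window of
        # previous final states and add the attention vector back as a residual,
        # separately for the cell (c) and hidden (h) components.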
attention_result = self.utterance_attention_module(final_utterance_states_c[-1], final_utterance_states_c)
final_utterance_state_attention_c = final_utterance_states_c[-1] + attention_result.vector.squeeze()
attention_result = self.utterance_attention_module(final_utterance_states_h[-1], final_utterance_states_h)
final_utterance_state_attention_h = final_utterance_states_h[-1] + attention_result.vector.squeeze()
final_utterance_state = ([final_utterance_state_attention_c],[final_utterance_state_attention_h])
return final_utterance_states_c, final_utterance_states_h, final_utterance_state
def get_previous_queries(self, previous_queries, previous_query_states, previous_query, input_schema):
previous_queries.append(previous_query)
num_queries_to_keep = min(self.params.maximum_queries, len(previous_queries))
previous_queries = previous_queries[-num_queries_to_keep:]
query_token_embedder = lambda query_token: self.get_query_token_embedding(query_token, input_schema)
_, previous_outputs = self.query_encoder(previous_query, query_token_embedder, dropout_amount=self.dropout)
assert len(previous_outputs) == len(previous_query)
previous_query_states.append(previous_outputs)
previous_query_states = previous_query_states[-num_queries_to_keep:]
return previous_queries, previous_query_states
def train_step(self, interaction, max_generation_length, snippet_alignment_probability=1.):
""" Trains the interaction-level model on a single interaction.
Inputs:
interaction (Interaction): The interaction to train on.
max_generation_length (int): Maximum length of a query to decode or feed.
snippet_alignment_probability (float): The probability that a snippet will
be used in constructing the gold sequence.
"""
# assert self.params.discourse_level_lstm
losses = []
total_gold_tokens = 0
input_hidden_states = []
input_sequences = []
final_utterance_states_c = []
final_utterance_states_h = []
previous_query_states = []
previous_queries = []
decoder_states = []
discourse_state = None
if self.params.discourse_level_lstm:
discourse_state, discourse_lstm_states = self._initialize_discourse_states()
discourse_states = []
# Schema and schema embeddings
input_schema = interaction.get_schema()
schema_states = []
# here
if input_schema and not self.params.use_bert and not self.params.use_gnn:
schema_states = self.encode_schema_bow_simple(input_schema)
if input_schema and self.params.use_gnn and not self.params.use_bert:
schema_states = self.get_gnn_encoding(input_schema,self.gnn,self.gnn_encoder1)
# _,schema_states = self.gnn_encoder2(schema_states)
for utterance_index, utterance in enumerate(interaction.gold_utterances()):
if interaction.identifier in LIMITED_INTERACTIONS and utterance_index > LIMITED_INTERACTIONS[interaction.identifier]:
break
input_sequence = utterance.input_sequence()
available_snippets = utterance.snippets()
previous_query = utterance.previous_query()
# Get the gold query: reconstruct if the alignment probability is less than one
if snippet_alignment_probability < 1.:
gold_query = sql_util.add_snippets_to_query(
available_snippets,
utterance.contained_entities(),
utterance.anonymized_gold_query(),
prob_align=snippet_alignment_probability) + [vocab.EOS_TOK]
else:
gold_query = utterance.gold_query()
# Encode the utterance, and update the discourse-level states
if not self.params.use_bert:
if self.params.discourse_level_lstm:
utterance_token_embedder = lambda token: torch.cat([self.input_embedder(token), discourse_state], dim=0)
else:
utterance_token_embedder = self.input_embedder
final_utterance_state, utterance_states = self.utterance_encoder(
input_sequence,
utterance_token_embedder,
dropout_amount=self.dropout)
else:
final_utterance_state, utterance_states, schema_states = self.get_bert_encoding(input_sequence, input_schema, discourse_state, gnn=self.gnn,use_gnn=self.params.use_gnn,dropout=True)
input_hidden_states.extend(utterance_states)
input_sequences.append(input_sequence)
num_utterances_to_keep = min(self.params.maximum_utterances, len(input_sequences))
# final_utterance_state[1][0] is the first layer's hidden states at the last time step (concat forward lstm and backward lstm)
if self.params.discourse_level_lstm:
_, discourse_state, discourse_lstm_states = torch_utils.forward_one_multilayer(self.discourse_lstms, final_utterance_state[1][0], discourse_lstm_states, self.dropout)
if self.params.use_utterance_attention:
final_utterance_states_c, final_utterance_states_h, final_utterance_state = self.get_utterance_attention(final_utterance_states_c, final_utterance_states_h, final_utterance_state, num_utterances_to_keep)
if self.params.state_positional_embeddings:
utterance_states, flat_sequence = self._add_positional_embeddings(input_hidden_states, input_sequences)
else:
flat_sequence = []
for utt in input_sequences[-num_utterances_to_keep:]:
flat_sequence.extend(utt)
snippets = None
if self.params.use_snippets:
if self.params.previous_decoder_snippet_encoding:
snippets = encode_snippets_with_states(available_snippets, decoder_states)
else:
snippets = self._encode_snippets(previous_query, available_snippets, input_schema)
if self.params.use_previous_query and len(previous_query) > 0:
previous_queries, previous_query_states = self.get_previous_queries(previous_queries, previous_query_states, previous_query, input_schema)
if len(gold_query) <= max_generation_length and len(previous_query) <= max_generation_length:
prediction = self.predict_turn(final_utterance_state,
utterance_states,
schema_states,
max_generation_length,
gold_query=gold_query,
snippets=snippets,
input_sequence=flat_sequence,
previous_queries=previous_queries,
previous_query_states=previous_query_states,
input_schema=input_schema,
feed_gold_tokens=True,
training=True)
loss = prediction[1]
decoder_states = prediction[3]
total_gold_tokens += len(gold_query)
losses.append(loss)
else:
# Break if previous decoder snippet encoding -- because the previous
# sequence was too long to run the decoder.
if self.params.previous_decoder_snippet_encoding:
break
continue
torch.cuda.empty_cache()
if losses:
average_loss = torch.sum(torch.stack(losses)) / total_gold_tokens
# Renormalize so the effect is normalized by the batch size.
normalized_loss = average_loss
if self.params.reweight_batch:
normalized_loss = len(losses) * average_loss / float(self.params.batch_size)
normalized_loss.backward()
self.trainer.step()
if self.params.fine_tune_bert:
self.bert_trainer.step()
self.zero_grad()
loss_scalar = normalized_loss.item()
else:
loss_scalar = 0.
return loss_scalar
def predict_with_predicted_queries(self, interaction, max_generation_length, syntax_restrict=True):
""" Predicts an interaction, using the predicted queries to get snippets."""
# assert self.params.discourse_level_lstm
syntax_restrict=False
predictions = []
input_hidden_states = []
input_sequences = []
final_utterance_states_c = []
final_utterance_states_h = []
previous_query_states = []
previous_queries = []
discourse_state = None
if self.params.discourse_level_lstm:
discourse_state, discourse_lstm_states = self._initialize_discourse_states()
discourse_states = []
# Schema and schema embeddings
input_schema = interaction.get_schema()
schema_states = []
if input_schema and not self.params.use_bert and not self.params.use_gnn:
schema_states = self.encode_schema_bow_simple(input_schema)
if input_schema and self.params.use_gnn and not self.params.use_bert:
schema_states = self.get_gnn_encoding(input_schema,self.gnn,self.gnn_encoder1)
# _,schema_states = self.gnn_encoder2(schema_states)
interaction.start_interaction()
while not interaction.done():
utterance = interaction.next_utterance()
available_snippets = utterance.snippets()
previous_query = utterance.previous_query()
input_sequence = utterance.input_sequence()
if not self.params.use_bert:
if self.params.discourse_level_lstm:
utterance_token_embedder = lambda token: torch.cat([self.input_embedder(token), discourse_state], dim=0)
else:
utterance_token_embedder = self.input_embedder
final_utterance_state, utterance_states = self.utterance_encoder(
input_sequence,
utterance_token_embedder)
else:
final_utterance_state, utterance_states, schema_states = self.get_bert_encoding(input_sequence, input_schema, discourse_state,gnn=self.gnn,use_gnn=self.params.use_gnn, dropout=False)
input_hidden_states.extend(utterance_states)
input_sequences.append(input_sequence)
num_utterances_to_keep = min(self.params.maximum_utterances, len(input_sequences))
if self.params.discourse_level_lstm:
_, discourse_state, discourse_lstm_states = torch_utils.forward_one_multilayer(self.discourse_lstms, final_utterance_state[1][0], discourse_lstm_states)
if self.params.use_utterance_attention:
final_utterance_states_c, final_utterance_states_h, final_utterance_state = self.get_utterance_attention(final_utterance_states_c, final_utterance_states_h, final_utterance_state, num_utterances_to_keep)
if self.params.state_positional_embeddings:
utterance_states, flat_sequence = self._add_positional_embeddings(input_hidden_states, input_sequences)
else:
flat_sequence = []
for utt in input_sequences[-num_utterances_to_keep:]:
flat_sequence.extend(utt)
snippets = None
if self.params.use_snippets:
snippets = self._encode_snippets(previous_query, available_snippets, input_schema)
if self.params.use_previous_query and len(previous_query) > 0:
previous_queries, previous_query_states = self.get_previous_queries(previous_queries, previous_query_states, previous_query, input_schema)
results = self.predict_turn(final_utterance_state,
utterance_states,
schema_states,
max_generation_length,
input_sequence=flat_sequence,
previous_queries=previous_queries,
previous_query_states=previous_query_states,
input_schema=input_schema,
snippets=snippets)
predicted_sequence = results[0]
predictions.append(results)
# Update things necessary for using predicted queries
anonymized_sequence = utterance.remove_snippets(predicted_sequence)
if EOS_TOK in anonymized_sequence:
anonymized_sequence = anonymized_sequence[:-1] # Remove _EOS
else:
anonymized_sequence = ['select', '*', 'from', 't1']
if not syntax_restrict:
utterance.set_predicted_query(interaction.remove_snippets(predicted_sequence))
if input_schema:
# on SParC
interaction.add_utterance(utterance, anonymized_sequence, previous_snippets=utterance.snippets(), simple=True)
else:
# on ATIS
interaction.add_utterance(utterance, anonymized_sequence, previous_snippets=utterance.snippets(), simple=False)
else:
utterance.set_predicted_query(utterance.previous_query())
interaction.add_utterance(utterance, utterance.previous_query(), previous_snippets=utterance.snippets())
return predictions
def predict_with_gold_queries(self, interaction, max_generation_length, feed_gold_query=False):
""" Predicts SQL queries for an interaction.
Inputs:
interaction (Interaction): Interaction to predict for.
feed_gold_query (bool): Whether or not to feed the gold token to the
generation step.
"""
# assert self.params.discourse_level_lstm
predictions = []
input_hidden_states = []
input_sequences = []
final_utterance_states_c = []
final_utterance_states_h = []
previous_query_states = []
previous_queries = []
decoder_states = []
discourse_state = None
if self.params.discourse_level_lstm:
discourse_state, discourse_lstm_states = self._initialize_discourse_states()
discourse_states = []
# Schema and schema embeddings
input_schema = interaction.get_schema()
schema_states = []
if input_schema and not self.params.use_bert and not self.params.use_gnn:
schema_states = self.encode_schema_bow_simple(input_schema)
if input_schema and self.params.use_gnn and not self.params.use_bert:
schema_states = self.get_gnn_encoding(input_schema,self.gnn,self.gnn_encoder1)
# _,schema_states = self.gnn_encoder2(schema_states)
for utterance in interaction.gold_utterances():
input_sequence = utterance.input_sequence()
available_snippets = utterance.snippets()
previous_query = utterance.previous_query()
# Encode the utterance, and update the discourse-level states
if not self.params.use_bert:
if self.params.discourse_level_lstm:
utterance_token_embedder = lambda token: torch.cat([self.input_embedder(token), discourse_state], dim=0)
else:
utterance_token_embedder = self.input_embedder
final_utterance_state, utterance_states = self.utterance_encoder(
input_sequence,
utterance_token_embedder,
dropout_amount=self.dropout)
else:
final_utterance_state, utterance_states, schema_states = self.get_bert_encoding(input_sequence, input_schema, discourse_state, dropout=True)
input_hidden_states.extend(utterance_states)
input_sequences.append(input_sequence)
num_utterances_to_keep = min(self.params.maximum_utterances, len(input_sequences))
if self.params.discourse_level_lstm:
_, discourse_state, discourse_lstm_states = torch_utils.forward_one_multilayer(self.discourse_lstms, final_utterance_state[1][0], discourse_lstm_states, self.dropout)
if self.params.use_utterance_attention:
final_utterance_states_c, final_utterance_states_h, final_utterance_state = self.get_utterance_attention(final_utterance_states_c, final_utterance_states_h, final_utterance_state, num_utterances_to_keep)
if self.params.state_positional_embeddings:
utterance_states, flat_sequence = self._add_positional_embeddings(input_hidden_states, input_sequences)
else:
flat_sequence = []
for utt in input_sequences[-num_utterances_to_keep:]:
flat_sequence.extend(utt)
snippets = None
if self.params.use_snippets:
if self.params.previous_decoder_snippet_encoding:
snippets = encode_snippets_with_states(available_snippets, decoder_states)
else:
snippets = self._encode_snippets(previous_query, available_snippets, input_schema)
if self.params.use_previous_query and len(previous_query) > 0:
previous_queries, previous_query_states = self.get_previous_queries(previous_queries, previous_query_states, previous_query, input_schema)
prediction = self.predict_turn(final_utterance_state,
utterance_states,
schema_states,
max_generation_length,
gold_query=utterance.gold_query(),
snippets=snippets,
input_sequence=flat_sequence,
previous_queries=previous_queries,
previous_query_states=previous_query_states,
input_schema=input_schema,
feed_gold_tokens=feed_gold_query)
decoder_states = prediction[3]
predictions.append(prediction)
return predictions
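# Illustrative evaluation sketch for the two entry points above. The `model`
# and `interactions` objects and the generation length are assumptions for
# illustration; substitute the project's actual model instance and data.
def _example_evaluate(model, interactions, max_generation_length=100):
    """Minimal sketch: teacher-forced decoding vs. fully predicted context."""
    all_predictions = []
    for interaction in interactions:
        # Gold previous queries are used as decoding context.
        gold_results = model.predict_with_gold_queries(
            interaction, max_generation_length, feed_gold_query=False)
        # The model's own predicted queries become the context (deployment-like).
        predicted_results = model.predict_with_predicted_queries(
            interaction, max_generation_length)
        all_predictions.append((gold_results, predicted_results))
    return all_predictions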
|
the-stack_0_21632 | # totally didn't do this
# reads as boring as it can be
def reverse(nums: list[int], repeat: int):
knot = list(range(256))
pos = 0
skip = 0
for _i in range(repeat):
for i in nums:
temp = []
for j in range(i):
temp.append(knot[(pos + j) % 256])
for j in range(i):
knot[(pos + i - 1 - j) % 256] = temp[j]
pos += skip + i
skip += 1
return knot
def densehash(knot: list[int]):
dense = [0] * 16
for i in range(16):
dense[i] = knot[16 * i]
for m in range(1, 16):
dense[i] ^= knot[16 * i + m]
return dense
def kh(dense: list[int]):
knothash = ""
for i in dense:
knothash += format(i, "02x")
return knothash
def p1(line: str):
nums = list(map(int, line.split(",")))
knot = reverse(nums, 1)
return knot[0] * knot[1]
def p2(line: str):
nums = list()
for i in range(len(line)):
nums.append(ord(line[i]))
nums += [17, 31, 73, 47, 23]
sparce = reverse(nums, 64)
dense = densehash(sparce)
return kh(dense)
input = "230,1,2,221,97,252,168,169,57,99,0,254,181,255,235,167"
assert p1(input) == 2928
assert p2(input) == "0c2f794b2eb555f7830766bf8fb65a16"
|
the-stack_0_21633 | # Copyright (c) 2015-2018 Cisco Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from __future__ import print_function
import binascii
import io
import os
import colorama
import pytest
import sh
from molecule import util
colorama.init(autoreset=True)
def test_print_debug(capsys):
util.print_debug('test_title', 'test_data')
result, _ = capsys.readouterr()
title = [
colorama.Back.WHITE,
colorama.Style.BRIGHT,
colorama.Fore.BLACK,
'DEBUG: test_title',
colorama.Fore.RESET,
colorama.Back.RESET,
colorama.Style.RESET_ALL,
]
print(''.join(title))
data = [
colorama.Fore.BLACK,
colorama.Style.BRIGHT,
'test_data',
colorama.Style.RESET_ALL,
colorama.Fore.RESET,
]
print(''.join(data))
x, _ = capsys.readouterr()
assert x == result
def test_print_environment_vars(capsys):
env = {
'ANSIBLE_FOO': 'foo',
'ANSIBLE_BAR': 'bar',
'ANSIBLE': None,
'MOLECULE_FOO': 'foo',
'MOLECULE_BAR': 'bar',
'MOLECULE': None,
}
util.print_environment_vars(env)
result, _ = capsys.readouterr()
# Ansible Environment
title = [
colorama.Back.WHITE,
colorama.Style.BRIGHT,
colorama.Fore.BLACK,
'DEBUG: ANSIBLE ENVIRONMENT',
colorama.Fore.RESET,
colorama.Back.RESET,
colorama.Style.RESET_ALL,
]
print(''.join(title))
data = [
colorama.Fore.BLACK,
colorama.Style.BRIGHT,
util.safe_dump({'ANSIBLE_FOO': 'foo', 'ANSIBLE_BAR': 'bar'}),
colorama.Style.RESET_ALL,
colorama.Fore.RESET,
]
print(''.join(data))
# Molecule Environment
title = [
colorama.Back.WHITE,
colorama.Style.BRIGHT,
colorama.Fore.BLACK,
'DEBUG: MOLECULE ENVIRONMENT',
colorama.Fore.RESET,
colorama.Back.RESET,
colorama.Style.RESET_ALL,
]
print(''.join(title))
data = [
colorama.Fore.BLACK,
colorama.Style.BRIGHT,
util.safe_dump({'MOLECULE_FOO': 'foo', 'MOLECULE_BAR': 'bar'}),
colorama.Style.RESET_ALL,
colorama.Fore.RESET,
]
print(''.join(data))
# Shell Replay
title = [
colorama.Back.WHITE,
colorama.Style.BRIGHT,
colorama.Fore.BLACK,
'DEBUG: SHELL REPLAY',
colorama.Fore.RESET,
colorama.Back.RESET,
colorama.Style.RESET_ALL,
]
print(''.join(title))
data = [
colorama.Fore.BLACK,
colorama.Style.BRIGHT,
'ANSIBLE_BAR=bar ANSIBLE_FOO=foo MOLECULE_BAR=bar MOLECULE_FOO=foo',
colorama.Style.RESET_ALL,
colorama.Fore.RESET,
]
print(''.join(data))
print()
x, _ = capsys.readouterr()
assert x == result
def test_sysexit():
with pytest.raises(SystemExit) as e:
util.sysexit()
assert 1 == e.value.code
def test_sysexit_with_custom_code():
with pytest.raises(SystemExit) as e:
util.sysexit(2)
assert 2 == e.value.code
def test_sysexit_with_message(patched_logger_critical):
with pytest.raises(SystemExit) as e:
util.sysexit_with_message('foo')
assert 1 == e.value.code
patched_logger_critical.assert_called_once_with('foo')
def test_sysexit_with_message_and_custom_code(patched_logger_critical):
with pytest.raises(SystemExit) as e:
util.sysexit_with_message('foo', 2)
assert 2 == e.value.code
patched_logger_critical.assert_called_once_with('foo')
def test_run_command():
cmd = sh.ls.bake()
x = util.run_command(cmd)
assert 0 == x.exit_code
def test_run_command_with_debug(mocker, patched_print_debug):
cmd = sh.ls.bake(_env={'ANSIBLE_FOO': 'foo', 'MOLECULE_BAR': 'bar'})
util.run_command(cmd, debug=True)
x = [
mocker.call('ANSIBLE ENVIRONMENT', '---\nANSIBLE_FOO: foo\n'),
mocker.call('MOLECULE ENVIRONMENT', '---\nMOLECULE_BAR: bar\n'),
mocker.call('SHELL REPLAY', 'ANSIBLE_FOO=foo MOLECULE_BAR=bar'),
mocker.call('COMMAND', sh.which('ls')),
]
assert x == patched_print_debug.mock_calls
def test_run_command_with_debug_handles_no_env(mocker, patched_print_debug):
cmd = sh.ls.bake()
util.run_command(cmd, debug=True)
x = [
mocker.call('ANSIBLE ENVIRONMENT', '--- {}\n'),
mocker.call('MOLECULE ENVIRONMENT', '--- {}\n'),
mocker.call('SHELL REPLAY', ''),
mocker.call('COMMAND', sh.which('ls')),
]
assert x == patched_print_debug.mock_calls
def test_os_walk(temp_dir):
scenarios = ['scenario1', 'scenario2', 'scenario3']
molecule_directory = pytest.helpers.molecule_directory()
for scenario in scenarios:
scenario_directory = os.path.join(molecule_directory, scenario)
molecule_file = pytest.helpers.get_molecule_file(scenario_directory)
os.makedirs(scenario_directory)
util.write_file(molecule_file, '')
result = [f for f in util.os_walk(molecule_directory, 'molecule.yml')]
assert 3 == len(result)
def test_render_template():
template = "{{ foo }} = {{ bar }}"
assert "foo = bar" == util.render_template(template, foo='foo', bar='bar')
def test_write_file(temp_dir):
dest_file = os.path.join(temp_dir.strpath, 'test_util_write_file.tmp')
contents = binascii.b2a_hex(os.urandom(15)).decode('utf-8')
util.write_file(dest_file, contents)
with util.open_file(dest_file) as stream:
data = stream.read()
x = '# Molecule managed\n\n{}'.format(contents)
assert x == data
def molecule_prepender(content):
x = '# Molecule managed\n\nfoo bar'
assert x == util.file_prepender('foo bar')
def test_safe_dump():
x = """
---
foo: bar
""".lstrip()
assert x == util.safe_dump({'foo': 'bar'})
def test_safe_dump_with_increase_indent():
data = {'foo': [{'foo': 'bar', 'baz': 'zzyzx'}]}
x = """
---
foo:
- baz: zzyzx
foo: bar
""".lstrip()
assert x == util.safe_dump(data)
def test_safe_load():
assert {'foo': 'bar'} == util.safe_load('foo: bar')
def test_safe_load_returns_empty_dict_on_empty_string():
assert {} == util.safe_load('')
def test_safe_load_exits_when_cannot_parse():
data = """
---
%foo:
""".strip()
with pytest.raises(SystemExit) as e:
util.safe_load(data)
assert 1 == e.value.code
def test_safe_load_file(temp_dir):
path = os.path.join(temp_dir.strpath, 'foo')
util.write_file(path, 'foo: bar')
assert {'foo': 'bar'} == util.safe_load_file(path)
def test_open_file(temp_dir):
path = os.path.join(temp_dir.strpath, 'foo')
util.write_file(path, 'foo: bar')
with util.open_file(path) as stream:
try:
file_types = (file, io.IOBase)
except NameError:
file_types = io.IOBase
assert isinstance(stream, file_types)
def test_instance_with_scenario_name():
assert 'foo-bar' == util.instance_with_scenario_name('foo', 'bar')
def test_strip_ansi_escape():
string = 'ls\r\n\x1b[00m\x1b[01;31mfoo\x1b[00m\r\n\x1b[01;31m'
assert 'ls\r\nfoo\r\n' == util.strip_ansi_escape(string)
def test_strip_ansi_color():
s = 'foo\x1b[0m\x1b[0m\x1b[0m\n\x1b[0m\x1b[0m\x1b[0m\x1b[0m\x1b[0m'
assert 'foo\n' == util.strip_ansi_color(s)
def test_verbose_flag():
options = {'verbose': True, 'v': True}
assert ['-v'] == util.verbose_flag(options)
assert {} == options
def test_verbose_flag_extra_verbose():
options = {'verbose': True, 'vvv': True}
assert ['-vvv'] == util.verbose_flag(options)
assert {} == options
def test_verbose_flag_preserves_verbose_option():
options = {'verbose': True}
assert [] == util.verbose_flag(options)
assert {'verbose': True} == options
def test_filter_verbose_permutation():
options = {
'v': True,
'vv': True,
'vvv': True,
'vfoo': True,
'foo': True,
'bar': True,
}
x = {'vfoo': True, 'foo': True, 'bar': True}
assert x == util.filter_verbose_permutation(options)
def test_title():
assert 'Foo' == util.title('foo')
assert 'Foo Bar' == util.title('foo_bar')
def test_abs_path(temp_dir):
x = os.path.abspath(os.path.join(os.getcwd(), os.path.pardir, 'foo', 'bar'))
assert x == util.abs_path(os.path.join(os.path.pardir, 'foo', 'bar'))
def test_abs_path_with_none_path():
assert util.abs_path(None) is None
def test_camelize():
assert 'Foo' == util.camelize('foo')
assert 'FooBar' == util.camelize('foo_bar')
assert 'FooBarBaz' == util.camelize('foo_bar_baz')
def test_underscore():
assert 'foo' == util.underscore('Foo')
assert 'foo_bar' == util.underscore('FooBar')
assert 'foo_bar_baz' == util.underscore('FooBarBaz')
@pytest.mark.parametrize(
'a,b,x',
[
# Base of recursion scenarios
(dict(key=1), dict(key=2), dict(key=2)),
(dict(key={}), dict(key=2), dict(key=2)),
(dict(key=1), dict(key={}), dict(key={})),
# Recursive scenario
(dict(a=dict(x=1)), dict(a=dict(x=2)), dict(a=dict(x=2))),
(dict(a=dict(x=1)), dict(a=dict(y=2)), dict(a=dict(x=1, y=2))),
# example taken from python-anyconfig/anyconfig/__init__.py
(
{'b': [{'c': 0}, {'c': 2}], 'd': {'e': 'aaa', 'f': 3}},
{'a': 1, 'b': [{'c': 3}], 'd': {'e': 'bbb'}},
{'a': 1, 'b': [{'c': 3}], 'd': {'e': "bbb", 'f': 3}},
),
],
)
def test_merge_dicts(a, b, x):
assert x == util.merge_dicts(a, b)
|
the-stack_0_21636 |
class LocallyStoredImage(object):
def __init__(self, imgSrc='', localFileName='',
linkhash='', bytes=0, fileExtension='', height=0, width=0):
self.imgSrc = imgSrc
self.localFileName = localFileName
self.linkhash = linkhash
self.bytes = bytes
self.fileExtension = fileExtension
self.height = height
self.width = width |
the-stack_0_21638 | import re
import os
import subprocess
from datetime import datetime
from contextlib import contextmanager
from collections import OrderedDict
from typing import Tuple, Optional, Mapping, Dict, Any
import numpy as np
import pandas as pd
from svp.common import utils
from svp.common.selection import UNCERTAINTY_METHODS
# Using shell command to match original example for fastText
NORMALIZE_TEXT = """
cat {input} | tr '[:upper:]' '[:lower:]' | sed -e 's/^/__label__/g' | \\
sed -e "s/'/ ' /g" -e 's/"//g' -e 's/\\./ \\. /g' -e 's/<br \\/>/ /g' \\
-e 's/,/ , /g' -e 's/(/ ( /g' -e 's/)/ ) /g' -e 's/\\!/ \\! /g' \\
-e 's/\\?/ \\? /g' -e 's/\\;/ /g' -e 's/\\:/ /g' | tr -s " " > {output}
"""
FASTTEXT_TRAIN = """
{command} supervised -input "{train_file}" -output "{model_file}" -dim {dim} \
-lr {lr} -wordNgrams {ngrams} -minCount {min_count} -bucket {bucket} \
-epoch {epoch} -thread {threads}
"""
FASTTEXT_TEST = """
{command} test "{model_bin}" "{test_file}"
"""
FASTTEXT_PREDICT = """
{command} predict "{model_bin}" "{test_file}" > "{test_predict}"
"""
# Only want to rank training data points
FASTTEXT_PROBS = """
{command} predict-prob "{model_bin}" "{full_train_file}" {num_classes} \
> "{train_probs}"
"""
LABEL_PATTERN = re.compile(r'^__label__(?P<label>\d+)')
FASTTEXT_SELECTION_METHODS = ['random']
FASTTEXT_SELECTION_METHODS += UNCERTAINTY_METHODS
def fasttext(executable: str,
run_dir: str = './run',
datasets_dir: str = './data',
dataset: str = 'amazon_review_polarity',
dim: int = 10, ngrams: int = 2, min_count: int = 1,
bucket: int = 10_000_000, learning_rate: float = 0.05,
epochs: int = 5,
sizes: Tuple[int, ...] = (
72_000, 360_000, 720_000, 1_080_000, 1_440_000, 1_800_000
),
selection_method: str = 'entropy',
threads: int = 4, seed: Optional[int] = None,
track_test_acc: bool = True):
"""
Perform active learning or core-set selection with fastText.
Training and evaluation are both performed using fastText v0.1.0. Please
download and install fastText before using this function:
https://github.com/facebookresearch/fastText/tree/v0.1.0
Active learning is represented by increasing sizes. To make up the
difference between sizes, additional examples are selected according to
the `selection_method` from the remaining pool of data. For example,
`sizes = [100, 200, 300, 400]` would start with a subset of 100 randomly
selected examples and select a series of 3 batches of 100 examples each.
Core-set selection is represented by decreasing sizes. Examples are
selected according to the `selection_method` from the entire pool of data.
This assumes that the first size is the size of the entire labeled data,
but it is not explicitly enforced. For example, to reproduce the
"selection via proxy" core-set experiments on Amazon Review Polarity,
`sizes` should be length 2 with `sizes[0] == 3_600_000` and `sizes[1]`
set to the desired subset sizes.
Parameters
----------
executable : str
Path to fastText executable. Please install fastText v0.1.0:
https://github.com/facebookresearch/fastText/tree/v0.1.0
run_dir : str, default './run'
Path to log results and other artifacts.
datasets_dir : str, default './data'
Path to datasets.
dataset : str, default 'amazon_review_polarity'
Dataset to use in experiment (e.g., amazon_review_full)
dim : int, default 10
Size of word vectors.
ngrams : int, default 2
Max length of word ngram.
min_count : int, default 1
Minimal number of word occurences.
bucket : int, default 10_000_000
Number of buckets.
learning_rate : float, default 0.05
Learning rate.
epochs : int, default 5
Number of epochs to train.
sizes : Tuple[int, ...], default (
72,000, 360,000, 720,000, 1,080,000, 1,440,000, 1,800,000)
Number of examples to keep after each selection round. The first number
represents the initial subset. Increasing sizes represent the active
learning use case. Decreasing sizes represent the core-set selection
use case.
selection_method : str, default 'entropy'
Criteria for selecting examples.
threads : int, default 4
Number of threads.
seed : Optional[int], default None
Random seed for numpy, torch, and others. If None, a random int is
chosen and logged in the experiments config file.
track_test_acc : bool, default True
Calculate performance of the models on the test data in addition to or
instead of the validation dataset.
"""
# Set seeds for reproducibility.
seed = utils.set_random_seed(seed)
# Capture all of the arguments to save alongside the results.
config = utils.capture_config(**locals())
# Create a unique timestamped directory for this experiment.
run_dir = utils.create_run_dir(run_dir, timestamp=config['timestamp'])
utils.save_config(config, run_dir)
# Create proxy directory to be compatible with other scripts.
proxy_run_dir = os.path.join(run_dir, 'proxy')
os.makedirs(proxy_run_dir, exist_ok=True)
# Create the training dataset, which is just the path to the
# normalized training text file. This allows us to work with the
# fasttext executable.
train_dataset = create_dataset(dataset, datasets_dir, train=True)
# Create train indices
with open(train_dataset) as file:
N = sum(1 for line in file)
# Validate sizes
_sizes = np.array(sizes)
assert len(_sizes) >= 2, 'Need at least two sizes'
assert (_sizes <= N).all(), f'{N} examples are insufficient for sizes {_sizes}'
assert (_sizes > 0).all(), f'Sizes must be positive. Got {_sizes}'
changes = _sizes[1:] - _sizes[:-1]
assert (changes != 0).all(), 'Sizes must change'
signs = np.sign(changes)
assert (signs[1:] == signs[0]).all(), 'Sizes must increase or decrease monotonically'  # noqa: E501
if signs[0] > 0:
# Positive signs mean active learning (i.e., add labeled examples)
print(f'Valid active learning experiment: {sizes}')
setting = 'active'
else:
# Negative signs mean core-set selection (i.e., remove examples)
print(f'Valid core-set selection experiment: {sizes}')
setting = 'coreset'
print('{} total examples for training'.format(N))
# Read training labels and calculate the number of classes.
targets = read_labels(train_dataset)
num_classes = len(set(targets))
test_dataset = None
if track_test_acc:
# Create the test dataset, which is just the path to the
# normalized test text file. This allows us to work with the
# fasttext executable.
test_dataset = create_dataset(dataset, datasets_dir, train=False)
# Create initial subset to train fastText.
initial_subset = sizes[0]
print('Selecting initial subset of {}'.format(initial_subset))
# Keep a random subset
labeled = np.random.permutation(np.arange(N))[:initial_subset]
# Pulling from the normalized training data, save a separate file
# with only the examples in `labeled`. This allows us to work
# with the fasttext executable
tag = f'{initial_subset}'
round_dir = os.path.join(proxy_run_dir, tag)
os.makedirs(round_dir, exist_ok=True)
current_path = os.path.join(round_dir, f'shuffled_{tag}.norm')
_ = shuffle_and_index(train_dataset, current_path, kept=labeled)
utils.save_index(labeled, run_dir,
'initial_subset_{}.index'.format(len(labeled)))
# Perform selection(s)
print('Running active learning for budgets: {}'.format(sizes[1:]))
# Skip the first selection, which was performed above.
for next_size in sizes[1:]:
# Train fastText on the labeled data we have so far.
print("Training on {} examples".format(tag))
probs, train_stats, selection_stats = train_fastText(
executable, current_path, train_dataset, test_dataset,
round_dir, tag, num_classes,
lr=learning_rate,
dim=dim,
min_count=min_count,
bucket=bucket,
epoch=epochs,
threads=threads,
ngrams=ngrams,
verbose=True)
utils.save_result(train_stats, os.path.join(run_dir, "proxy.csv"))
print('Selecting examples for size {}'.format(next_size))
# Rank examples based on the probabilities from fastText.
ranking_start = datetime.now()
ranking = calculate_rank(probs, selection_method)
# Select top examples.
if next_size > len(labeled):
# Performing active learning.
# Add top ranking examples to the existing labeled set.
labeled_set = set(labeled)
ranking = [i for i in ranking if i not in labeled_set]
new_indices = ranking[:(next_size - len(labeled))]
selection_stats['current_nexamples'] = len(labeled)
selection_stats['new_nexamples'] = len(new_indices)
labeled = np.concatenate([labeled, new_indices])
elif next_size < len(labeled):
# Performing core-set selection.
# `labeled` should include the entire training set, so
# this is taking a strict subset. This is not
# explicitly enforced to allow for additional
# exploration and experimentation (e.g., what if the
# proxy only looked at a subset of the data before
# selecting the best examples from all the data?).
labeled = ranking[:next_size]
selection_stats['current_nexamples'] = 0
selection_stats['new_nexamples'] = len(labeled)
ranking_time = datetime.now() - ranking_start
assert len(set(labeled)) == next_size
# Pulling from the normalized training data, save a separate file
# with only the examples in `labeled`. This allows us to work
# with the fasttext executable
tag = f'{next_size}'
round_dir = os.path.join(proxy_run_dir, tag)
os.makedirs(round_dir, exist_ok=True)
current_path = os.path.join(round_dir, f'shuffled_{tag}.norm')
_ = shuffle_and_index(train_dataset, current_path, kept=labeled)
utils.save_index(labeled, round_dir,
'labeled_{}.index'.format(len(labeled)))
# Save high-level runtimes for analysis
selection_stats['ranking_time'] = ranking_time
selection_stats['total_time'] = selection_stats['inference_time']
selection_stats['total_time'] += selection_stats['ranking_time']
utils.save_result(selection_stats,
os.path.join(run_dir, 'selection.csv'))
if setting == 'coreset':
# Resave indices to be compatible with core-set experiments
utils.save_index(labeled, run_dir, 'selected.index')
utils.save_index(np.array([], dtype=np.int64), run_dir, 'dev.index')
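# Illustrative sketch of how `sizes` encodes the two settings described in the
# fasttext() docstring. The binary path and data directory are placeholder
# assumptions; point them at a local fastText v0.1.0 build and dataset root.
def _example_selection_runs(fasttext_bin='./fasttext', data_dir='./data'):
    # Active learning: start from 72k labeled examples and grow the pool.
    fasttext(fasttext_bin, datasets_dir=data_dir,
             sizes=(72_000, 360_000, 720_000),
             selection_method='entropy')
    # Core-set selection: rank the full 3.6M training set, then keep 360k.
    fasttext(fasttext_bin, datasets_dir=data_dir,
             sizes=(3_600_000, 360_000),
             selection_method='entropy')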
def create_dataset(dataset: str, dataset_dir: str, train: bool = True) -> str:
"""
Create path to normalized fastText data.
Parameters
----------
dataset : str
Dataset to use in experiment (e.g., amazon_review_full)
datasets_dir : str
Path to datasets.
train: bool, default True
Load training data.
Returns
-------
normalized_data_path : str
"""
dataset_dir = os.path.join(dataset_dir, dataset + '_csv')
mode = 'train' if train else 'test'
# Normalize data for fastText
filename = 'normalized.'
filename += mode
# fastText needs to preprocess data before it can train
normalized_data_path = os.path.join(dataset_dir, filename)
if not os.path.exists(normalized_data_path):
# Preprocessed data doesn't exist, so we need to create it from
# the original data.
print(f"Normalize {mode} data doesn't exist. Going to raw data.")
raw_filename = mode
raw_filename += '.csv'
raw_path = os.path.join(dataset_dir, raw_filename)
assert os.path.exists(raw_path), f"Raw {mode} data doesn't exist: {raw_path}" # noqa: E501
with runtime(f"Normalized raw {mode} data from {raw_path}"):
normalize_text(raw_path, normalized_data_path)
print(f'Using {normalized_data_path} for {mode} data')
return normalized_data_path
@contextmanager
def runtime(text: str):
"""
Calculate wall-clock time for anything run within this context.
Parameters
----------
text : str
Output text while context is running.
"""
print(text, end="")
start = datetime.now()
try:
yield start
finally:
print(" (finished in {})".format(datetime.now() - start))
def normalize_text(input: str, output: str):
"""
Normalize data (examples + labels) according to fastText rules.
Parameters
----------
input : str
Path to raw data.
output : str
Path to output normalized data.
"""
run_command(NORMALIZE_TEXT.format(input=input, output=output))
def shuffle_and_index(prev: str, curr: str, kept: np.array,
mapping: Optional[Mapping[int, int]] = None,
shuffle: bool = True) -> Tuple[np.array, np.array]:
"""
Shuffle lines (i.e., examples) in a text file.
Parameters
----------
prev : str
Path to raw data to shuffle.
curr : str
Path to output shuffled data.
kept : np.array of ints
Line numbers to keep from `prev` text file.
mapping : Optional[Mapping[int, int]], default None
Optional mapping from line numbers in `prev` to the indices to record.
shuffle : bool, default True
Shuffle the line numbers to keep.
Returns
-------
indices : np.array
targets : np.array
"""
with open(prev) as file:
lines = file.readlines()
if shuffle:
kept = np.random.permutation(kept)
indices = np.zeros(len(kept), dtype=np.int64)
targets = np.zeros(len(kept), dtype=np.int64)
with open(curr, 'w') as file:
for index, example in enumerate(kept):
line = lines[example]
match = re.match(LABEL_PATTERN, line)
assert match is not None, "Every line should have a label"
targets[index] = int(match.group(1)) - 1
indices[index] = example if mapping is None else mapping[example]
file.write(line)
pd.Series(indices).to_csv(curr + '.index')
return indices, targets
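# Usage sketch for shuffle_and_index(): write a tiny fastText-normalized file
# (one "__label__<k> ..." example per line), then keep and shuffle two of its
# lines. The file contents and temporary paths are made up for illustration.
def _example_shuffle_and_index():
    import tempfile
    tmp_dir = tempfile.mkdtemp()
    src = os.path.join(tmp_dir, 'toy.norm')
    dst = os.path.join(tmp_dir, 'toy_subset.norm')
    with open(src, 'w') as f:
        f.write('__label__1 great phone\n')
        f.write('__label__2 terrible battery\n')
        f.write('__label__1 works as expected\n')
    indices, targets = shuffle_and_index(src, dst, kept=np.array([0, 2]))
    # Targets are zero-based labels; both kept lines belong to class 0 here.
    return indices, targets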
def train_fastText(command: str, train_file: str, full_train_file: str,
test_file: Optional[str], run_dir: str, tag: str,
num_classes: int,
dim: int = 10, lr: float = 0.05, ngrams: int = 2,
epoch: int = 5, threads: int = 4, min_count: int = 1,
bucket: int = 10000000, verbose=True
) -> Tuple[np.array, Dict[str, Any], Dict[str, Any]]:
"""
Train fastText model.
Parameters
----------
command : str
Path to fasttext executable.
train_file : str
Path to fastText normalized data to use for training.
full_train_file : str
Path to fastText normalized data to calculate probabilities for.
test_file : Optional[str]
Path to fastText normalized data to use for evaluation.
run_dir : str, default './run'
Path to log results and other artifacts.
tag : str
Unique identifier to add to the filenames for the model binary, test
predictions, and probabilities.
num_classes : int
Number of classes for the output probabilities.
dim : int, default 10
Size of word vectors.
lr : float, default 0.05
Learning rate.
ngrams : int, default 2
Max length of word ngram.
epoch : int, default 5
Number of epochs to train.
threads : int, default 4
Number of threads.
min_count : int, default 1
Minimal number of word occurrences.
bucket : int, default 10_000_000
Number of buckets.
verbose : bool
Output the command lines used with the fastText executable before
execution.
Returns
-------
probs : np.array
train_stats : Dict[str, Any]
selection_stats : Dict[str, Any]
"""
model_file = os.path.join(run_dir, f"model_{tag}") # noqa: F841
model_bin = model_file + '.bin' # noqa: F841
model_vec = model_file + '.vec' # noqa: F841
test_predict = os.path.join(run_dir, f"test_predict_{tag}.test") # noqa: F841 E501
# Only want probabilities for training data for ranking
train_probs = os.path.join(run_dir, f"train_probs_{tag}.train")
config = {k: v for k, v in locals().items()}
train_stats: Dict[str, Any] = OrderedDict()
selection_stats: Dict[str, Any] = OrderedDict()
with open(train_file) as file:
nlabeled = sum(1 for line in file)
train_stats['nexamples'] = nlabeled
# train
train_start = datetime.now()
command = FASTTEXT_TRAIN.format(**config)
run_command(command, verbose=verbose)
train_time = datetime.now() - train_start
train_stats['train_time'] = train_time
inference_start = datetime.now()
command = FASTTEXT_PROBS.format(**config)
run_command(command, verbose=verbose)
probs = read_fasttext_probs(train_probs, num_classes=num_classes)
inference_time = datetime.now() - inference_start
selection_stats['inference_time'] = inference_time
train_acc = calculate_train_accuracy(full_train_file, probs)
train_stats['train_accuracy'] = train_acc
print(f'Train accuracy: {train_acc}')
if test_file is not None:
# predict test
test_start = datetime.now()
command = FASTTEXT_PREDICT.format(**config)
run_command(command, verbose=verbose)
test_acc = calculate_test_accuracy(config['test_file'], test_predict)
print(f'Test accuracy: {test_acc}')
test_time = datetime.now() - test_start
train_stats['test_time'] = test_time
train_stats['test_accuracy'] = test_acc
return (probs, train_stats, selection_stats)
def read_labels(path: str) -> np.array:
"""
Read labels from fastText normalized files.
Parameters
----------
path : str
Path to fastText normalized file.
Returns
-------
labels : np.array
"""
labels = []
with open(path) as file:
for line in file:
match = re.match(LABEL_PATTERN, line)
assert match is not None
label = int(match.group('label'))
labels.append(label)
return np.array(labels, dtype=np.int64) - 1  # zero-based, matching the probability columns
def calculate_train_accuracy(targets_path: str, probs: np.array) -> float:
"""
Calculate accuracy on training data.
Parameters
----------
targets_path : str
Path to fastText normalized training data.
probs : np.array
Class probability distribution for each line in `targets_path`.
Returns
-------
accuracy : float
"""
targets = read_labels(targets_path)
preds = probs.argmax(axis=1)
assert len(targets) == len(preds)
return (targets == preds).sum() / len(targets)
def calculate_test_accuracy(targets_path: str, preds_path: str) -> float:
"""
Calculate accuracy on test data.
Parameters
----------
targets_path : str
Path to fastText normalized test data.
preds_path : str
Path to fastText normalized predicted labels.
Returns
-------
accuracy : float
"""
targets = read_labels(targets_path)
preds = read_labels(preds_path)
assert len(targets) == len(preds)
return (targets == preds).sum() / len(targets)
def calculate_rank(probs, rank_metric) -> np.array:
"""
Rank examples based on class probabilites.
Parameters
----------
probs : np.array
Class probability distribution for each candidate example.
rank_metric : str
Criteria for selecting examples.
Returns
-------
ranking : np.array
"""
if rank_metric == 'entropy':
entropy = (np.log(probs) * probs).sum(axis=1) * -1.
ranking = entropy.argsort()[::-1]
elif rank_metric == 'least_confidence':
probs = probs.max(axis=1)
ranking = probs.argsort(axis=0)
elif rank_metric == 'random':
N = len(probs)
ranking = np.arange(N)
np.random.shuffle(ranking)
else:
raise NotImplementedError(f"{rank_metric} hasn't been implemented yet")
return ranking
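# Usage sketch for calculate_rank(): with the entropy criterion, the most
# uncertain example (closest to a uniform distribution) is ranked first.
def _example_calculate_rank():
    probs = np.array([
        [0.99, 0.01],  # confident
        [0.55, 0.45],  # most uncertain -> highest entropy
        [0.90, 0.10],
    ])
    ranking = calculate_rank(probs, 'entropy')
    assert ranking[0] == 1
    return ranking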
def read_fasttext_probs(path: str,
num_classes: Optional[int] = None) -> np.array:
'''
Read probabilities from fastText's predict-prob command.
Parameters
----------
path : str
Path to output file from fastText's predict-prob command.
num_classes : Optional[int], default None
Specify the number of classes in the output probability
distribution. If None, this is inferred from the first line of
the file.
Returns
-------
probs : np.array
'''
records = []
with open(path, 'r') as file:
for line in file:
split = line.strip().replace('__label__', '').split(' ')
labels = map(lambda x: int(x) - 1, split[::2])
values = map(float, split[1::2])
record = {label: value for label, value in zip(labels, values)}
records.append(record)
if num_classes is None:
num_classes = len(records[0].keys())
df = pd.DataFrame.from_records(records).loc[:, np.arange(num_classes)]
return df.values
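# Usage sketch for read_fasttext_probs(): fastText's predict-prob output has
# one line per example with alternating "__label__<k> <prob>" pairs. The toy
# file below mimics that format with made-up probabilities.
def _example_read_fasttext_probs():
    import tempfile
    with tempfile.NamedTemporaryFile('w', suffix='.train', delete=False) as f:
        f.write('__label__1 0.9 __label__2 0.1\n')
        f.write('__label__2 0.7 __label__1 0.3\n')
        path = f.name
    probs = read_fasttext_probs(path, num_classes=2)
    # Rows are examples, columns are zero-based class indices.
    assert probs.shape == (2, 2)
    return probs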
def run_command(command, shell=True, verbose=False, required=True):
"""
Run command as subprocess
"""
if verbose:
print(f"Running: {command}")
result = subprocess.call(command, shell=shell)
if required:
assert result == 0, f"Failed command: {command}"
|
the-stack_0_21640 | """
Simple example to retrieving some sample cargo movements in a dataframe.
The below script returns something similar to:
| | events.cargo_port_unload_event.0.start_timestamp | product.group.label | product.grade.label | quantity | vessels.0.name |
|---:|:---------------------------------------------------|:----------------------|:----------------------|-----------:|:-----------------|
| 0 | 2019-10-08T00:41:00+0000 | Crude | Djeno | 123457 | AROME |
| 1 | 2019-11-08T00:41:52+0000 | Crude | Arab Medium | 99898 | SCOOBYDOO |
| 2 | 2019-09-30T23:49:41+0000 | Crude | Arab Heavy | 9879878 | DAVID |
| 3 | 2019-12-01T01:40:00+0000 | Crude | Usan | 999999 | DUCK |
"""
from datetime import datetime
from vortexasdk import CargoMovements
if __name__ == "__main__":
# Query API to find all cargo movements that were loading on the 1st of Aug 2019
search_result = CargoMovements().search(
filter_activity="loading_start",
filter_time_min=datetime(2019, 8, 1),
filter_time_max=datetime(2019, 8, 2),
)
print("Cargo movements successfully loaded")
# Convert search result to dataframe
df = search_result.to_df()
print(df.head())
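    # Optionally narrow the output to a few columns like those in the sample
    # table above (column names are assumed from that table and may differ
    # across SDK versions, hence the membership check).
    subset_cols = [
        "events.cargo_port_unload_event.0.start_timestamp",
        "product.group.label",
        "quantity",
        "vessels.0.name",
    ]
    print(df[[c for c in subset_cols if c in df.columns]].head())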
|
the-stack_0_21641 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
def load_arguments(self, _):
from azure.cli.core.commands.parameters import get_enum_type
with self.argument_context('rest') as c:
c.argument('method', options_list=['--method', '-m'],
arg_type=get_enum_type(['head', 'get', 'put', 'post', 'delete', 'options', 'patch'], default='get'),
help='HTTP request method')
c.argument('url', options_list=['--url', '--uri', '-u'],
help='Request URL. If it doesn\'t start with a host, '
'CLI assumes it as an Azure resource ID and prefixes it with the ARM endpoint of the current '
'cloud shown by `az cloud show --query endpoints.resourceManager`. Common token '
'{subscriptionId} will be replaced with the current subscription ID specified by `az account '
'set`')
c.argument('headers', nargs='+',
help="Space-separated headers in KEY=VALUE format or JSON string. Use @{file} to load from a file")
c.argument('uri_parameters', options_list=['--uri-parameters', '--url-parameters'], nargs='+',
help='Query parameters in the URL. Space-separated queries in KEY=VALUE format or JSON string. '
'Use @{file} to load from a file')
c.argument('skip_authorization_header', action='store_true', help='Do not auto-append Authorization header')
c.argument('body', options_list=['--body', '-b'],
help='Request body. Use @{file} to load from a file. For quoting issues in different terminals, '
'see https://github.com/Azure/azure-cli/blob/dev/doc/use_cli_effectively.md#quoting-issues')
c.argument('output_file', help='save response payload to a file')
c.argument('resource',
help='Resource url for which CLI should acquire a token from AAD in order to access '
'the service. The token will be placed in the Authorization header. By default, '
'CLI can figure this out based on --url argument, unless you use ones not in the list '
'of "az cloud show --query endpoints"')
|
the-stack_0_21643 | import json
from pulsar.utils.exceptions import http_errors, HttpException
from pulsar.utils.httpurl import is_succesful
from pulsar.utils.httpurl import JSON_CONTENT_TYPES
from pulsar.apps.wsgi import render_error_debug
from ..utils.data import compact_dict
from .cms import Page
# from ..utils.messages import error_message
def raise_http_error(response, method=None, url=None):
if not is_succesful(response.status_code):
if response.status_code:
content = response.text
# if isinstance(content, dict):
# content = content.get('message', '')
# if method and url:
# content = '%s %s => %s' % (method, url, content)
ErrorClass = http_errors.get(response.status_code)
if ErrorClass:
raise ErrorClass(content)
else:
raise HttpException(content, status=response.status_code)
else:
raise HttpException
class ShellError(Exception):
def __init__(self, msg, code):
super().__init__(msg)
self.code = code
def http_assert(assertion, errorCls, *args):
if not assertion:
raise errorCls(*args)
def json_message(request, message, errors=None, **obj):
"""Create a JSON message to return to clients
"""
obj = compact_dict(**obj)
obj['message'] = message
if errors:
obj['errors'] = errors
return obj
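# Usage sketch for json_message(). The request argument is unused by the
# function body, so None stands in here; compact_dict is assumed to keep the
# non-None keyword values.
def _example_json_message():
    payload = json_message(None, 'validation failed',
                           errors=[{'field': 'name', 'message': 'required'}],
                           path='/api/items')
    # payload -> {'path': '/api/items', 'message': 'validation failed', 'errors': [...]}
    return payload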
def error_handler(request, exc):
"""Default renderer for errors."""
app = request.app
response = request.response
if not response.content_type:
content_type = request.get('default.content_type')
if content_type:
if isinstance(content_type, str):
content_type = (content_type,)
response.content_type = request.content_types.best_match(
content_type)
content_type = ''
if response.content_type:
content_type = response.content_type.split(';')[0]
errors = None
message = None
if hasattr(exc, 'args') and exc.args:
errors = exc.args[0]
if isinstance(errors, str):
errors = None
message = errors
if not message:
message = (
app.config['ERROR_MESSAGES'].get(response.status_code) or
response.status
)
is_html = (content_type == 'text/html')
trace = None
if response.status_code == 500 and app.debug:
trace = render_error_debug(request, exc, is_html)
if content_type in JSON_CONTENT_TYPES:
return json.dumps(json_message(request, message,
errors=errors, trace=trace))
elif is_html:
context = {'status_code': response.status_code,
'status_message': trace or message}
page = Page(body_template=(
'%s.html' % response.status_code, 'error.html')
)
return app.cms.page_response(
request, page, context, title=response.status
)
elif content_type[-4:] == '/xml':
return XML_ERROR % (response.status_code, message)
elif trace:
return '\n'.join(trace)
else:
return message
XML_ERROR = """<error-page>
<error-code>%s</error-code>
<message>%s</message>
</error-page>"""
|
the-stack_0_21644 | import pytest
from util.html import html2text
@pytest.mark.parametrize('input, expected', [
('hello world', 'hello world'),
('hello <strong>world</strong>', 'hello *world*'),
('<ul><li>foo</li><li>bar</li><li>baz</li></ul>', '* foo\n* bar\n* baz'),
('<hr>', ('-' * 80)),
('<a href="foo">bar</a>', '[bar](foo)'),
])
def test_html2text(input, expected):
assert html2text(input) == expected
|
the-stack_0_21645 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("twitter", "0005_auto_20150730_1350"),
]
operations = [
migrations.AlterModelOptions(
name="account", options={"ordering": ["-time_created"]},
),
migrations.RemoveField(model_name="account", name="screen_name",),
]
|